; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+m,+d,+zvfh,+zvfbfmin,+v \
; RUN:   -target-abi=ilp32d -verify-machineinstrs < %s | FileCheck %s \
; RUN:   --check-prefixes=CHECK,RV32
; RUN: llc -mtriple=riscv64 -mattr=+m,+d,+zvfh,+zvfbfmin,+v \
; RUN:   -target-abi=lp64d -verify-machineinstrs < %s | FileCheck %s \
; RUN:   --check-prefixes=CHECK,RV64
; RUN: llc -mtriple=riscv32 -mattr=+m,+d,+zvfhmin,+zvfbfmin,+v \
; RUN:   -target-abi=ilp32d -verify-machineinstrs < %s | FileCheck %s \
; RUN:   --check-prefixes=CHECK,RV32
; RUN: llc -mtriple=riscv64 -mattr=+m,+d,+zvfhmin,+zvfbfmin,+v \
; RUN:   -target-abi=lp64d -verify-machineinstrs < %s | FileCheck %s \
; RUN:   --check-prefixes=CHECK,RV64

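; Codegen tests for @llvm.masked.gather.* on scalable vectors. Pointers are
; XLEN-wide, so RV32 selects vluxei32 and RV64 selects vluxei64 for the
; indexed loads. The zvfh and zvfhmin run lines share the same check lines,
; since gathers only move element bits and never do FP arithmetic on them.
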
declare <vscale x 1 x i8> @llvm.masked.gather.nxv1i8.nxv1p0(<vscale x 1 x ptr>, i32, <vscale x 1 x i1>, <vscale x 1 x i8>)

define <vscale x 1 x i8> @mgather_nxv1i8(<vscale x 1 x ptr> %ptrs, <vscale x 1 x i1> %m, <vscale x 1 x i8> %passthru) {
; RV32-LABEL: mgather_nxv1i8:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetvli a0, zero, e8, mf8, ta, mu
; RV32-NEXT:    vluxei32.v v9, (zero), v8, v0.t
; RV32-NEXT:    vmv1r.v v8, v9
; RV32-NEXT:    ret
;
; RV64-LABEL: mgather_nxv1i8:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli a0, zero, e8, mf8, ta, mu
; RV64-NEXT:    vluxei64.v v9, (zero), v8, v0.t
; RV64-NEXT:    vmv1r.v v8, v9
; RV64-NEXT:    ret
  %v = call <vscale x 1 x i8> @llvm.masked.gather.nxv1i8.nxv1p0(<vscale x 1 x ptr> %ptrs, i32 1, <vscale x 1 x i1> %m, <vscale x 1 x i8> %passthru)
  ret <vscale x 1 x i8> %v
}

declare <vscale x 2 x i8> @llvm.masked.gather.nxv2i8.nxv2p0(<vscale x 2 x ptr>, i32, <vscale x 2 x i1>, <vscale x 2 x i8>)

define <vscale x 2 x i8> @mgather_nxv2i8(<vscale x 2 x ptr> %ptrs, <vscale x 2 x i1> %m, <vscale x 2 x i8> %passthru) {
; RV32-LABEL: mgather_nxv2i8:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetvli a0, zero, e8, mf4, ta, mu
; RV32-NEXT:    vluxei32.v v9, (zero), v8, v0.t
; RV32-NEXT:    vmv1r.v v8, v9
; RV32-NEXT:    ret
;
; RV64-LABEL: mgather_nxv2i8:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli a0, zero, e8, mf4, ta, mu
; RV64-NEXT:    vluxei64.v v10, (zero), v8, v0.t
; RV64-NEXT:    vmv1r.v v8, v10
; RV64-NEXT:    ret
  %v = call <vscale x 2 x i8> @llvm.masked.gather.nxv2i8.nxv2p0(<vscale x 2 x ptr> %ptrs, i32 1, <vscale x 2 x i1> %m, <vscale x 2 x i8> %passthru)
  ret <vscale x 2 x i8> %v
}

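; A gather whose result is sign- or zero-extended is selected as an indexed
; load of the narrow element type followed by a separate vsext/vzext, rather
; than widening the memory access itself.
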
define <vscale x 2 x i16> @mgather_nxv2i8_sextload_nxv2i16(<vscale x 2 x ptr> %ptrs, <vscale x 2 x i1> %m, <vscale x 2 x i8> %passthru) {
; RV32-LABEL: mgather_nxv2i8_sextload_nxv2i16:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetvli a0, zero, e8, mf4, ta, mu
; RV32-NEXT:    vluxei32.v v9, (zero), v8, v0.t
; RV32-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
; RV32-NEXT:    vsext.vf2 v8, v9
; RV32-NEXT:    ret
;
; RV64-LABEL: mgather_nxv2i8_sextload_nxv2i16:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli a0, zero, e8, mf4, ta, mu
; RV64-NEXT:    vluxei64.v v10, (zero), v8, v0.t
; RV64-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
; RV64-NEXT:    vsext.vf2 v8, v10
; RV64-NEXT:    ret
  %v = call <vscale x 2 x i8> @llvm.masked.gather.nxv2i8.nxv2p0(<vscale x 2 x ptr> %ptrs, i32 1, <vscale x 2 x i1> %m, <vscale x 2 x i8> %passthru)
  %ev = sext <vscale x 2 x i8> %v to <vscale x 2 x i16>
  ret <vscale x 2 x i16> %ev
}

define <vscale x 2 x i16> @mgather_nxv2i8_zextload_nxv2i16(<vscale x 2 x ptr> %ptrs, <vscale x 2 x i1> %m, <vscale x 2 x i8> %passthru) {
; RV32-LABEL: mgather_nxv2i8_zextload_nxv2i16:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetvli a0, zero, e8, mf4, ta, mu
; RV32-NEXT:    vluxei32.v v9, (zero), v8, v0.t
; RV32-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
; RV32-NEXT:    vzext.vf2 v8, v9
; RV32-NEXT:    ret
;
; RV64-LABEL: mgather_nxv2i8_zextload_nxv2i16:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli a0, zero, e8, mf4, ta, mu
; RV64-NEXT:    vluxei64.v v10, (zero), v8, v0.t
; RV64-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
; RV64-NEXT:    vzext.vf2 v8, v10
; RV64-NEXT:    ret
  %v = call <vscale x 2 x i8> @llvm.masked.gather.nxv2i8.nxv2p0(<vscale x 2 x ptr> %ptrs, i32 1, <vscale x 2 x i1> %m, <vscale x 2 x i8> %passthru)
  %ev = zext <vscale x 2 x i8> %v to <vscale x 2 x i16>
  ret <vscale x 2 x i16> %ev
}

define <vscale x 2 x i32> @mgather_nxv2i8_sextload_nxv2i32(<vscale x 2 x ptr> %ptrs, <vscale x 2 x i1> %m, <vscale x 2 x i8> %passthru) {
; RV32-LABEL: mgather_nxv2i8_sextload_nxv2i32:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetvli a0, zero, e8, mf4, ta, mu
; RV32-NEXT:    vluxei32.v v9, (zero), v8, v0.t
; RV32-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
; RV32-NEXT:    vsext.vf4 v8, v9
; RV32-NEXT:    ret
;
; RV64-LABEL: mgather_nxv2i8_sextload_nxv2i32:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli a0, zero, e8, mf4, ta, mu
; RV64-NEXT:    vluxei64.v v10, (zero), v8, v0.t
; RV64-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
; RV64-NEXT:    vsext.vf4 v8, v10
; RV64-NEXT:    ret
  %v = call <vscale x 2 x i8> @llvm.masked.gather.nxv2i8.nxv2p0(<vscale x 2 x ptr> %ptrs, i32 1, <vscale x 2 x i1> %m, <vscale x 2 x i8> %passthru)
  %ev = sext <vscale x 2 x i8> %v to <vscale x 2 x i32>
  ret <vscale x 2 x i32> %ev
}

define <vscale x 2 x i32> @mgather_nxv2i8_zextload_nxv2i32(<vscale x 2 x ptr> %ptrs, <vscale x 2 x i1> %m, <vscale x 2 x i8> %passthru) {
; RV32-LABEL: mgather_nxv2i8_zextload_nxv2i32:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetvli a0, zero, e8, mf4, ta, mu
; RV32-NEXT:    vluxei32.v v9, (zero), v8, v0.t
; RV32-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
; RV32-NEXT:    vzext.vf4 v8, v9
; RV32-NEXT:    ret
;
; RV64-LABEL: mgather_nxv2i8_zextload_nxv2i32:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli a0, zero, e8, mf4, ta, mu
; RV64-NEXT:    vluxei64.v v10, (zero), v8, v0.t
; RV64-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
; RV64-NEXT:    vzext.vf4 v8, v10
; RV64-NEXT:    ret
  %v = call <vscale x 2 x i8> @llvm.masked.gather.nxv2i8.nxv2p0(<vscale x 2 x ptr> %ptrs, i32 1, <vscale x 2 x i1> %m, <vscale x 2 x i8> %passthru)
  %ev = zext <vscale x 2 x i8> %v to <vscale x 2 x i32>
  ret <vscale x 2 x i32> %ev
}

define <vscale x 2 x i64> @mgather_nxv2i8_sextload_nxv2i64(<vscale x 2 x ptr> %ptrs, <vscale x 2 x i1> %m, <vscale x 2 x i8> %passthru) {
; RV32-LABEL: mgather_nxv2i8_sextload_nxv2i64:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetvli a0, zero, e8, mf4, ta, mu
; RV32-NEXT:    vluxei32.v v9, (zero), v8, v0.t
; RV32-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
; RV32-NEXT:    vsext.vf8 v10, v9
; RV32-NEXT:    vmv.v.v v8, v10
; RV32-NEXT:    ret
;
; RV64-LABEL: mgather_nxv2i8_sextload_nxv2i64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli a0, zero, e8, mf4, ta, mu
; RV64-NEXT:    vluxei64.v v10, (zero), v8, v0.t
; RV64-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
; RV64-NEXT:    vsext.vf8 v8, v10
; RV64-NEXT:    ret
  %v = call <vscale x 2 x i8> @llvm.masked.gather.nxv2i8.nxv2p0(<vscale x 2 x ptr> %ptrs, i32 1, <vscale x 2 x i1> %m, <vscale x 2 x i8> %passthru)
  %ev = sext <vscale x 2 x i8> %v to <vscale x 2 x i64>
  ret <vscale x 2 x i64> %ev
}

define <vscale x 2 x i64> @mgather_nxv2i8_zextload_nxv2i64(<vscale x 2 x ptr> %ptrs, <vscale x 2 x i1> %m, <vscale x 2 x i8> %passthru) {
; RV32-LABEL: mgather_nxv2i8_zextload_nxv2i64:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetvli a0, zero, e8, mf4, ta, mu
; RV32-NEXT:    vluxei32.v v9, (zero), v8, v0.t
; RV32-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
; RV32-NEXT:    vzext.vf8 v10, v9
; RV32-NEXT:    vmv.v.v v8, v10
; RV32-NEXT:    ret
;
; RV64-LABEL: mgather_nxv2i8_zextload_nxv2i64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli a0, zero, e8, mf4, ta, mu
; RV64-NEXT:    vluxei64.v v10, (zero), v8, v0.t
; RV64-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
; RV64-NEXT:    vzext.vf8 v8, v10
; RV64-NEXT:    ret
  %v = call <vscale x 2 x i8> @llvm.masked.gather.nxv2i8.nxv2p0(<vscale x 2 x ptr> %ptrs, i32 1, <vscale x 2 x i1> %m, <vscale x 2 x i8> %passthru)
  %ev = zext <vscale x 2 x i8> %v to <vscale x 2 x i64>
  ret <vscale x 2 x i64> %ev
}

declare <vscale x 4 x i8> @llvm.masked.gather.nxv4i8.nxv4p0(<vscale x 4 x ptr>, i32, <vscale x 4 x i1>, <vscale x 4 x i8>)

define <vscale x 4 x i8> @mgather_nxv4i8(<vscale x 4 x ptr> %ptrs, <vscale x 4 x i1> %m, <vscale x 4 x i8> %passthru) {
; RV32-LABEL: mgather_nxv4i8:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetvli a0, zero, e8, mf2, ta, mu
; RV32-NEXT:    vluxei32.v v10, (zero), v8, v0.t
; RV32-NEXT:    vmv1r.v v8, v10
; RV32-NEXT:    ret
;
; RV64-LABEL: mgather_nxv4i8:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli a0, zero, e8, mf2, ta, mu
; RV64-NEXT:    vluxei64.v v12, (zero), v8, v0.t
; RV64-NEXT:    vmv1r.v v8, v12
; RV64-NEXT:    ret
  %v = call <vscale x 4 x i8> @llvm.masked.gather.nxv4i8.nxv4p0(<vscale x 4 x ptr> %ptrs, i32 1, <vscale x 4 x i1> %m, <vscale x 4 x i8> %passthru)
  ret <vscale x 4 x i8> %v
}

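; An all-ones mask lowers to an unmasked vluxei (the passthru is dead); an
; all-zeros mask folds the gather away entirely, leaving only a copy of the
; passthru operand.
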
define <vscale x 4 x i8> @mgather_truemask_nxv4i8(<vscale x 4 x ptr> %ptrs, <vscale x 4 x i8> %passthru) {
; RV32-LABEL: mgather_truemask_nxv4i8:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetvli a0, zero, e8, mf2, ta, ma
; RV32-NEXT:    vluxei32.v v10, (zero), v8
; RV32-NEXT:    vmv1r.v v8, v10
; RV32-NEXT:    ret
;
; RV64-LABEL: mgather_truemask_nxv4i8:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli a0, zero, e8, mf2, ta, ma
; RV64-NEXT:    vluxei64.v v12, (zero), v8
; RV64-NEXT:    vmv1r.v v8, v12
; RV64-NEXT:    ret
  %v = call <vscale x 4 x i8> @llvm.masked.gather.nxv4i8.nxv4p0(<vscale x 4 x ptr> %ptrs, i32 1, <vscale x 4 x i1> splat (i1 1), <vscale x 4 x i8> %passthru)
  ret <vscale x 4 x i8> %v
}

define <vscale x 4 x i8> @mgather_falsemask_nxv4i8(<vscale x 4 x ptr> %ptrs, <vscale x 4 x i8> %passthru) {
; RV32-LABEL: mgather_falsemask_nxv4i8:
; RV32:       # %bb.0:
; RV32-NEXT:    vmv1r.v v8, v10
; RV32-NEXT:    ret
;
; RV64-LABEL: mgather_falsemask_nxv4i8:
; RV64:       # %bb.0:
; RV64-NEXT:    vmv1r.v v8, v12
; RV64-NEXT:    ret
  %v = call <vscale x 4 x i8> @llvm.masked.gather.nxv4i8.nxv4p0(<vscale x 4 x ptr> %ptrs, i32 1, <vscale x 4 x i1> zeroinitializer, <vscale x 4 x i8> %passthru)
  ret <vscale x 4 x i8> %v
}

declare <vscale x 8 x i8> @llvm.masked.gather.nxv8i8.nxv8p0(<vscale x 8 x ptr>, i32, <vscale x 8 x i1>, <vscale x 8 x i8>)

define <vscale x 8 x i8> @mgather_nxv8i8(<vscale x 8 x ptr> %ptrs, <vscale x 8 x i1> %m, <vscale x 8 x i8> %passthru) {
; RV32-LABEL: mgather_nxv8i8:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetvli a0, zero, e8, m1, ta, mu
; RV32-NEXT:    vluxei32.v v12, (zero), v8, v0.t
; RV32-NEXT:    vmv.v.v v8, v12
; RV32-NEXT:    ret
;
; RV64-LABEL: mgather_nxv8i8:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli a0, zero, e8, m1, ta, mu
; RV64-NEXT:    vluxei64.v v16, (zero), v8, v0.t
; RV64-NEXT:    vmv.v.v v8, v16
; RV64-NEXT:    ret
  %v = call <vscale x 8 x i8> @llvm.masked.gather.nxv8i8.nxv8p0(<vscale x 8 x ptr> %ptrs, i32 1, <vscale x 8 x i1> %m, <vscale x 8 x i8> %passthru)
  ret <vscale x 8 x i8> %v
}

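; The baseidx tests gather from getelementptr(%base, <vector of idxs>). The
; index vector is extended to pointer width (vsext.vf4 on RV32, vsext.vf8 on
; RV64 for i8 sources) and scaled by the element size to form the byte
; offsets consumed by the indexed load.
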
define <vscale x 8 x i8> @mgather_baseidx_nxv8i8(ptr %base, <vscale x 8 x i8> %idxs, <vscale x 8 x i1> %m, <vscale x 8 x i8> %passthru) {
; RV32-LABEL: mgather_baseidx_nxv8i8:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetvli a1, zero, e32, m4, ta, ma
; RV32-NEXT:    vsext.vf4 v12, v8
; RV32-NEXT:    vsetvli zero, zero, e8, m1, ta, mu
; RV32-NEXT:    vluxei32.v v9, (a0), v12, v0.t
; RV32-NEXT:    vmv.v.v v8, v9
; RV32-NEXT:    ret
;
; RV64-LABEL: mgather_baseidx_nxv8i8:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
; RV64-NEXT:    vsext.vf8 v16, v8
; RV64-NEXT:    vsetvli zero, zero, e8, m1, ta, mu
; RV64-NEXT:    vluxei64.v v9, (a0), v16, v0.t
; RV64-NEXT:    vmv.v.v v8, v9
; RV64-NEXT:    ret
  %ptrs = getelementptr inbounds i8, ptr %base, <vscale x 8 x i8> %idxs
  %v = call <vscale x 8 x i8> @llvm.masked.gather.nxv8i8.nxv8p0(<vscale x 8 x ptr> %ptrs, i32 1, <vscale x 8 x i1> %m, <vscale x 8 x i8> %passthru)
  ret <vscale x 8 x i8> %v
}

declare <vscale x 1 x i16> @llvm.masked.gather.nxv1i16.nxv1p0(<vscale x 1 x ptr>, i32, <vscale x 1 x i1>, <vscale x 1 x i16>)

define <vscale x 1 x i16> @mgather_nxv1i16(<vscale x 1 x ptr> %ptrs, <vscale x 1 x i1> %m, <vscale x 1 x i16> %passthru) {
; RV32-LABEL: mgather_nxv1i16:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetvli a0, zero, e16, mf4, ta, mu
; RV32-NEXT:    vluxei32.v v9, (zero), v8, v0.t
; RV32-NEXT:    vmv1r.v v8, v9
; RV32-NEXT:    ret
;
; RV64-LABEL: mgather_nxv1i16:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli a0, zero, e16, mf4, ta, mu
; RV64-NEXT:    vluxei64.v v9, (zero), v8, v0.t
; RV64-NEXT:    vmv1r.v v8, v9
; RV64-NEXT:    ret
  %v = call <vscale x 1 x i16> @llvm.masked.gather.nxv1i16.nxv1p0(<vscale x 1 x ptr> %ptrs, i32 2, <vscale x 1 x i1> %m, <vscale x 1 x i16> %passthru)
  ret <vscale x 1 x i16> %v
}

declare <vscale x 2 x i16> @llvm.masked.gather.nxv2i16.nxv2p0(<vscale x 2 x ptr>, i32, <vscale x 2 x i1>, <vscale x 2 x i16>)

define <vscale x 2 x i16> @mgather_nxv2i16(<vscale x 2 x ptr> %ptrs, <vscale x 2 x i1> %m, <vscale x 2 x i16> %passthru) {
; RV32-LABEL: mgather_nxv2i16:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetvli a0, zero, e16, mf2, ta, mu
; RV32-NEXT:    vluxei32.v v9, (zero), v8, v0.t
; RV32-NEXT:    vmv1r.v v8, v9
; RV32-NEXT:    ret
;
; RV64-LABEL: mgather_nxv2i16:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli a0, zero, e16, mf2, ta, mu
; RV64-NEXT:    vluxei64.v v10, (zero), v8, v0.t
; RV64-NEXT:    vmv1r.v v8, v10
; RV64-NEXT:    ret
  %v = call <vscale x 2 x i16> @llvm.masked.gather.nxv2i16.nxv2p0(<vscale x 2 x ptr> %ptrs, i32 2, <vscale x 2 x i1> %m, <vscale x 2 x i16> %passthru)
  ret <vscale x 2 x i16> %v
}

define <vscale x 2 x i32> @mgather_nxv2i16_sextload_nxv2i32(<vscale x 2 x ptr> %ptrs, <vscale x 2 x i1> %m, <vscale x 2 x i16> %passthru) {
; RV32-LABEL: mgather_nxv2i16_sextload_nxv2i32:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetvli a0, zero, e16, mf2, ta, mu
; RV32-NEXT:    vluxei32.v v9, (zero), v8, v0.t
; RV32-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
; RV32-NEXT:    vsext.vf2 v8, v9
; RV32-NEXT:    ret
;
; RV64-LABEL: mgather_nxv2i16_sextload_nxv2i32:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli a0, zero, e16, mf2, ta, mu
; RV64-NEXT:    vluxei64.v v10, (zero), v8, v0.t
; RV64-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
; RV64-NEXT:    vsext.vf2 v8, v10
; RV64-NEXT:    ret
  %v = call <vscale x 2 x i16> @llvm.masked.gather.nxv2i16.nxv2p0(<vscale x 2 x ptr> %ptrs, i32 2, <vscale x 2 x i1> %m, <vscale x 2 x i16> %passthru)
  %ev = sext <vscale x 2 x i16> %v to <vscale x 2 x i32>
  ret <vscale x 2 x i32> %ev
}

define <vscale x 2 x i32> @mgather_nxv2i16_zextload_nxv2i32(<vscale x 2 x ptr> %ptrs, <vscale x 2 x i1> %m, <vscale x 2 x i16> %passthru) {
; RV32-LABEL: mgather_nxv2i16_zextload_nxv2i32:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetvli a0, zero, e16, mf2, ta, mu
; RV32-NEXT:    vluxei32.v v9, (zero), v8, v0.t
; RV32-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
; RV32-NEXT:    vzext.vf2 v8, v9
; RV32-NEXT:    ret
;
; RV64-LABEL: mgather_nxv2i16_zextload_nxv2i32:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli a0, zero, e16, mf2, ta, mu
; RV64-NEXT:    vluxei64.v v10, (zero), v8, v0.t
; RV64-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
; RV64-NEXT:    vzext.vf2 v8, v10
; RV64-NEXT:    ret
  %v = call <vscale x 2 x i16> @llvm.masked.gather.nxv2i16.nxv2p0(<vscale x 2 x ptr> %ptrs, i32 2, <vscale x 2 x i1> %m, <vscale x 2 x i16> %passthru)
  %ev = zext <vscale x 2 x i16> %v to <vscale x 2 x i32>
  ret <vscale x 2 x i32> %ev
}

define <vscale x 2 x i64> @mgather_nxv2i16_sextload_nxv2i64(<vscale x 2 x ptr> %ptrs, <vscale x 2 x i1> %m, <vscale x 2 x i16> %passthru) {
; RV32-LABEL: mgather_nxv2i16_sextload_nxv2i64:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetvli a0, zero, e16, mf2, ta, mu
; RV32-NEXT:    vluxei32.v v9, (zero), v8, v0.t
; RV32-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
; RV32-NEXT:    vsext.vf4 v10, v9
; RV32-NEXT:    vmv.v.v v8, v10
; RV32-NEXT:    ret
;
; RV64-LABEL: mgather_nxv2i16_sextload_nxv2i64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli a0, zero, e16, mf2, ta, mu
; RV64-NEXT:    vluxei64.v v10, (zero), v8, v0.t
; RV64-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
; RV64-NEXT:    vsext.vf4 v8, v10
; RV64-NEXT:    ret
  %v = call <vscale x 2 x i16> @llvm.masked.gather.nxv2i16.nxv2p0(<vscale x 2 x ptr> %ptrs, i32 2, <vscale x 2 x i1> %m, <vscale x 2 x i16> %passthru)
  %ev = sext <vscale x 2 x i16> %v to <vscale x 2 x i64>
  ret <vscale x 2 x i64> %ev
}

define <vscale x 2 x i64> @mgather_nxv2i16_zextload_nxv2i64(<vscale x 2 x ptr> %ptrs, <vscale x 2 x i1> %m, <vscale x 2 x i16> %passthru) {
; RV32-LABEL: mgather_nxv2i16_zextload_nxv2i64:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetvli a0, zero, e16, mf2, ta, mu
; RV32-NEXT:    vluxei32.v v9, (zero), v8, v0.t
; RV32-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
; RV32-NEXT:    vzext.vf4 v10, v9
; RV32-NEXT:    vmv.v.v v8, v10
; RV32-NEXT:    ret
;
; RV64-LABEL: mgather_nxv2i16_zextload_nxv2i64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli a0, zero, e16, mf2, ta, mu
; RV64-NEXT:    vluxei64.v v10, (zero), v8, v0.t
; RV64-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
; RV64-NEXT:    vzext.vf4 v8, v10
; RV64-NEXT:    ret
  %v = call <vscale x 2 x i16> @llvm.masked.gather.nxv2i16.nxv2p0(<vscale x 2 x ptr> %ptrs, i32 2, <vscale x 2 x i1> %m, <vscale x 2 x i16> %passthru)
  %ev = zext <vscale x 2 x i16> %v to <vscale x 2 x i64>
  ret <vscale x 2 x i64> %ev
}

declare <vscale x 4 x i16> @llvm.masked.gather.nxv4i16.nxv4p0(<vscale x 4 x ptr>, i32, <vscale x 4 x i1>, <vscale x 4 x i16>)

define <vscale x 4 x i16> @mgather_nxv4i16(<vscale x 4 x ptr> %ptrs, <vscale x 4 x i1> %m, <vscale x 4 x i16> %passthru) {
; RV32-LABEL: mgather_nxv4i16:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetvli a0, zero, e16, m1, ta, mu
; RV32-NEXT:    vluxei32.v v10, (zero), v8, v0.t
; RV32-NEXT:    vmv.v.v v8, v10
; RV32-NEXT:    ret
;
; RV64-LABEL: mgather_nxv4i16:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli a0, zero, e16, m1, ta, mu
; RV64-NEXT:    vluxei64.v v12, (zero), v8, v0.t
; RV64-NEXT:    vmv.v.v v8, v12
; RV64-NEXT:    ret
  %v = call <vscale x 4 x i16> @llvm.masked.gather.nxv4i16.nxv4p0(<vscale x 4 x ptr> %ptrs, i32 2, <vscale x 4 x i1> %m, <vscale x 4 x i16> %passthru)
  ret <vscale x 4 x i16> %v
}

define <vscale x 4 x i16> @mgather_truemask_nxv4i16(<vscale x 4 x ptr> %ptrs, <vscale x 4 x i16> %passthru) {
; RV32-LABEL: mgather_truemask_nxv4i16:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
; RV32-NEXT:    vluxei32.v v10, (zero), v8
; RV32-NEXT:    vmv.v.v v8, v10
; RV32-NEXT:    ret
;
; RV64-LABEL: mgather_truemask_nxv4i16:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
; RV64-NEXT:    vluxei64.v v12, (zero), v8
; RV64-NEXT:    vmv.v.v v8, v12
; RV64-NEXT:    ret
  %v = call <vscale x 4 x i16> @llvm.masked.gather.nxv4i16.nxv4p0(<vscale x 4 x ptr> %ptrs, i32 2, <vscale x 4 x i1> splat (i1 1), <vscale x 4 x i16> %passthru)
  ret <vscale x 4 x i16> %v
}

define <vscale x 4 x i16> @mgather_falsemask_nxv4i16(<vscale x 4 x ptr> %ptrs, <vscale x 4 x i16> %passthru) {
; RV32-LABEL: mgather_falsemask_nxv4i16:
; RV32:       # %bb.0:
; RV32-NEXT:    vmv1r.v v8, v10
; RV32-NEXT:    ret
;
; RV64-LABEL: mgather_falsemask_nxv4i16:
; RV64:       # %bb.0:
; RV64-NEXT:    vmv1r.v v8, v12
; RV64-NEXT:    ret
  %v = call <vscale x 4 x i16> @llvm.masked.gather.nxv4i16.nxv4p0(<vscale x 4 x ptr> %ptrs, i32 2, <vscale x 4 x i1> zeroinitializer, <vscale x 4 x i16> %passthru)
  ret <vscale x 4 x i16> %v
}

declare <vscale x 8 x i16> @llvm.masked.gather.nxv8i16.nxv8p0(<vscale x 8 x ptr>, i32, <vscale x 8 x i1>, <vscale x 8 x i16>)

define <vscale x 8 x i16> @mgather_nxv8i16(<vscale x 8 x ptr> %ptrs, <vscale x 8 x i1> %m, <vscale x 8 x i16> %passthru) {
; RV32-LABEL: mgather_nxv8i16:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetvli a0, zero, e16, m2, ta, mu
; RV32-NEXT:    vluxei32.v v12, (zero), v8, v0.t
; RV32-NEXT:    vmv.v.v v8, v12
; RV32-NEXT:    ret
;
; RV64-LABEL: mgather_nxv8i16:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli a0, zero, e16, m2, ta, mu
; RV64-NEXT:    vluxei64.v v16, (zero), v8, v0.t
; RV64-NEXT:    vmv.v.v v8, v16
; RV64-NEXT:    ret
  %v = call <vscale x 8 x i16> @llvm.masked.gather.nxv8i16.nxv8p0(<vscale x 8 x ptr> %ptrs, i32 2, <vscale x 8 x i1> %m, <vscale x 8 x i16> %passthru)
  ret <vscale x 8 x i16> %v
}

define <vscale x 8 x i16> @mgather_baseidx_nxv8i8_nxv8i16(ptr %base, <vscale x 8 x i8> %idxs, <vscale x 8 x i1> %m, <vscale x 8 x i16> %passthru) {
; RV32-LABEL: mgather_baseidx_nxv8i8_nxv8i16:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetvli a1, zero, e32, m4, ta, ma
; RV32-NEXT:    vsext.vf4 v12, v8
; RV32-NEXT:    vadd.vv v12, v12, v12
; RV32-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
; RV32-NEXT:    vluxei32.v v10, (a0), v12, v0.t
; RV32-NEXT:    vmv.v.v v8, v10
; RV32-NEXT:    ret
;
; RV64-LABEL: mgather_baseidx_nxv8i8_nxv8i16:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
; RV64-NEXT:    vsext.vf8 v16, v8
; RV64-NEXT:    vadd.vv v16, v16, v16
; RV64-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
; RV64-NEXT:    vluxei64.v v10, (a0), v16, v0.t
; RV64-NEXT:    vmv.v.v v8, v10
; RV64-NEXT:    ret
  %ptrs = getelementptr inbounds i16, ptr %base, <vscale x 8 x i8> %idxs
  %v = call <vscale x 8 x i16> @llvm.masked.gather.nxv8i16.nxv8p0(<vscale x 8 x ptr> %ptrs, i32 2, <vscale x 8 x i1> %m, <vscale x 8 x i16> %passthru)
  ret <vscale x 8 x i16> %v
}

define <vscale x 8 x i16> @mgather_baseidx_sext_nxv8i8_nxv8i16(ptr %base, <vscale x 8 x i8> %idxs, <vscale x 8 x i1> %m, <vscale x 8 x i16> %passthru) {
; RV32-LABEL: mgather_baseidx_sext_nxv8i8_nxv8i16:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetvli a1, zero, e32, m4, ta, ma
; RV32-NEXT:    vsext.vf4 v12, v8
; RV32-NEXT:    vadd.vv v12, v12, v12
; RV32-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
; RV32-NEXT:    vluxei32.v v10, (a0), v12, v0.t
; RV32-NEXT:    vmv.v.v v8, v10
; RV32-NEXT:    ret
;
; RV64-LABEL: mgather_baseidx_sext_nxv8i8_nxv8i16:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
; RV64-NEXT:    vsext.vf8 v16, v8
; RV64-NEXT:    vadd.vv v16, v16, v16
; RV64-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
; RV64-NEXT:    vluxei64.v v10, (a0), v16, v0.t
; RV64-NEXT:    vmv.v.v v8, v10
; RV64-NEXT:    ret
  %eidxs = sext <vscale x 8 x i8> %idxs to <vscale x 8 x i16>
  %ptrs = getelementptr inbounds i16, ptr %base, <vscale x 8 x i16> %eidxs
  %v = call <vscale x 8 x i16> @llvm.masked.gather.nxv8i16.nxv8p0(<vscale x 8 x ptr> %ptrs, i32 2, <vscale x 8 x i1> %m, <vscale x 8 x i16> %passthru)
  ret <vscale x 8 x i16> %v
}

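; With zero-extended indices the scaled offsets are known to fit in the
; narrow type, so a smaller index EEW can be used (vluxei16 here); the code
; is then identical on RV32 and RV64, hence the common CHECK lines.
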
define <vscale x 8 x i16> @mgather_baseidx_zext_nxv8i8_nxv8i16(ptr %base, <vscale x 8 x i8> %idxs, <vscale x 8 x i1> %m, <vscale x 8 x i16> %passthru) {
; CHECK-LABEL: mgather_baseidx_zext_nxv8i8_nxv8i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, ma
; CHECK-NEXT:    vwaddu.vv v12, v8, v8
; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
; CHECK-NEXT:    vluxei16.v v10, (a0), v12, v0.t
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
  %eidxs = zext <vscale x 8 x i8> %idxs to <vscale x 8 x i16>
  %ptrs = getelementptr inbounds i16, ptr %base, <vscale x 8 x i16> %eidxs
  %v = call <vscale x 8 x i16> @llvm.masked.gather.nxv8i16.nxv8p0(<vscale x 8 x ptr> %ptrs, i32 2, <vscale x 8 x i1> %m, <vscale x 8 x i16> %passthru)
  ret <vscale x 8 x i16> %v
}

define <vscale x 8 x i16> @mgather_baseidx_nxv8i16(ptr %base, <vscale x 8 x i16> %idxs, <vscale x 8 x i1> %m, <vscale x 8 x i16> %passthru) {
; RV32-LABEL: mgather_baseidx_nxv8i16:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetvli a1, zero, e16, m2, ta, mu
; RV32-NEXT:    vwadd.vv v12, v8, v8
; RV32-NEXT:    vluxei32.v v10, (a0), v12, v0.t
; RV32-NEXT:    vmv.v.v v8, v10
; RV32-NEXT:    ret
;
; RV64-LABEL: mgather_baseidx_nxv8i16:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
; RV64-NEXT:    vsext.vf4 v16, v8
; RV64-NEXT:    vadd.vv v16, v16, v16
; RV64-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
; RV64-NEXT:    vluxei64.v v10, (a0), v16, v0.t
; RV64-NEXT:    vmv.v.v v8, v10
; RV64-NEXT:    ret
  %ptrs = getelementptr inbounds i16, ptr %base, <vscale x 8 x i16> %idxs
  %v = call <vscale x 8 x i16> @llvm.masked.gather.nxv8i16.nxv8p0(<vscale x 8 x ptr> %ptrs, i32 2, <vscale x 8 x i1> %m, <vscale x 8 x i16> %passthru)
  ret <vscale x 8 x i16> %v
}

declare <vscale x 1 x i32> @llvm.masked.gather.nxv1i32.nxv1p0(<vscale x 1 x ptr>, i32, <vscale x 1 x i1>, <vscale x 1 x i32>)

define <vscale x 1 x i32> @mgather_nxv1i32(<vscale x 1 x ptr> %ptrs, <vscale x 1 x i1> %m, <vscale x 1 x i32> %passthru) {
; RV32-LABEL: mgather_nxv1i32:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetvli a0, zero, e32, mf2, ta, mu
; RV32-NEXT:    vluxei32.v v9, (zero), v8, v0.t
; RV32-NEXT:    vmv1r.v v8, v9
; RV32-NEXT:    ret
;
; RV64-LABEL: mgather_nxv1i32:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli a0, zero, e32, mf2, ta, mu
; RV64-NEXT:    vluxei64.v v9, (zero), v8, v0.t
; RV64-NEXT:    vmv1r.v v8, v9
; RV64-NEXT:    ret
  %v = call <vscale x 1 x i32> @llvm.masked.gather.nxv1i32.nxv1p0(<vscale x 1 x ptr> %ptrs, i32 4, <vscale x 1 x i1> %m, <vscale x 1 x i32> %passthru)
  ret <vscale x 1 x i32> %v
}

declare <vscale x 2 x i32> @llvm.masked.gather.nxv2i32.nxv2p0(<vscale x 2 x ptr>, i32, <vscale x 2 x i1>, <vscale x 2 x i32>)

define <vscale x 2 x i32> @mgather_nxv2i32(<vscale x 2 x ptr> %ptrs, <vscale x 2 x i1> %m, <vscale x 2 x i32> %passthru) {
; RV32-LABEL: mgather_nxv2i32:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
; RV32-NEXT:    vluxei32.v v9, (zero), v8, v0.t
; RV32-NEXT:    vmv.v.v v8, v9
; RV32-NEXT:    ret
;
; RV64-LABEL: mgather_nxv2i32:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
; RV64-NEXT:    vluxei64.v v10, (zero), v8, v0.t
; RV64-NEXT:    vmv.v.v v8, v10
; RV64-NEXT:    ret
  %v = call <vscale x 2 x i32> @llvm.masked.gather.nxv2i32.nxv2p0(<vscale x 2 x ptr> %ptrs, i32 4, <vscale x 2 x i1> %m, <vscale x 2 x i32> %passthru)
  ret <vscale x 2 x i32> %v
}

define <vscale x 2 x i64> @mgather_nxv2i32_sextload_nxv2i64(<vscale x 2 x ptr> %ptrs, <vscale x 2 x i1> %m, <vscale x 2 x i32> %passthru) {
; RV32-LABEL: mgather_nxv2i32_sextload_nxv2i64:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
; RV32-NEXT:    vluxei32.v v9, (zero), v8, v0.t
; RV32-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
; RV32-NEXT:    vsext.vf2 v10, v9
; RV32-NEXT:    vmv.v.v v8, v10
; RV32-NEXT:    ret
;
; RV64-LABEL: mgather_nxv2i32_sextload_nxv2i64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
; RV64-NEXT:    vluxei64.v v10, (zero), v8, v0.t
; RV64-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
; RV64-NEXT:    vsext.vf2 v8, v10
; RV64-NEXT:    ret
  %v = call <vscale x 2 x i32> @llvm.masked.gather.nxv2i32.nxv2p0(<vscale x 2 x ptr> %ptrs, i32 4, <vscale x 2 x i1> %m, <vscale x 2 x i32> %passthru)
  %ev = sext <vscale x 2 x i32> %v to <vscale x 2 x i64>
  ret <vscale x 2 x i64> %ev
}

define <vscale x 2 x i64> @mgather_nxv2i32_zextload_nxv2i64(<vscale x 2 x ptr> %ptrs, <vscale x 2 x i1> %m, <vscale x 2 x i32> %passthru) {
; RV32-LABEL: mgather_nxv2i32_zextload_nxv2i64:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
; RV32-NEXT:    vluxei32.v v9, (zero), v8, v0.t
; RV32-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
; RV32-NEXT:    vzext.vf2 v10, v9
; RV32-NEXT:    vmv.v.v v8, v10
; RV32-NEXT:    ret
;
; RV64-LABEL: mgather_nxv2i32_zextload_nxv2i64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
; RV64-NEXT:    vluxei64.v v10, (zero), v8, v0.t
; RV64-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
; RV64-NEXT:    vzext.vf2 v8, v10
; RV64-NEXT:    ret
  %v = call <vscale x 2 x i32> @llvm.masked.gather.nxv2i32.nxv2p0(<vscale x 2 x ptr> %ptrs, i32 4, <vscale x 2 x i1> %m, <vscale x 2 x i32> %passthru)
  %ev = zext <vscale x 2 x i32> %v to <vscale x 2 x i64>
  ret <vscale x 2 x i64> %ev
}

declare <vscale x 4 x i32> @llvm.masked.gather.nxv4i32.nxv4p0(<vscale x 4 x ptr>, i32, <vscale x 4 x i1>, <vscale x 4 x i32>)

define <vscale x 4 x i32> @mgather_nxv4i32(<vscale x 4 x ptr> %ptrs, <vscale x 4 x i1> %m, <vscale x 4 x i32> %passthru) {
; RV32-LABEL: mgather_nxv4i32:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetvli a0, zero, e32, m2, ta, mu
; RV32-NEXT:    vluxei32.v v10, (zero), v8, v0.t
; RV32-NEXT:    vmv.v.v v8, v10
; RV32-NEXT:    ret
;
; RV64-LABEL: mgather_nxv4i32:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli a0, zero, e32, m2, ta, mu
; RV64-NEXT:    vluxei64.v v12, (zero), v8, v0.t
; RV64-NEXT:    vmv.v.v v8, v12
; RV64-NEXT:    ret
  %v = call <vscale x 4 x i32> @llvm.masked.gather.nxv4i32.nxv4p0(<vscale x 4 x ptr> %ptrs, i32 4, <vscale x 4 x i1> %m, <vscale x 4 x i32> %passthru)
  ret <vscale x 4 x i32> %v
}

define <vscale x 4 x i32> @mgather_truemask_nxv4i32(<vscale x 4 x ptr> %ptrs, <vscale x 4 x i32> %passthru) {
; RV32-LABEL: mgather_truemask_nxv4i32:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
; RV32-NEXT:    vluxei32.v v8, (zero), v8
; RV32-NEXT:    ret
;
; RV64-LABEL: mgather_truemask_nxv4i32:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
; RV64-NEXT:    vluxei64.v v12, (zero), v8
; RV64-NEXT:    vmv.v.v v8, v12
; RV64-NEXT:    ret
  %v = call <vscale x 4 x i32> @llvm.masked.gather.nxv4i32.nxv4p0(<vscale x 4 x ptr> %ptrs, i32 4, <vscale x 4 x i1> splat (i1 1), <vscale x 4 x i32> %passthru)
  ret <vscale x 4 x i32> %v
}

define <vscale x 4 x i32> @mgather_falsemask_nxv4i32(<vscale x 4 x ptr> %ptrs, <vscale x 4 x i32> %passthru) {
; RV32-LABEL: mgather_falsemask_nxv4i32:
; RV32:       # %bb.0:
; RV32-NEXT:    vmv2r.v v8, v10
; RV32-NEXT:    ret
;
; RV64-LABEL: mgather_falsemask_nxv4i32:
; RV64:       # %bb.0:
; RV64-NEXT:    vmv2r.v v8, v12
; RV64-NEXT:    ret
  %v = call <vscale x 4 x i32> @llvm.masked.gather.nxv4i32.nxv4p0(<vscale x 4 x ptr> %ptrs, i32 4, <vscale x 4 x i1> zeroinitializer, <vscale x 4 x i32> %passthru)
  ret <vscale x 4 x i32> %v
}

declare <vscale x 8 x i32> @llvm.masked.gather.nxv8i32.nxv8p0(<vscale x 8 x ptr>, i32, <vscale x 8 x i1>, <vscale x 8 x i32>)

define <vscale x 8 x i32> @mgather_nxv8i32(<vscale x 8 x ptr> %ptrs, <vscale x 8 x i1> %m, <vscale x 8 x i32> %passthru) {
; RV32-LABEL: mgather_nxv8i32:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetvli a0, zero, e32, m4, ta, mu
; RV32-NEXT:    vluxei32.v v12, (zero), v8, v0.t
; RV32-NEXT:    vmv.v.v v8, v12
; RV32-NEXT:    ret
;
; RV64-LABEL: mgather_nxv8i32:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli a0, zero, e32, m4, ta, mu
; RV64-NEXT:    vluxei64.v v16, (zero), v8, v0.t
; RV64-NEXT:    vmv.v.v v8, v16
; RV64-NEXT:    ret
  %v = call <vscale x 8 x i32> @llvm.masked.gather.nxv8i32.nxv8p0(<vscale x 8 x ptr> %ptrs, i32 4, <vscale x 8 x i1> %m, <vscale x 8 x i32> %passthru)
  ret <vscale x 8 x i32> %v
}

define <vscale x 8 x i32> @mgather_baseidx_nxv8i8_nxv8i32(ptr %base, <vscale x 8 x i8> %idxs, <vscale x 8 x i1> %m, <vscale x 8 x i32> %passthru) {
; RV32-LABEL: mgather_baseidx_nxv8i8_nxv8i32:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetvli a1, zero, e32, m4, ta, mu
; RV32-NEXT:    vsext.vf4 v16, v8
; RV32-NEXT:    vsll.vi v8, v16, 2
; RV32-NEXT:    vluxei32.v v12, (a0), v8, v0.t
; RV32-NEXT:    vmv.v.v v8, v12
; RV32-NEXT:    ret
;
; RV64-LABEL: mgather_baseidx_nxv8i8_nxv8i32:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
; RV64-NEXT:    vsext.vf8 v16, v8
; RV64-NEXT:    vsll.vi v16, v16, 2
; RV64-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
; RV64-NEXT:    vluxei64.v v12, (a0), v16, v0.t
; RV64-NEXT:    vmv.v.v v8, v12
; RV64-NEXT:    ret
  %ptrs = getelementptr inbounds i32, ptr %base, <vscale x 8 x i8> %idxs
  %v = call <vscale x 8 x i32> @llvm.masked.gather.nxv8i32.nxv8p0(<vscale x 8 x ptr> %ptrs, i32 4, <vscale x 8 x i1> %m, <vscale x 8 x i32> %passthru)
  ret <vscale x 8 x i32> %v
}

define <vscale x 8 x i32> @mgather_baseidx_sext_nxv8i8_nxv8i32(ptr %base, <vscale x 8 x i8> %idxs, <vscale x 8 x i1> %m, <vscale x 8 x i32> %passthru) {
; RV32-LABEL: mgather_baseidx_sext_nxv8i8_nxv8i32:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetvli a1, zero, e32, m4, ta, mu
; RV32-NEXT:    vsext.vf4 v16, v8
; RV32-NEXT:    vsll.vi v8, v16, 2
; RV32-NEXT:    vluxei32.v v12, (a0), v8, v0.t
; RV32-NEXT:    vmv.v.v v8, v12
; RV32-NEXT:    ret
;
; RV64-LABEL: mgather_baseidx_sext_nxv8i8_nxv8i32:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
; RV64-NEXT:    vsext.vf8 v16, v8
; RV64-NEXT:    vsll.vi v16, v16, 2
; RV64-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
; RV64-NEXT:    vluxei64.v v12, (a0), v16, v0.t
; RV64-NEXT:    vmv.v.v v8, v12
; RV64-NEXT:    ret
  %eidxs = sext <vscale x 8 x i8> %idxs to <vscale x 8 x i32>
  %ptrs = getelementptr inbounds i32, ptr %base, <vscale x 8 x i32> %eidxs
  %v = call <vscale x 8 x i32> @llvm.masked.gather.nxv8i32.nxv8p0(<vscale x 8 x ptr> %ptrs, i32 4, <vscale x 8 x i1> %m, <vscale x 8 x i32> %passthru)
  ret <vscale x 8 x i32> %v
}

define <vscale x 8 x i32> @mgather_baseidx_zext_nxv8i8_nxv8i32(ptr %base, <vscale x 8 x i8> %idxs, <vscale x 8 x i1> %m, <vscale x 8 x i32> %passthru) {
; CHECK-LABEL: mgather_baseidx_zext_nxv8i8_nxv8i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, m2, ta, ma
; CHECK-NEXT:    vzext.vf2 v10, v8
; CHECK-NEXT:    vsll.vi v8, v10, 2
; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
; CHECK-NEXT:    vluxei16.v v12, (a0), v8, v0.t
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
  %eidxs = zext <vscale x 8 x i8> %idxs to <vscale x 8 x i32>
  %ptrs = getelementptr inbounds i32, ptr %base, <vscale x 8 x i32> %eidxs
  %v = call <vscale x 8 x i32> @llvm.masked.gather.nxv8i32.nxv8p0(<vscale x 8 x ptr> %ptrs, i32 4, <vscale x 8 x i1> %m, <vscale x 8 x i32> %passthru)
  ret <vscale x 8 x i32> %v
}

define <vscale x 8 x i32> @mgather_baseidx_nxv8i16_nxv8i32(ptr %base, <vscale x 8 x i16> %idxs, <vscale x 8 x i1> %m, <vscale x 8 x i32> %passthru) {
; RV32-LABEL: mgather_baseidx_nxv8i16_nxv8i32:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetvli a1, zero, e32, m4, ta, mu
; RV32-NEXT:    vsext.vf2 v16, v8
; RV32-NEXT:    vsll.vi v8, v16, 2
; RV32-NEXT:    vluxei32.v v12, (a0), v8, v0.t
; RV32-NEXT:    vmv.v.v v8, v12
; RV32-NEXT:    ret
;
; RV64-LABEL: mgather_baseidx_nxv8i16_nxv8i32:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
; RV64-NEXT:    vsext.vf4 v16, v8
; RV64-NEXT:    vsll.vi v16, v16, 2
; RV64-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
; RV64-NEXT:    vluxei64.v v12, (a0), v16, v0.t
; RV64-NEXT:    vmv.v.v v8, v12
; RV64-NEXT:    ret
  %ptrs = getelementptr inbounds i32, ptr %base, <vscale x 8 x i16> %idxs
  %v = call <vscale x 8 x i32> @llvm.masked.gather.nxv8i32.nxv8p0(<vscale x 8 x ptr> %ptrs, i32 4, <vscale x 8 x i1> %m, <vscale x 8 x i32> %passthru)
  ret <vscale x 8 x i32> %v
}

define <vscale x 8 x i32> @mgather_baseidx_sext_nxv8i16_nxv8i32(ptr %base, <vscale x 8 x i16> %idxs, <vscale x 8 x i1> %m, <vscale x 8 x i32> %passthru) {
; RV32-LABEL: mgather_baseidx_sext_nxv8i16_nxv8i32:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetvli a1, zero, e32, m4, ta, mu
; RV32-NEXT:    vsext.vf2 v16, v8
; RV32-NEXT:    vsll.vi v8, v16, 2
; RV32-NEXT:    vluxei32.v v12, (a0), v8, v0.t
; RV32-NEXT:    vmv.v.v v8, v12
; RV32-NEXT:    ret
;
; RV64-LABEL: mgather_baseidx_sext_nxv8i16_nxv8i32:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
; RV64-NEXT:    vsext.vf4 v16, v8
; RV64-NEXT:    vsll.vi v16, v16, 2
; RV64-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
; RV64-NEXT:    vluxei64.v v12, (a0), v16, v0.t
; RV64-NEXT:    vmv.v.v v8, v12
; RV64-NEXT:    ret
  %eidxs = sext <vscale x 8 x i16> %idxs to <vscale x 8 x i32>
  %ptrs = getelementptr inbounds i32, ptr %base, <vscale x 8 x i32> %eidxs
  %v = call <vscale x 8 x i32> @llvm.masked.gather.nxv8i32.nxv8p0(<vscale x 8 x ptr> %ptrs, i32 4, <vscale x 8 x i1> %m, <vscale x 8 x i32> %passthru)
  ret <vscale x 8 x i32> %v
}

define <vscale x 8 x i32> @mgather_baseidx_zext_nxv8i16_nxv8i32(ptr %base, <vscale x 8 x i16> %idxs, <vscale x 8 x i1> %m, <vscale x 8 x i32> %passthru) {
; CHECK-LABEL: mgather_baseidx_zext_nxv8i16_nxv8i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, m4, ta, mu
; CHECK-NEXT:    vzext.vf2 v16, v8
; CHECK-NEXT:    vsll.vi v8, v16, 2
; CHECK-NEXT:    vluxei32.v v12, (a0), v8, v0.t
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
  %eidxs = zext <vscale x 8 x i16> %idxs to <vscale x 8 x i32>
  %ptrs = getelementptr inbounds i32, ptr %base, <vscale x 8 x i32> %eidxs
  %v = call <vscale x 8 x i32> @llvm.masked.gather.nxv8i32.nxv8p0(<vscale x 8 x ptr> %ptrs, i32 4, <vscale x 8 x i1> %m, <vscale x 8 x i32> %passthru)
  ret <vscale x 8 x i32> %v
}

define <vscale x 8 x i32> @mgather_baseidx_nxv8i32(ptr %base, <vscale x 8 x i32> %idxs, <vscale x 8 x i1> %m, <vscale x 8 x i32> %passthru) {
; RV32-LABEL: mgather_baseidx_nxv8i32:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetvli a1, zero, e32, m4, ta, mu
; RV32-NEXT:    vsll.vi v8, v8, 2
; RV32-NEXT:    vluxei32.v v12, (a0), v8, v0.t
; RV32-NEXT:    vmv.v.v v8, v12
; RV32-NEXT:    ret
;
; RV64-LABEL: mgather_baseidx_nxv8i32:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
; RV64-NEXT:    vsext.vf2 v16, v8
; RV64-NEXT:    vsll.vi v16, v16, 2
; RV64-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
; RV64-NEXT:    vluxei64.v v12, (a0), v16, v0.t
; RV64-NEXT:    vmv.v.v v8, v12
; RV64-NEXT:    ret
  %ptrs = getelementptr inbounds i32, ptr %base, <vscale x 8 x i32> %idxs
  %v = call <vscale x 8 x i32> @llvm.masked.gather.nxv8i32.nxv8p0(<vscale x 8 x ptr> %ptrs, i32 4, <vscale x 8 x i1> %m, <vscale x 8 x i32> %passthru)
  ret <vscale x 8 x i32> %v
}

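; For i64 elements, RV32 pointers are still 32 bits wide, so these gathers
; use vluxei32 with an index register group half the size of the e64 data
; group.
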
declare <vscale x 1 x i64> @llvm.masked.gather.nxv1i64.nxv1p0(<vscale x 1 x ptr>, i32, <vscale x 1 x i1>, <vscale x 1 x i64>)

define <vscale x 1 x i64> @mgather_nxv1i64(<vscale x 1 x ptr> %ptrs, <vscale x 1 x i1> %m, <vscale x 1 x i64> %passthru) {
; RV32-LABEL: mgather_nxv1i64:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetvli a0, zero, e64, m1, ta, mu
; RV32-NEXT:    vluxei32.v v9, (zero), v8, v0.t
; RV32-NEXT:    vmv.v.v v8, v9
; RV32-NEXT:    ret
;
; RV64-LABEL: mgather_nxv1i64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli a0, zero, e64, m1, ta, mu
; RV64-NEXT:    vluxei64.v v9, (zero), v8, v0.t
; RV64-NEXT:    vmv.v.v v8, v9
; RV64-NEXT:    ret
  %v = call <vscale x 1 x i64> @llvm.masked.gather.nxv1i64.nxv1p0(<vscale x 1 x ptr> %ptrs, i32 8, <vscale x 1 x i1> %m, <vscale x 1 x i64> %passthru)
  ret <vscale x 1 x i64> %v
}

declare <vscale x 2 x i64> @llvm.masked.gather.nxv2i64.nxv2p0(<vscale x 2 x ptr>, i32, <vscale x 2 x i1>, <vscale x 2 x i64>)

define <vscale x 2 x i64> @mgather_nxv2i64(<vscale x 2 x ptr> %ptrs, <vscale x 2 x i1> %m, <vscale x 2 x i64> %passthru) {
; RV32-LABEL: mgather_nxv2i64:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetvli a0, zero, e64, m2, ta, mu
; RV32-NEXT:    vluxei32.v v10, (zero), v8, v0.t
; RV32-NEXT:    vmv.v.v v8, v10
; RV32-NEXT:    ret
;
; RV64-LABEL: mgather_nxv2i64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli a0, zero, e64, m2, ta, mu
; RV64-NEXT:    vluxei64.v v10, (zero), v8, v0.t
; RV64-NEXT:    vmv.v.v v8, v10
; RV64-NEXT:    ret
  %v = call <vscale x 2 x i64> @llvm.masked.gather.nxv2i64.nxv2p0(<vscale x 2 x ptr> %ptrs, i32 8, <vscale x 2 x i1> %m, <vscale x 2 x i64> %passthru)
  ret <vscale x 2 x i64> %v
}

declare <vscale x 4 x i64> @llvm.masked.gather.nxv4i64.nxv4p0(<vscale x 4 x ptr>, i32, <vscale x 4 x i1>, <vscale x 4 x i64>)

define <vscale x 4 x i64> @mgather_nxv4i64(<vscale x 4 x ptr> %ptrs, <vscale x 4 x i1> %m, <vscale x 4 x i64> %passthru) {
; RV32-LABEL: mgather_nxv4i64:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetvli a0, zero, e64, m4, ta, mu
; RV32-NEXT:    vluxei32.v v12, (zero), v8, v0.t
; RV32-NEXT:    vmv.v.v v8, v12
; RV32-NEXT:    ret
;
; RV64-LABEL: mgather_nxv4i64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli a0, zero, e64, m4, ta, mu
; RV64-NEXT:    vluxei64.v v12, (zero), v8, v0.t
; RV64-NEXT:    vmv.v.v v8, v12
; RV64-NEXT:    ret
  %v = call <vscale x 4 x i64> @llvm.masked.gather.nxv4i64.nxv4p0(<vscale x 4 x ptr> %ptrs, i32 8, <vscale x 4 x i1> %m, <vscale x 4 x i64> %passthru)
  ret <vscale x 4 x i64> %v
}

define <vscale x 4 x i64> @mgather_truemask_nxv4i64(<vscale x 4 x ptr> %ptrs, <vscale x 4 x i64> %passthru) {
; RV32-LABEL: mgather_truemask_nxv4i64:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
; RV32-NEXT:    vluxei32.v v12, (zero), v8
; RV32-NEXT:    vmv.v.v v8, v12
; RV32-NEXT:    ret
;
; RV64-LABEL: mgather_truemask_nxv4i64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
; RV64-NEXT:    vluxei64.v v8, (zero), v8
; RV64-NEXT:    ret
  %v = call <vscale x 4 x i64> @llvm.masked.gather.nxv4i64.nxv4p0(<vscale x 4 x ptr> %ptrs, i32 8, <vscale x 4 x i1> splat (i1 1), <vscale x 4 x i64> %passthru)
  ret <vscale x 4 x i64> %v
}

define <vscale x 4 x i64> @mgather_falsemask_nxv4i64(<vscale x 4 x ptr> %ptrs, <vscale x 4 x i64> %passthru) {
; CHECK-LABEL: mgather_falsemask_nxv4i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmv4r.v v8, v12
; CHECK-NEXT:    ret
  %v = call <vscale x 4 x i64> @llvm.masked.gather.nxv4i64.nxv4p0(<vscale x 4 x ptr> %ptrs, i32 8, <vscale x 4 x i1> zeroinitializer, <vscale x 4 x i64> %passthru)
  ret <vscale x 4 x i64> %v
}

declare <vscale x 8 x i64> @llvm.masked.gather.nxv8i64.nxv8p0(<vscale x 8 x ptr>, i32, <vscale x 8 x i1>, <vscale x 8 x i64>)

define <vscale x 8 x i64> @mgather_nxv8i64(<vscale x 8 x ptr> %ptrs, <vscale x 8 x i1> %m, <vscale x 8 x i64> %passthru) {
; RV32-LABEL: mgather_nxv8i64:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetvli a0, zero, e64, m8, ta, mu
; RV32-NEXT:    vluxei32.v v16, (zero), v8, v0.t
; RV32-NEXT:    vmv.v.v v8, v16
; RV32-NEXT:    ret
;
; RV64-LABEL: mgather_nxv8i64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli a0, zero, e64, m8, ta, mu
; RV64-NEXT:    vluxei64.v v16, (zero), v8, v0.t
; RV64-NEXT:    vmv.v.v v8, v16
; RV64-NEXT:    ret
  %v = call <vscale x 8 x i64> @llvm.masked.gather.nxv8i64.nxv8p0(<vscale x 8 x ptr> %ptrs, i32 8, <vscale x 8 x i1> %m, <vscale x 8 x i64> %passthru)
  ret <vscale x 8 x i64> %v
}

define <vscale x 8 x i64> @mgather_baseidx_nxv8i8_nxv8i64(ptr %base, <vscale x 8 x i8> %idxs, <vscale x 8 x i1> %m, <vscale x 8 x i64> %passthru) {
; RV32-LABEL: mgather_baseidx_nxv8i8_nxv8i64:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetvli a1, zero, e32, m4, ta, ma
; RV32-NEXT:    vsext.vf4 v12, v8
; RV32-NEXT:    vsll.vi v8, v12, 3
; RV32-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
; RV32-NEXT:    vluxei32.v v16, (a0), v8, v0.t
; RV32-NEXT:    vmv.v.v v8, v16
; RV32-NEXT:    ret
;
; RV64-LABEL: mgather_baseidx_nxv8i8_nxv8i64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli a1, zero, e64, m8, ta, mu
; RV64-NEXT:    vsext.vf8 v24, v8
; RV64-NEXT:    vsll.vi v8, v24, 3
; RV64-NEXT:    vluxei64.v v16, (a0), v8, v0.t
; RV64-NEXT:    vmv.v.v v8, v16
; RV64-NEXT:    ret
  %ptrs = getelementptr inbounds i64, ptr %base, <vscale x 8 x i8> %idxs
  %v = call <vscale x 8 x i64> @llvm.masked.gather.nxv8i64.nxv8p0(<vscale x 8 x ptr> %ptrs, i32 8, <vscale x 8 x i1> %m, <vscale x 8 x i64> %passthru)
  ret <vscale x 8 x i64> %v
}

define <vscale x 8 x i64> @mgather_baseidx_sext_nxv8i8_nxv8i64(ptr %base, <vscale x 8 x i8> %idxs, <vscale x 8 x i1> %m, <vscale x 8 x i64> %passthru) {
; RV32-LABEL: mgather_baseidx_sext_nxv8i8_nxv8i64:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetvli a1, zero, e32, m4, ta, ma
; RV32-NEXT:    vsext.vf4 v12, v8
; RV32-NEXT:    vsll.vi v8, v12, 3
; RV32-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
; RV32-NEXT:    vluxei32.v v16, (a0), v8, v0.t
; RV32-NEXT:    vmv.v.v v8, v16
; RV32-NEXT:    ret
;
; RV64-LABEL: mgather_baseidx_sext_nxv8i8_nxv8i64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli a1, zero, e64, m8, ta, mu
; RV64-NEXT:    vsext.vf8 v24, v8
; RV64-NEXT:    vsll.vi v8, v24, 3
; RV64-NEXT:    vluxei64.v v16, (a0), v8, v0.t
; RV64-NEXT:    vmv.v.v v8, v16
; RV64-NEXT:    ret
  %eidxs = sext <vscale x 8 x i8> %idxs to <vscale x 8 x i64>
  %ptrs = getelementptr inbounds i64, ptr %base, <vscale x 8 x i64> %eidxs
  %v = call <vscale x 8 x i64> @llvm.masked.gather.nxv8i64.nxv8p0(<vscale x 8 x ptr> %ptrs, i32 8, <vscale x 8 x i1> %m, <vscale x 8 x i64> %passthru)
  ret <vscale x 8 x i64> %v
}

define <vscale x 8 x i64> @mgather_baseidx_zext_nxv8i8_nxv8i64(ptr %base, <vscale x 8 x i8> %idxs, <vscale x 8 x i1> %m, <vscale x 8 x i64> %passthru) {
; CHECK-LABEL: mgather_baseidx_zext_nxv8i8_nxv8i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, m2, ta, ma
; CHECK-NEXT:    vzext.vf2 v10, v8
; CHECK-NEXT:    vsll.vi v8, v10, 3
; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
; CHECK-NEXT:    vluxei16.v v16, (a0), v8, v0.t
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
  %eidxs = zext <vscale x 8 x i8> %idxs to <vscale x 8 x i64>
  %ptrs = getelementptr inbounds i64, ptr %base, <vscale x 8 x i64> %eidxs
  %v = call <vscale x 8 x i64> @llvm.masked.gather.nxv8i64.nxv8p0(<vscale x 8 x ptr> %ptrs, i32 8, <vscale x 8 x i1> %m, <vscale x 8 x i64> %passthru)
  ret <vscale x 8 x i64> %v
}

define <vscale x 8 x i64> @mgather_baseidx_nxv8i16_nxv8i64(ptr %base, <vscale x 8 x i16> %idxs, <vscale x 8 x i1> %m, <vscale x 8 x i64> %passthru) {
; RV32-LABEL: mgather_baseidx_nxv8i16_nxv8i64:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetvli a1, zero, e32, m4, ta, ma
; RV32-NEXT:    vsext.vf2 v12, v8
; RV32-NEXT:    vsll.vi v8, v12, 3
; RV32-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
; RV32-NEXT:    vluxei32.v v16, (a0), v8, v0.t
; RV32-NEXT:    vmv.v.v v8, v16
; RV32-NEXT:    ret
;
; RV64-LABEL: mgather_baseidx_nxv8i16_nxv8i64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli a1, zero, e64, m8, ta, mu
; RV64-NEXT:    vsext.vf4 v24, v8
; RV64-NEXT:    vsll.vi v8, v24, 3
; RV64-NEXT:    vluxei64.v v16, (a0), v8, v0.t
; RV64-NEXT:    vmv.v.v v8, v16
; RV64-NEXT:    ret
  %ptrs = getelementptr inbounds i64, ptr %base, <vscale x 8 x i16> %idxs
  %v = call <vscale x 8 x i64> @llvm.masked.gather.nxv8i64.nxv8p0(<vscale x 8 x ptr> %ptrs, i32 8, <vscale x 8 x i1> %m, <vscale x 8 x i64> %passthru)
  ret <vscale x 8 x i64> %v
}

define <vscale x 8 x i64> @mgather_baseidx_sext_nxv8i16_nxv8i64(ptr %base, <vscale x 8 x i16> %idxs, <vscale x 8 x i1> %m, <vscale x 8 x i64> %passthru) {
; RV32-LABEL: mgather_baseidx_sext_nxv8i16_nxv8i64:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetvli a1, zero, e32, m4, ta, ma
; RV32-NEXT:    vsext.vf2 v12, v8
; RV32-NEXT:    vsll.vi v8, v12, 3
; RV32-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
; RV32-NEXT:    vluxei32.v v16, (a0), v8, v0.t
; RV32-NEXT:    vmv.v.v v8, v16
; RV32-NEXT:    ret
;
; RV64-LABEL: mgather_baseidx_sext_nxv8i16_nxv8i64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli a1, zero, e64, m8, ta, mu
; RV64-NEXT:    vsext.vf4 v24, v8
; RV64-NEXT:    vsll.vi v8, v24, 3
; RV64-NEXT:    vluxei64.v v16, (a0), v8, v0.t
; RV64-NEXT:    vmv.v.v v8, v16
; RV64-NEXT:    ret
  %eidxs = sext <vscale x 8 x i16> %idxs to <vscale x 8 x i64>
  %ptrs = getelementptr inbounds i64, ptr %base, <vscale x 8 x i64> %eidxs
  %v = call <vscale x 8 x i64> @llvm.masked.gather.nxv8i64.nxv8p0(<vscale x 8 x ptr> %ptrs, i32 8, <vscale x 8 x i1> %m, <vscale x 8 x i64> %passthru)
  ret <vscale x 8 x i64> %v
}

define <vscale x 8 x i64> @mgather_baseidx_zext_nxv8i16_nxv8i64(ptr %base, <vscale x 8 x i16> %idxs, <vscale x 8 x i1> %m, <vscale x 8 x i64> %passthru) {
; CHECK-LABEL: mgather_baseidx_zext_nxv8i16_nxv8i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, m4, ta, ma
; CHECK-NEXT:    vzext.vf2 v12, v8
; CHECK-NEXT:    vsll.vi v8, v12, 3
; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
; CHECK-NEXT:    vluxei32.v v16, (a0), v8, v0.t
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
  %eidxs = zext <vscale x 8 x i16> %idxs to <vscale x 8 x i64>
  %ptrs = getelementptr inbounds i64, ptr %base, <vscale x 8 x i64> %eidxs
  %v = call <vscale x 8 x i64> @llvm.masked.gather.nxv8i64.nxv8p0(<vscale x 8 x ptr> %ptrs, i32 8, <vscale x 8 x i1> %m, <vscale x 8 x i64> %passthru)
  ret <vscale x 8 x i64> %v
}

define <vscale x 8 x i64> @mgather_baseidx_nxv8i32_nxv8i64(ptr %base, <vscale x 8 x i32> %idxs, <vscale x 8 x i1> %m, <vscale x 8 x i64> %passthru) {
; RV32-LABEL: mgather_baseidx_nxv8i32_nxv8i64:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetvli a1, zero, e32, m4, ta, ma
; RV32-NEXT:    vsll.vi v8, v8, 3
; RV32-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
; RV32-NEXT:    vluxei32.v v16, (a0), v8, v0.t
; RV32-NEXT:    vmv.v.v v8, v16
; RV32-NEXT:    ret
;
; RV64-LABEL: mgather_baseidx_nxv8i32_nxv8i64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli a1, zero, e64, m8, ta, mu
; RV64-NEXT:    vsext.vf2 v24, v8
; RV64-NEXT:    vsll.vi v8, v24, 3
; RV64-NEXT:    vluxei64.v v16, (a0), v8, v0.t
; RV64-NEXT:    vmv.v.v v8, v16
; RV64-NEXT:    ret
  %ptrs = getelementptr inbounds i64, ptr %base, <vscale x 8 x i32> %idxs
  %v = call <vscale x 8 x i64> @llvm.masked.gather.nxv8i64.nxv8p0(<vscale x 8 x ptr> %ptrs, i32 8, <vscale x 8 x i1> %m, <vscale x 8 x i64> %passthru)
  ret <vscale x 8 x i64> %v
}

define <vscale x 8 x i64> @mgather_baseidx_sext_nxv8i32_nxv8i64(ptr %base, <vscale x 8 x i32> %idxs, <vscale x 8 x i1> %m, <vscale x 8 x i64> %passthru) {
; RV32-LABEL: mgather_baseidx_sext_nxv8i32_nxv8i64:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetvli a1, zero, e32, m4, ta, ma
; RV32-NEXT:    vsll.vi v8, v8, 3
; RV32-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
; RV32-NEXT:    vluxei32.v v16, (a0), v8, v0.t
; RV32-NEXT:    vmv.v.v v8, v16
; RV32-NEXT:    ret
;
; RV64-LABEL: mgather_baseidx_sext_nxv8i32_nxv8i64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli a1, zero, e64, m8, ta, mu
; RV64-NEXT:    vsext.vf2 v24, v8
; RV64-NEXT:    vsll.vi v8, v24, 3
; RV64-NEXT:    vluxei64.v v16, (a0), v8, v0.t
; RV64-NEXT:    vmv.v.v v8, v16
; RV64-NEXT:    ret
  %eidxs = sext <vscale x 8 x i32> %idxs to <vscale x 8 x i64>
  %ptrs = getelementptr inbounds i64, ptr %base, <vscale x 8 x i64> %eidxs
  %v = call <vscale x 8 x i64> @llvm.masked.gather.nxv8i64.nxv8p0(<vscale x 8 x ptr> %ptrs, i32 8, <vscale x 8 x i1> %m, <vscale x 8 x i64> %passthru)
  ret <vscale x 8 x i64> %v
}

define <vscale x 8 x i64> @mgather_baseidx_zext_nxv8i32_nxv8i64(ptr %base, <vscale x 8 x i32> %idxs, <vscale x 8 x i1> %m, <vscale x 8 x i64> %passthru) {
; RV32-LABEL: mgather_baseidx_zext_nxv8i32_nxv8i64:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetvli a1, zero, e32, m4, ta, ma
; RV32-NEXT:    vsll.vi v8, v8, 3
; RV32-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
; RV32-NEXT:    vluxei32.v v16, (a0), v8, v0.t
; RV32-NEXT:    vmv.v.v v8, v16
; RV32-NEXT:    ret
;
; RV64-LABEL: mgather_baseidx_zext_nxv8i32_nxv8i64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli a1, zero, e64, m8, ta, mu
; RV64-NEXT:    vzext.vf2 v24, v8
; RV64-NEXT:    vsll.vi v8, v24, 3
; RV64-NEXT:    vluxei64.v v16, (a0), v8, v0.t
; RV64-NEXT:    vmv.v.v v8, v16
; RV64-NEXT:    ret
  %eidxs = zext <vscale x 8 x i32> %idxs to <vscale x 8 x i64>
  %ptrs = getelementptr inbounds i64, ptr %base, <vscale x 8 x i64> %eidxs
  %v = call <vscale x 8 x i64> @llvm.masked.gather.nxv8i64.nxv8p0(<vscale x 8 x ptr> %ptrs, i32 8, <vscale x 8 x i1> %m, <vscale x 8 x i64> %passthru)
  ret <vscale x 8 x i64> %v
}

define <vscale x 8 x i64> @mgather_baseidx_nxv8i64(ptr %base, <vscale x 8 x i64> %idxs, <vscale x 8 x i1> %m, <vscale x 8 x i64> %passthru) {
; RV32-LABEL: mgather_baseidx_nxv8i64:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetvli a1, zero, e32, m4, ta, ma
; RV32-NEXT:    vnsrl.wi v24, v8, 0
; RV32-NEXT:    vsll.vi v8, v24, 3
; RV32-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
; RV32-NEXT:    vluxei32.v v16, (a0), v8, v0.t
; RV32-NEXT:    vmv.v.v v8, v16
; RV32-NEXT:    ret
;
; RV64-LABEL: mgather_baseidx_nxv8i64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli a1, zero, e64, m8, ta, mu
; RV64-NEXT:    vsll.vi v8, v8, 3
; RV64-NEXT:    vluxei64.v v16, (a0), v8, v0.t
; RV64-NEXT:    vmv.v.v v8, v16
; RV64-NEXT:    ret
  %ptrs = getelementptr inbounds i64, ptr %base, <vscale x 8 x i64> %idxs
  %v = call <vscale x 8 x i64> @llvm.masked.gather.nxv8i64.nxv8p0(<vscale x 8 x ptr> %ptrs, i32 8, <vscale x 8 x i1> %m, <vscale x 8 x i64> %passthru)
  ret <vscale x 8 x i64> %v
}

declare <vscale x 16 x i64> @llvm.masked.gather.nxv16i64.nxv16p0(<vscale x 16 x ptr>, i32, <vscale x 16 x i1>, <vscale x 16 x i64>)

declare <vscale x 16 x i64> @llvm.vector.insert.nxv8i64.nxv16i64(<vscale x 16 x i64>, <vscale x 8 x i64>, i64 %idx)
declare <vscale x 16 x ptr> @llvm.vector.insert.nxv8p0.nxv16p0(<vscale x 16 x ptr>, <vscale x 8 x ptr>, i64 %idx)

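; nxv16i64 exceeds the largest (LMUL=8) register group, so the operands are
; assembled with llvm.vector.insert and the gather is split into two m8
; halves, sliding the mask down for the second half. RV64 has to spill one
; pointer operand around the first gather because four m8 operands are live
; at once.
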
define void @mgather_nxv16i64(<vscale x 8 x ptr> %ptrs0, <vscale x 8 x ptr> %ptrs1, <vscale x 16 x i1> %m, <vscale x 8 x i64> %passthru0, <vscale x 8 x i64> %passthru1, ptr %out) {
; RV32-LABEL: mgather_nxv16i64:
; RV32:       # %bb.0:
; RV32-NEXT:    vl8re64.v v24, (a0)
; RV32-NEXT:    csrr a0, vlenb
; RV32-NEXT:    srli a2, a0, 3
; RV32-NEXT:    vsetvli a3, zero, e8, mf4, ta, ma
; RV32-NEXT:    vslidedown.vx v7, v0, a2
; RV32-NEXT:    vsetvli a2, zero, e64, m8, ta, mu
; RV32-NEXT:    vluxei32.v v16, (zero), v8, v0.t
; RV32-NEXT:    vmv1r.v v0, v7
; RV32-NEXT:    vluxei32.v v24, (zero), v12, v0.t
; RV32-NEXT:    slli a0, a0, 3
; RV32-NEXT:    add a0, a1, a0
; RV32-NEXT:    vs8r.v v24, (a0)
; RV32-NEXT:    vs8r.v v16, (a1)
; RV32-NEXT:    ret
;
; RV64-LABEL: mgather_nxv16i64:
; RV64:       # %bb.0:
; RV64-NEXT:    addi sp, sp, -16
; RV64-NEXT:    .cfi_def_cfa_offset 16
; RV64-NEXT:    csrr a3, vlenb
; RV64-NEXT:    slli a3, a3, 3
; RV64-NEXT:    sub sp, sp, a3
; RV64-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
; RV64-NEXT:    addi a3, sp, 16
; RV64-NEXT:    vs8r.v v16, (a3) # Unknown-size Folded Spill
; RV64-NEXT:    vmv8r.v v16, v8
; RV64-NEXT:    vl8re64.v v24, (a0)
; RV64-NEXT:    csrr a0, vlenb
; RV64-NEXT:    vl8re64.v v8, (a1)
; RV64-NEXT:    srli a1, a0, 3
; RV64-NEXT:    vsetvli a3, zero, e8, mf4, ta, ma
; RV64-NEXT:    vslidedown.vx v7, v0, a1
; RV64-NEXT:    vsetvli a1, zero, e64, m8, ta, mu
; RV64-NEXT:    vluxei64.v v24, (zero), v16, v0.t
; RV64-NEXT:    vmv1r.v v0, v7
; RV64-NEXT:    addi a1, sp, 16
; RV64-NEXT:    vl8r.v v16, (a1) # Unknown-size Folded Reload
; RV64-NEXT:    vluxei64.v v8, (zero), v16, v0.t
; RV64-NEXT:    slli a0, a0, 3
; RV64-NEXT:    add a0, a2, a0
; RV64-NEXT:    vs8r.v v8, (a0)
; RV64-NEXT:    vs8r.v v24, (a2)
; RV64-NEXT:    csrr a0, vlenb
; RV64-NEXT:    slli a0, a0, 3
; RV64-NEXT:    add sp, sp, a0
; RV64-NEXT:    .cfi_def_cfa sp, 16
; RV64-NEXT:    addi sp, sp, 16
; RV64-NEXT:    .cfi_def_cfa_offset 0
; RV64-NEXT:    ret
  %p0 = call <vscale x 16 x ptr> @llvm.vector.insert.nxv8p0.nxv16p0(<vscale x 16 x ptr> undef, <vscale x 8 x ptr> %ptrs0, i64 0)
  %p1 = call <vscale x 16 x ptr> @llvm.vector.insert.nxv8p0.nxv16p0(<vscale x 16 x ptr> %p0, <vscale x 8 x ptr> %ptrs1, i64 8)

  %pt0 = call <vscale x 16 x i64> @llvm.vector.insert.nxv8i64.nxv16i64(<vscale x 16 x i64> undef, <vscale x 8 x i64> %passthru0, i64 0)
  %pt1 = call <vscale x 16 x i64> @llvm.vector.insert.nxv8i64.nxv16i64(<vscale x 16 x i64> %pt0, <vscale x 8 x i64> %passthru1, i64 8)

  %v = call <vscale x 16 x i64> @llvm.masked.gather.nxv16i64.nxv16p0(<vscale x 16 x ptr> %p1, i32 8, <vscale x 16 x i1> %m, <vscale x 16 x i64> %pt1)
  store <vscale x 16 x i64> %v, ptr %out
  ret void
}

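; bfloat gathers need no FP arithmetic on the elements, so zvfbfmin suffices
; and the codegen mirrors the 16-bit integer cases (e16 loads plus the usual
; index math).
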
declare <vscale x 1 x bfloat> @llvm.masked.gather.nxv1bf16.nxv1p0(<vscale x 1 x ptr>, i32, <vscale x 1 x i1>, <vscale x 1 x bfloat>)

define <vscale x 1 x bfloat> @mgather_nxv1bf16(<vscale x 1 x ptr> %ptrs, <vscale x 1 x i1> %m, <vscale x 1 x bfloat> %passthru) {
; RV32-LABEL: mgather_nxv1bf16:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetvli a0, zero, e16, mf4, ta, mu
; RV32-NEXT:    vluxei32.v v9, (zero), v8, v0.t
; RV32-NEXT:    vmv1r.v v8, v9
; RV32-NEXT:    ret
;
; RV64-LABEL: mgather_nxv1bf16:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli a0, zero, e16, mf4, ta, mu
; RV64-NEXT:    vluxei64.v v9, (zero), v8, v0.t
; RV64-NEXT:    vmv1r.v v8, v9
; RV64-NEXT:    ret
  %v = call <vscale x 1 x bfloat> @llvm.masked.gather.nxv1bf16.nxv1p0(<vscale x 1 x ptr> %ptrs, i32 2, <vscale x 1 x i1> %m, <vscale x 1 x bfloat> %passthru)
  ret <vscale x 1 x bfloat> %v
}

declare <vscale x 2 x bfloat> @llvm.masked.gather.nxv2bf16.nxv2p0(<vscale x 2 x ptr>, i32, <vscale x 2 x i1>, <vscale x 2 x bfloat>)

define <vscale x 2 x bfloat> @mgather_nxv2bf16(<vscale x 2 x ptr> %ptrs, <vscale x 2 x i1> %m, <vscale x 2 x bfloat> %passthru) {
; RV32-LABEL: mgather_nxv2bf16:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetvli a0, zero, e16, mf2, ta, mu
; RV32-NEXT:    vluxei32.v v9, (zero), v8, v0.t
; RV32-NEXT:    vmv1r.v v8, v9
; RV32-NEXT:    ret
;
; RV64-LABEL: mgather_nxv2bf16:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli a0, zero, e16, mf2, ta, mu
; RV64-NEXT:    vluxei64.v v10, (zero), v8, v0.t
; RV64-NEXT:    vmv1r.v v8, v10
; RV64-NEXT:    ret
  %v = call <vscale x 2 x bfloat> @llvm.masked.gather.nxv2bf16.nxv2p0(<vscale x 2 x ptr> %ptrs, i32 2, <vscale x 2 x i1> %m, <vscale x 2 x bfloat> %passthru)
  ret <vscale x 2 x bfloat> %v
}

declare <vscale x 4 x bfloat> @llvm.masked.gather.nxv4bf16.nxv4p0(<vscale x 4 x ptr>, i32, <vscale x 4 x i1>, <vscale x 4 x bfloat>)

define <vscale x 4 x bfloat> @mgather_nxv4bf16(<vscale x 4 x ptr> %ptrs, <vscale x 4 x i1> %m, <vscale x 4 x bfloat> %passthru) {
; RV32-LABEL: mgather_nxv4bf16:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetvli a0, zero, e16, m1, ta, mu
; RV32-NEXT:    vluxei32.v v10, (zero), v8, v0.t
; RV32-NEXT:    vmv.v.v v8, v10
; RV32-NEXT:    ret
;
; RV64-LABEL: mgather_nxv4bf16:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli a0, zero, e16, m1, ta, mu
; RV64-NEXT:    vluxei64.v v12, (zero), v8, v0.t
; RV64-NEXT:    vmv.v.v v8, v12
; RV64-NEXT:    ret
  %v = call <vscale x 4 x bfloat> @llvm.masked.gather.nxv4bf16.nxv4p0(<vscale x 4 x ptr> %ptrs, i32 2, <vscale x 4 x i1> %m, <vscale x 4 x bfloat> %passthru)
  ret <vscale x 4 x bfloat> %v
}

define <vscale x 4 x bfloat> @mgather_truemask_nxv4bf16(<vscale x 4 x ptr> %ptrs, <vscale x 4 x bfloat> %passthru) {
; RV32-LABEL: mgather_truemask_nxv4bf16:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
; RV32-NEXT:    vluxei32.v v10, (zero), v8
; RV32-NEXT:    vmv.v.v v8, v10
; RV32-NEXT:    ret
;
; RV64-LABEL: mgather_truemask_nxv4bf16:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
; RV64-NEXT:    vluxei64.v v12, (zero), v8
; RV64-NEXT:    vmv.v.v v8, v12
; RV64-NEXT:    ret
  %v = call <vscale x 4 x bfloat> @llvm.masked.gather.nxv4bf16.nxv4p0(<vscale x 4 x ptr> %ptrs, i32 2, <vscale x 4 x i1> splat (i1 1), <vscale x 4 x bfloat> %passthru)
  ret <vscale x 4 x bfloat> %v
}

define <vscale x 4 x bfloat> @mgather_falsemask_nxv4bf16(<vscale x 4 x ptr> %ptrs, <vscale x 4 x bfloat> %passthru) {
; RV32-LABEL: mgather_falsemask_nxv4bf16:
; RV32:       # %bb.0:
; RV32-NEXT:    vmv1r.v v8, v10
; RV32-NEXT:    ret
;
; RV64-LABEL: mgather_falsemask_nxv4bf16:
; RV64:       # %bb.0:
; RV64-NEXT:    vmv1r.v v8, v12
; RV64-NEXT:    ret
  %v = call <vscale x 4 x bfloat> @llvm.masked.gather.nxv4bf16.nxv4p0(<vscale x 4 x ptr> %ptrs, i32 2, <vscale x 4 x i1> zeroinitializer, <vscale x 4 x bfloat> %passthru)
  ret <vscale x 4 x bfloat> %v
}

declare <vscale x 8 x bfloat> @llvm.masked.gather.nxv8bf16.nxv8p0(<vscale x 8 x ptr>, i32, <vscale x 8 x i1>, <vscale x 8 x bfloat>)

define <vscale x 8 x bfloat> @mgather_nxv8bf16(<vscale x 8 x ptr> %ptrs, <vscale x 8 x i1> %m, <vscale x 8 x bfloat> %passthru) {
; RV32-LABEL: mgather_nxv8bf16:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetvli a0, zero, e16, m2, ta, mu
; RV32-NEXT:    vluxei32.v v12, (zero), v8, v0.t
; RV32-NEXT:    vmv.v.v v8, v12
; RV32-NEXT:    ret
;
; RV64-LABEL: mgather_nxv8bf16:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli a0, zero, e16, m2, ta, mu
; RV64-NEXT:    vluxei64.v v16, (zero), v8, v0.t
; RV64-NEXT:    vmv.v.v v8, v16
; RV64-NEXT:    ret
  %v = call <vscale x 8 x bfloat> @llvm.masked.gather.nxv8bf16.nxv8p0(<vscale x 8 x ptr> %ptrs, i32 2, <vscale x 8 x i1> %m, <vscale x 8 x bfloat> %passthru)
  ret <vscale x 8 x bfloat> %v
}

define <vscale x 8 x bfloat> @mgather_baseidx_nxv8i8_nxv8bf16(ptr %base, <vscale x 8 x i8> %idxs, <vscale x 8 x i1> %m, <vscale x 8 x bfloat> %passthru) {
; RV32-LABEL: mgather_baseidx_nxv8i8_nxv8bf16:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetvli a1, zero, e32, m4, ta, ma
; RV32-NEXT:    vsext.vf4 v12, v8
; RV32-NEXT:    vadd.vv v12, v12, v12
; RV32-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
; RV32-NEXT:    vluxei32.v v10, (a0), v12, v0.t
; RV32-NEXT:    vmv.v.v v8, v10
; RV32-NEXT:    ret
;
; RV64-LABEL: mgather_baseidx_nxv8i8_nxv8bf16:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
; RV64-NEXT:    vsext.vf8 v16, v8
; RV64-NEXT:    vadd.vv v16, v16, v16
; RV64-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
; RV64-NEXT:    vluxei64.v v10, (a0), v16, v0.t
; RV64-NEXT:    vmv.v.v v8, v10
; RV64-NEXT:    ret
  %ptrs = getelementptr inbounds bfloat, ptr %base, <vscale x 8 x i8> %idxs
  %v = call <vscale x 8 x bfloat> @llvm.masked.gather.nxv8bf16.nxv8p0(<vscale x 8 x ptr> %ptrs, i32 2, <vscale x 8 x i1> %m, <vscale x 8 x bfloat> %passthru)
  ret <vscale x 8 x bfloat> %v
}

1407 define <vscale x 8 x bfloat> @mgather_baseidx_sext_nxv8i8_nxv8bf16(ptr %base, <vscale x 8 x i8> %idxs, <vscale x 8 x i1> %m, <vscale x 8 x bfloat> %passthru) {
1408 ; RV32-LABEL: mgather_baseidx_sext_nxv8i8_nxv8bf16:
1410 ; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, ma
1411 ; RV32-NEXT: vsext.vf4 v12, v8
1412 ; RV32-NEXT: vadd.vv v12, v12, v12
1413 ; RV32-NEXT: vsetvli zero, zero, e16, m2, ta, mu
1414 ; RV32-NEXT: vluxei32.v v10, (a0), v12, v0.t
1415 ; RV32-NEXT: vmv.v.v v8, v10
1418 ; RV64-LABEL: mgather_baseidx_sext_nxv8i8_nxv8bf16:
1420 ; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma
1421 ; RV64-NEXT: vsext.vf8 v16, v8
1422 ; RV64-NEXT: vadd.vv v16, v16, v16
1423 ; RV64-NEXT: vsetvli zero, zero, e16, m2, ta, mu
1424 ; RV64-NEXT: vluxei64.v v10, (a0), v16, v0.t
1425 ; RV64-NEXT: vmv.v.v v8, v10
1427 %eidxs = sext <vscale x 8 x i8> %idxs to <vscale x 8 x i16>
1428 %ptrs = getelementptr inbounds bfloat, ptr %base, <vscale x 8 x i16> %eidxs
1429 %v = call <vscale x 8 x bfloat> @llvm.masked.gather.nxv8bf16.nxv8p0(<vscale x 8 x ptr> %ptrs, i32 2, <vscale x 8 x i1> %m, <vscale x 8 x bfloat> %passthru)
1430 ret <vscale x 8 x bfloat> %v
1433 define <vscale x 8 x bfloat> @mgather_baseidx_zext_nxv8i8_nxv8bf16(ptr %base, <vscale x 8 x i8> %idxs, <vscale x 8 x i1> %m, <vscale x 8 x bfloat> %passthru) {
1434 ; CHECK-LABEL: mgather_baseidx_zext_nxv8i8_nxv8bf16:
1436 ; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma
1437 ; CHECK-NEXT: vwaddu.vv v12, v8, v8
1438 ; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu
1439 ; CHECK-NEXT: vluxei16.v v10, (a0), v12, v0.t
1440 ; CHECK-NEXT: vmv.v.v v8, v10
1442 %eidxs = zext <vscale x 8 x i8> %idxs to <vscale x 8 x i16>
1443 %ptrs = getelementptr inbounds bfloat, ptr %base, <vscale x 8 x i16> %eidxs
1444 %v = call <vscale x 8 x bfloat> @llvm.masked.gather.nxv8bf16.nxv8p0(<vscale x 8 x ptr> %ptrs, i32 2, <vscale x 8 x i1> %m, <vscale x 8 x bfloat> %passthru)
1445 ret <vscale x 8 x bfloat> %v
1448 define <vscale x 8 x bfloat> @mgather_baseidx_nxv8bf16(ptr %base, <vscale x 8 x i16> %idxs, <vscale x 8 x i1> %m, <vscale x 8 x bfloat> %passthru) {
1449 ; RV32-LABEL: mgather_baseidx_nxv8bf16:
1451 ; RV32-NEXT: vsetvli a1, zero, e16, m2, ta, mu
1452 ; RV32-NEXT: vwadd.vv v12, v8, v8
1453 ; RV32-NEXT: vluxei32.v v10, (a0), v12, v0.t
1454 ; RV32-NEXT: vmv.v.v v8, v10
1457 ; RV64-LABEL: mgather_baseidx_nxv8bf16:
1459 ; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma
1460 ; RV64-NEXT: vsext.vf4 v16, v8
1461 ; RV64-NEXT: vadd.vv v16, v16, v16
1462 ; RV64-NEXT: vsetvli zero, zero, e16, m2, ta, mu
1463 ; RV64-NEXT: vluxei64.v v10, (a0), v16, v0.t
1464 ; RV64-NEXT: vmv.v.v v8, v10
1466 %ptrs = getelementptr inbounds bfloat, ptr %base, <vscale x 8 x i16> %idxs
1467 %v = call <vscale x 8 x bfloat> @llvm.masked.gather.nxv8bf16.nxv8p0(<vscale x 8 x ptr> %ptrs, i32 2, <vscale x 8 x i1> %m, <vscale x 8 x bfloat> %passthru)
1468 ret <vscale x 8 x bfloat> %v
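
; Masked gathers of f16 elements; these mirror the bf16 cases above.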
declare <vscale x 1 x half> @llvm.masked.gather.nxv1f16.nxv1p0(<vscale x 1 x ptr>, i32, <vscale x 1 x i1>, <vscale x 1 x half>)

define <vscale x 1 x half> @mgather_nxv1f16(<vscale x 1 x ptr> %ptrs, <vscale x 1 x i1> %m, <vscale x 1 x half> %passthru) {
; RV32-LABEL: mgather_nxv1f16:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a0, zero, e16, mf4, ta, mu
; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t
; RV32-NEXT: vmv1r.v v8, v9
; RV32-NEXT: ret
;
; RV64-LABEL: mgather_nxv1f16:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a0, zero, e16, mf4, ta, mu
; RV64-NEXT: vluxei64.v v9, (zero), v8, v0.t
; RV64-NEXT: vmv1r.v v8, v9
; RV64-NEXT: ret
  %v = call <vscale x 1 x half> @llvm.masked.gather.nxv1f16.nxv1p0(<vscale x 1 x ptr> %ptrs, i32 2, <vscale x 1 x i1> %m, <vscale x 1 x half> %passthru)
  ret <vscale x 1 x half> %v
}

declare <vscale x 2 x half> @llvm.masked.gather.nxv2f16.nxv2p0(<vscale x 2 x ptr>, i32, <vscale x 2 x i1>, <vscale x 2 x half>)

define <vscale x 2 x half> @mgather_nxv2f16(<vscale x 2 x ptr> %ptrs, <vscale x 2 x i1> %m, <vscale x 2 x half> %passthru) {
; RV32-LABEL: mgather_nxv2f16:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a0, zero, e16, mf2, ta, mu
; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t
; RV32-NEXT: vmv1r.v v8, v9
; RV32-NEXT: ret
;
; RV64-LABEL: mgather_nxv2f16:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a0, zero, e16, mf2, ta, mu
; RV64-NEXT: vluxei64.v v10, (zero), v8, v0.t
; RV64-NEXT: vmv1r.v v8, v10
; RV64-NEXT: ret
  %v = call <vscale x 2 x half> @llvm.masked.gather.nxv2f16.nxv2p0(<vscale x 2 x ptr> %ptrs, i32 2, <vscale x 2 x i1> %m, <vscale x 2 x half> %passthru)
  ret <vscale x 2 x half> %v
}

declare <vscale x 4 x half> @llvm.masked.gather.nxv4f16.nxv4p0(<vscale x 4 x ptr>, i32, <vscale x 4 x i1>, <vscale x 4 x half>)

define <vscale x 4 x half> @mgather_nxv4f16(<vscale x 4 x ptr> %ptrs, <vscale x 4 x i1> %m, <vscale x 4 x half> %passthru) {
; RV32-LABEL: mgather_nxv4f16:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a0, zero, e16, m1, ta, mu
; RV32-NEXT: vluxei32.v v10, (zero), v8, v0.t
; RV32-NEXT: vmv.v.v v8, v10
; RV32-NEXT: ret
;
; RV64-LABEL: mgather_nxv4f16:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a0, zero, e16, m1, ta, mu
; RV64-NEXT: vluxei64.v v12, (zero), v8, v0.t
; RV64-NEXT: vmv.v.v v8, v12
; RV64-NEXT: ret
  %v = call <vscale x 4 x half> @llvm.masked.gather.nxv4f16.nxv4p0(<vscale x 4 x ptr> %ptrs, i32 2, <vscale x 4 x i1> %m, <vscale x 4 x half> %passthru)
  ret <vscale x 4 x half> %v
}

define <vscale x 4 x half> @mgather_truemask_nxv4f16(<vscale x 4 x ptr> %ptrs, <vscale x 4 x half> %passthru) {
; RV32-LABEL: mgather_truemask_nxv4f16:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a0, zero, e16, m1, ta, ma
; RV32-NEXT: vluxei32.v v10, (zero), v8
; RV32-NEXT: vmv.v.v v8, v10
; RV32-NEXT: ret
;
; RV64-LABEL: mgather_truemask_nxv4f16:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a0, zero, e16, m1, ta, ma
; RV64-NEXT: vluxei64.v v12, (zero), v8
; RV64-NEXT: vmv.v.v v8, v12
; RV64-NEXT: ret
  %v = call <vscale x 4 x half> @llvm.masked.gather.nxv4f16.nxv4p0(<vscale x 4 x ptr> %ptrs, i32 2, <vscale x 4 x i1> splat (i1 1), <vscale x 4 x half> %passthru)
  ret <vscale x 4 x half> %v
}

define <vscale x 4 x half> @mgather_falsemask_nxv4f16(<vscale x 4 x ptr> %ptrs, <vscale x 4 x half> %passthru) {
; RV32-LABEL: mgather_falsemask_nxv4f16:
; RV32: # %bb.0:
; RV32-NEXT: vmv1r.v v8, v10
; RV32-NEXT: ret
;
; RV64-LABEL: mgather_falsemask_nxv4f16:
; RV64: # %bb.0:
; RV64-NEXT: vmv1r.v v8, v12
; RV64-NEXT: ret
  %v = call <vscale x 4 x half> @llvm.masked.gather.nxv4f16.nxv4p0(<vscale x 4 x ptr> %ptrs, i32 2, <vscale x 4 x i1> zeroinitializer, <vscale x 4 x half> %passthru)
  ret <vscale x 4 x half> %v
}

declare <vscale x 8 x half> @llvm.masked.gather.nxv8f16.nxv8p0(<vscale x 8 x ptr>, i32, <vscale x 8 x i1>, <vscale x 8 x half>)

define <vscale x 8 x half> @mgather_nxv8f16(<vscale x 8 x ptr> %ptrs, <vscale x 8 x i1> %m, <vscale x 8 x half> %passthru) {
; RV32-LABEL: mgather_nxv8f16:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a0, zero, e16, m2, ta, mu
; RV32-NEXT: vluxei32.v v12, (zero), v8, v0.t
; RV32-NEXT: vmv.v.v v8, v12
; RV32-NEXT: ret
;
; RV64-LABEL: mgather_nxv8f16:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a0, zero, e16, m2, ta, mu
; RV64-NEXT: vluxei64.v v16, (zero), v8, v0.t
; RV64-NEXT: vmv.v.v v8, v16
; RV64-NEXT: ret
  %v = call <vscale x 8 x half> @llvm.masked.gather.nxv8f16.nxv8p0(<vscale x 8 x ptr> %ptrs, i32 2, <vscale x 8 x i1> %m, <vscale x 8 x half> %passthru)
  ret <vscale x 8 x half> %v
}

define <vscale x 8 x half> @mgather_baseidx_nxv8i8_nxv8f16(ptr %base, <vscale x 8 x i8> %idxs, <vscale x 8 x i1> %m, <vscale x 8 x half> %passthru) {
; RV32-LABEL: mgather_baseidx_nxv8i8_nxv8f16:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, ma
; RV32-NEXT: vsext.vf4 v12, v8
; RV32-NEXT: vadd.vv v12, v12, v12
; RV32-NEXT: vsetvli zero, zero, e16, m2, ta, mu
; RV32-NEXT: vluxei32.v v10, (a0), v12, v0.t
; RV32-NEXT: vmv.v.v v8, v10
; RV32-NEXT: ret
;
; RV64-LABEL: mgather_baseidx_nxv8i8_nxv8f16:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma
; RV64-NEXT: vsext.vf8 v16, v8
; RV64-NEXT: vadd.vv v16, v16, v16
; RV64-NEXT: vsetvli zero, zero, e16, m2, ta, mu
; RV64-NEXT: vluxei64.v v10, (a0), v16, v0.t
; RV64-NEXT: vmv.v.v v8, v10
; RV64-NEXT: ret
  %ptrs = getelementptr inbounds half, ptr %base, <vscale x 8 x i8> %idxs
  %v = call <vscale x 8 x half> @llvm.masked.gather.nxv8f16.nxv8p0(<vscale x 8 x ptr> %ptrs, i32 2, <vscale x 8 x i1> %m, <vscale x 8 x half> %passthru)
  ret <vscale x 8 x half> %v
}

define <vscale x 8 x half> @mgather_baseidx_sext_nxv8i8_nxv8f16(ptr %base, <vscale x 8 x i8> %idxs, <vscale x 8 x i1> %m, <vscale x 8 x half> %passthru) {
; RV32-LABEL: mgather_baseidx_sext_nxv8i8_nxv8f16:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, ma
; RV32-NEXT: vsext.vf4 v12, v8
; RV32-NEXT: vadd.vv v12, v12, v12
; RV32-NEXT: vsetvli zero, zero, e16, m2, ta, mu
; RV32-NEXT: vluxei32.v v10, (a0), v12, v0.t
; RV32-NEXT: vmv.v.v v8, v10
; RV32-NEXT: ret
;
; RV64-LABEL: mgather_baseidx_sext_nxv8i8_nxv8f16:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma
; RV64-NEXT: vsext.vf8 v16, v8
; RV64-NEXT: vadd.vv v16, v16, v16
; RV64-NEXT: vsetvli zero, zero, e16, m2, ta, mu
; RV64-NEXT: vluxei64.v v10, (a0), v16, v0.t
; RV64-NEXT: vmv.v.v v8, v10
; RV64-NEXT: ret
  %eidxs = sext <vscale x 8 x i8> %idxs to <vscale x 8 x i16>
  %ptrs = getelementptr inbounds half, ptr %base, <vscale x 8 x i16> %eidxs
  %v = call <vscale x 8 x half> @llvm.masked.gather.nxv8f16.nxv8p0(<vscale x 8 x ptr> %ptrs, i32 2, <vscale x 8 x i1> %m, <vscale x 8 x half> %passthru)
  ret <vscale x 8 x half> %v
}

define <vscale x 8 x half> @mgather_baseidx_zext_nxv8i8_nxv8f16(ptr %base, <vscale x 8 x i8> %idxs, <vscale x 8 x i1> %m, <vscale x 8 x half> %passthru) {
; CHECK-LABEL: mgather_baseidx_zext_nxv8i8_nxv8f16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma
; CHECK-NEXT: vwaddu.vv v12, v8, v8
; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu
; CHECK-NEXT: vluxei16.v v10, (a0), v12, v0.t
; CHECK-NEXT: vmv.v.v v8, v10
; CHECK-NEXT: ret
  %eidxs = zext <vscale x 8 x i8> %idxs to <vscale x 8 x i16>
  %ptrs = getelementptr inbounds half, ptr %base, <vscale x 8 x i16> %eidxs
  %v = call <vscale x 8 x half> @llvm.masked.gather.nxv8f16.nxv8p0(<vscale x 8 x ptr> %ptrs, i32 2, <vscale x 8 x i1> %m, <vscale x 8 x half> %passthru)
  ret <vscale x 8 x half> %v
}

define <vscale x 8 x half> @mgather_baseidx_nxv8f16(ptr %base, <vscale x 8 x i16> %idxs, <vscale x 8 x i1> %m, <vscale x 8 x half> %passthru) {
; RV32-LABEL: mgather_baseidx_nxv8f16:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a1, zero, e16, m2, ta, mu
; RV32-NEXT: vwadd.vv v12, v8, v8
; RV32-NEXT: vluxei32.v v10, (a0), v12, v0.t
; RV32-NEXT: vmv.v.v v8, v10
; RV32-NEXT: ret
;
; RV64-LABEL: mgather_baseidx_nxv8f16:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma
; RV64-NEXT: vsext.vf4 v16, v8
; RV64-NEXT: vadd.vv v16, v16, v16
; RV64-NEXT: vsetvli zero, zero, e16, m2, ta, mu
; RV64-NEXT: vluxei64.v v10, (a0), v16, v0.t
; RV64-NEXT: vmv.v.v v8, v10
; RV64-NEXT: ret
  %ptrs = getelementptr inbounds half, ptr %base, <vscale x 8 x i16> %idxs
  %v = call <vscale x 8 x half> @llvm.masked.gather.nxv8f16.nxv8p0(<vscale x 8 x ptr> %ptrs, i32 2, <vscale x 8 x i1> %m, <vscale x 8 x half> %passthru)
  ret <vscale x 8 x half> %v
}
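
; Masked gathers of f32 elements; index vectors are scaled by the 4-byte
; element size with vsll.vi ..., 2.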
declare <vscale x 1 x float> @llvm.masked.gather.nxv1f32.nxv1p0(<vscale x 1 x ptr>, i32, <vscale x 1 x i1>, <vscale x 1 x float>)

define <vscale x 1 x float> @mgather_nxv1f32(<vscale x 1 x ptr> %ptrs, <vscale x 1 x i1> %m, <vscale x 1 x float> %passthru) {
; RV32-LABEL: mgather_nxv1f32:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a0, zero, e32, mf2, ta, mu
; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t
; RV32-NEXT: vmv1r.v v8, v9
; RV32-NEXT: ret
;
; RV64-LABEL: mgather_nxv1f32:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a0, zero, e32, mf2, ta, mu
; RV64-NEXT: vluxei64.v v9, (zero), v8, v0.t
; RV64-NEXT: vmv1r.v v8, v9
; RV64-NEXT: ret
  %v = call <vscale x 1 x float> @llvm.masked.gather.nxv1f32.nxv1p0(<vscale x 1 x ptr> %ptrs, i32 4, <vscale x 1 x i1> %m, <vscale x 1 x float> %passthru)
  ret <vscale x 1 x float> %v
}

declare <vscale x 2 x float> @llvm.masked.gather.nxv2f32.nxv2p0(<vscale x 2 x ptr>, i32, <vscale x 2 x i1>, <vscale x 2 x float>)

define <vscale x 2 x float> @mgather_nxv2f32(<vscale x 2 x ptr> %ptrs, <vscale x 2 x i1> %m, <vscale x 2 x float> %passthru) {
; RV32-LABEL: mgather_nxv2f32:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a0, zero, e32, m1, ta, mu
; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t
; RV32-NEXT: vmv.v.v v8, v9
; RV32-NEXT: ret
;
; RV64-LABEL: mgather_nxv2f32:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a0, zero, e32, m1, ta, mu
; RV64-NEXT: vluxei64.v v10, (zero), v8, v0.t
; RV64-NEXT: vmv.v.v v8, v10
; RV64-NEXT: ret
  %v = call <vscale x 2 x float> @llvm.masked.gather.nxv2f32.nxv2p0(<vscale x 2 x ptr> %ptrs, i32 4, <vscale x 2 x i1> %m, <vscale x 2 x float> %passthru)
  ret <vscale x 2 x float> %v
}

declare <vscale x 4 x float> @llvm.masked.gather.nxv4f32.nxv4p0(<vscale x 4 x ptr>, i32, <vscale x 4 x i1>, <vscale x 4 x float>)

define <vscale x 4 x float> @mgather_nxv4f32(<vscale x 4 x ptr> %ptrs, <vscale x 4 x i1> %m, <vscale x 4 x float> %passthru) {
; RV32-LABEL: mgather_nxv4f32:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a0, zero, e32, m2, ta, mu
; RV32-NEXT: vluxei32.v v10, (zero), v8, v0.t
; RV32-NEXT: vmv.v.v v8, v10
; RV32-NEXT: ret
;
; RV64-LABEL: mgather_nxv4f32:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a0, zero, e32, m2, ta, mu
; RV64-NEXT: vluxei64.v v12, (zero), v8, v0.t
; RV64-NEXT: vmv.v.v v8, v12
; RV64-NEXT: ret
  %v = call <vscale x 4 x float> @llvm.masked.gather.nxv4f32.nxv4p0(<vscale x 4 x ptr> %ptrs, i32 4, <vscale x 4 x i1> %m, <vscale x 4 x float> %passthru)
  ret <vscale x 4 x float> %v
}

define <vscale x 4 x float> @mgather_truemask_nxv4f32(<vscale x 4 x ptr> %ptrs, <vscale x 4 x float> %passthru) {
; RV32-LABEL: mgather_truemask_nxv4f32:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a0, zero, e32, m2, ta, ma
; RV32-NEXT: vluxei32.v v8, (zero), v8
; RV32-NEXT: ret
;
; RV64-LABEL: mgather_truemask_nxv4f32:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a0, zero, e32, m2, ta, ma
; RV64-NEXT: vluxei64.v v12, (zero), v8
; RV64-NEXT: vmv.v.v v8, v12
; RV64-NEXT: ret
  %v = call <vscale x 4 x float> @llvm.masked.gather.nxv4f32.nxv4p0(<vscale x 4 x ptr> %ptrs, i32 4, <vscale x 4 x i1> splat (i1 1), <vscale x 4 x float> %passthru)
  ret <vscale x 4 x float> %v
}

define <vscale x 4 x float> @mgather_falsemask_nxv4f32(<vscale x 4 x ptr> %ptrs, <vscale x 4 x float> %passthru) {
; RV32-LABEL: mgather_falsemask_nxv4f32:
; RV32: # %bb.0:
; RV32-NEXT: vmv2r.v v8, v10
; RV32-NEXT: ret
;
; RV64-LABEL: mgather_falsemask_nxv4f32:
; RV64: # %bb.0:
; RV64-NEXT: vmv2r.v v8, v12
; RV64-NEXT: ret
  %v = call <vscale x 4 x float> @llvm.masked.gather.nxv4f32.nxv4p0(<vscale x 4 x ptr> %ptrs, i32 4, <vscale x 4 x i1> zeroinitializer, <vscale x 4 x float> %passthru)
  ret <vscale x 4 x float> %v
}

declare <vscale x 8 x float> @llvm.masked.gather.nxv8f32.nxv8p0(<vscale x 8 x ptr>, i32, <vscale x 8 x i1>, <vscale x 8 x float>)

define <vscale x 8 x float> @mgather_nxv8f32(<vscale x 8 x ptr> %ptrs, <vscale x 8 x i1> %m, <vscale x 8 x float> %passthru) {
; RV32-LABEL: mgather_nxv8f32:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a0, zero, e32, m4, ta, mu
; RV32-NEXT: vluxei32.v v12, (zero), v8, v0.t
; RV32-NEXT: vmv.v.v v8, v12
; RV32-NEXT: ret
;
; RV64-LABEL: mgather_nxv8f32:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a0, zero, e32, m4, ta, mu
; RV64-NEXT: vluxei64.v v16, (zero), v8, v0.t
; RV64-NEXT: vmv.v.v v8, v16
; RV64-NEXT: ret
  %v = call <vscale x 8 x float> @llvm.masked.gather.nxv8f32.nxv8p0(<vscale x 8 x ptr> %ptrs, i32 4, <vscale x 8 x i1> %m, <vscale x 8 x float> %passthru)
  ret <vscale x 8 x float> %v
}

define <vscale x 8 x float> @mgather_baseidx_nxv8i8_nxv8f32(ptr %base, <vscale x 8 x i8> %idxs, <vscale x 8 x i1> %m, <vscale x 8 x float> %passthru) {
; RV32-LABEL: mgather_baseidx_nxv8i8_nxv8f32:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu
; RV32-NEXT: vsext.vf4 v16, v8
; RV32-NEXT: vsll.vi v8, v16, 2
; RV32-NEXT: vluxei32.v v12, (a0), v8, v0.t
; RV32-NEXT: vmv.v.v v8, v12
; RV32-NEXT: ret
;
; RV64-LABEL: mgather_baseidx_nxv8i8_nxv8f32:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma
; RV64-NEXT: vsext.vf8 v16, v8
; RV64-NEXT: vsll.vi v16, v16, 2
; RV64-NEXT: vsetvli zero, zero, e32, m4, ta, mu
; RV64-NEXT: vluxei64.v v12, (a0), v16, v0.t
; RV64-NEXT: vmv.v.v v8, v12
; RV64-NEXT: ret
  %ptrs = getelementptr inbounds float, ptr %base, <vscale x 8 x i8> %idxs
  %v = call <vscale x 8 x float> @llvm.masked.gather.nxv8f32.nxv8p0(<vscale x 8 x ptr> %ptrs, i32 4, <vscale x 8 x i1> %m, <vscale x 8 x float> %passthru)
  ret <vscale x 8 x float> %v
}

define <vscale x 8 x float> @mgather_baseidx_sext_nxv8i8_nxv8f32(ptr %base, <vscale x 8 x i8> %idxs, <vscale x 8 x i1> %m, <vscale x 8 x float> %passthru) {
; RV32-LABEL: mgather_baseidx_sext_nxv8i8_nxv8f32:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu
; RV32-NEXT: vsext.vf4 v16, v8
; RV32-NEXT: vsll.vi v8, v16, 2
; RV32-NEXT: vluxei32.v v12, (a0), v8, v0.t
; RV32-NEXT: vmv.v.v v8, v12
; RV32-NEXT: ret
;
; RV64-LABEL: mgather_baseidx_sext_nxv8i8_nxv8f32:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma
; RV64-NEXT: vsext.vf8 v16, v8
; RV64-NEXT: vsll.vi v16, v16, 2
; RV64-NEXT: vsetvli zero, zero, e32, m4, ta, mu
; RV64-NEXT: vluxei64.v v12, (a0), v16, v0.t
; RV64-NEXT: vmv.v.v v8, v12
; RV64-NEXT: ret
  %eidxs = sext <vscale x 8 x i8> %idxs to <vscale x 8 x i32>
  %ptrs = getelementptr inbounds float, ptr %base, <vscale x 8 x i32> %eidxs
  %v = call <vscale x 8 x float> @llvm.masked.gather.nxv8f32.nxv8p0(<vscale x 8 x ptr> %ptrs, i32 4, <vscale x 8 x i1> %m, <vscale x 8 x float> %passthru)
  ret <vscale x 8 x float> %v
}

define <vscale x 8 x float> @mgather_baseidx_zext_nxv8i8_nxv8f32(ptr %base, <vscale x 8 x i8> %idxs, <vscale x 8 x i1> %m, <vscale x 8 x float> %passthru) {
; CHECK-LABEL: mgather_baseidx_zext_nxv8i8_nxv8f32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma
; CHECK-NEXT: vzext.vf2 v10, v8
; CHECK-NEXT: vsll.vi v8, v10, 2
; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu
; CHECK-NEXT: vluxei16.v v12, (a0), v8, v0.t
; CHECK-NEXT: vmv.v.v v8, v12
; CHECK-NEXT: ret
  %eidxs = zext <vscale x 8 x i8> %idxs to <vscale x 8 x i32>
  %ptrs = getelementptr inbounds float, ptr %base, <vscale x 8 x i32> %eidxs
  %v = call <vscale x 8 x float> @llvm.masked.gather.nxv8f32.nxv8p0(<vscale x 8 x ptr> %ptrs, i32 4, <vscale x 8 x i1> %m, <vscale x 8 x float> %passthru)
  ret <vscale x 8 x float> %v
}

define <vscale x 8 x float> @mgather_baseidx_nxv8i16_nxv8f32(ptr %base, <vscale x 8 x i16> %idxs, <vscale x 8 x i1> %m, <vscale x 8 x float> %passthru) {
; RV32-LABEL: mgather_baseidx_nxv8i16_nxv8f32:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu
; RV32-NEXT: vsext.vf2 v16, v8
; RV32-NEXT: vsll.vi v8, v16, 2
; RV32-NEXT: vluxei32.v v12, (a0), v8, v0.t
; RV32-NEXT: vmv.v.v v8, v12
; RV32-NEXT: ret
;
; RV64-LABEL: mgather_baseidx_nxv8i16_nxv8f32:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma
; RV64-NEXT: vsext.vf4 v16, v8
; RV64-NEXT: vsll.vi v16, v16, 2
; RV64-NEXT: vsetvli zero, zero, e32, m4, ta, mu
; RV64-NEXT: vluxei64.v v12, (a0), v16, v0.t
; RV64-NEXT: vmv.v.v v8, v12
; RV64-NEXT: ret
  %ptrs = getelementptr inbounds float, ptr %base, <vscale x 8 x i16> %idxs
  %v = call <vscale x 8 x float> @llvm.masked.gather.nxv8f32.nxv8p0(<vscale x 8 x ptr> %ptrs, i32 4, <vscale x 8 x i1> %m, <vscale x 8 x float> %passthru)
  ret <vscale x 8 x float> %v
}

define <vscale x 8 x float> @mgather_baseidx_sext_nxv8i16_nxv8f32(ptr %base, <vscale x 8 x i16> %idxs, <vscale x 8 x i1> %m, <vscale x 8 x float> %passthru) {
; RV32-LABEL: mgather_baseidx_sext_nxv8i16_nxv8f32:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu
; RV32-NEXT: vsext.vf2 v16, v8
; RV32-NEXT: vsll.vi v8, v16, 2
; RV32-NEXT: vluxei32.v v12, (a0), v8, v0.t
; RV32-NEXT: vmv.v.v v8, v12
; RV32-NEXT: ret
;
; RV64-LABEL: mgather_baseidx_sext_nxv8i16_nxv8f32:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma
; RV64-NEXT: vsext.vf4 v16, v8
; RV64-NEXT: vsll.vi v16, v16, 2
; RV64-NEXT: vsetvli zero, zero, e32, m4, ta, mu
; RV64-NEXT: vluxei64.v v12, (a0), v16, v0.t
; RV64-NEXT: vmv.v.v v8, v12
; RV64-NEXT: ret
  %eidxs = sext <vscale x 8 x i16> %idxs to <vscale x 8 x i32>
  %ptrs = getelementptr inbounds float, ptr %base, <vscale x 8 x i32> %eidxs
  %v = call <vscale x 8 x float> @llvm.masked.gather.nxv8f32.nxv8p0(<vscale x 8 x ptr> %ptrs, i32 4, <vscale x 8 x i1> %m, <vscale x 8 x float> %passthru)
  ret <vscale x 8 x float> %v
}

define <vscale x 8 x float> @mgather_baseidx_zext_nxv8i16_nxv8f32(ptr %base, <vscale x 8 x i16> %idxs, <vscale x 8 x i1> %m, <vscale x 8 x float> %passthru) {
; CHECK-LABEL: mgather_baseidx_zext_nxv8i16_nxv8f32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu
; CHECK-NEXT: vzext.vf2 v16, v8
; CHECK-NEXT: vsll.vi v8, v16, 2
; CHECK-NEXT: vluxei32.v v12, (a0), v8, v0.t
; CHECK-NEXT: vmv.v.v v8, v12
; CHECK-NEXT: ret
  %eidxs = zext <vscale x 8 x i16> %idxs to <vscale x 8 x i32>
  %ptrs = getelementptr inbounds float, ptr %base, <vscale x 8 x i32> %eidxs
  %v = call <vscale x 8 x float> @llvm.masked.gather.nxv8f32.nxv8p0(<vscale x 8 x ptr> %ptrs, i32 4, <vscale x 8 x i1> %m, <vscale x 8 x float> %passthru)
  ret <vscale x 8 x float> %v
}

define <vscale x 8 x float> @mgather_baseidx_nxv8f32(ptr %base, <vscale x 8 x i32> %idxs, <vscale x 8 x i1> %m, <vscale x 8 x float> %passthru) {
; RV32-LABEL: mgather_baseidx_nxv8f32:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu
; RV32-NEXT: vsll.vi v8, v8, 2
; RV32-NEXT: vluxei32.v v12, (a0), v8, v0.t
; RV32-NEXT: vmv.v.v v8, v12
; RV32-NEXT: ret
;
; RV64-LABEL: mgather_baseidx_nxv8f32:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma
; RV64-NEXT: vsext.vf2 v16, v8
; RV64-NEXT: vsll.vi v16, v16, 2
; RV64-NEXT: vsetvli zero, zero, e32, m4, ta, mu
; RV64-NEXT: vluxei64.v v12, (a0), v16, v0.t
; RV64-NEXT: vmv.v.v v8, v12
; RV64-NEXT: ret
  %ptrs = getelementptr inbounds float, ptr %base, <vscale x 8 x i32> %idxs
  %v = call <vscale x 8 x float> @llvm.masked.gather.nxv8f32.nxv8p0(<vscale x 8 x ptr> %ptrs, i32 4, <vscale x 8 x i1> %m, <vscale x 8 x float> %passthru)
  ret <vscale x 8 x float> %v
}
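
; Masked gathers of f64 elements; index vectors are scaled by the 8-byte
; element size with vsll.vi ..., 3.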
declare <vscale x 1 x double> @llvm.masked.gather.nxv1f64.nxv1p0(<vscale x 1 x ptr>, i32, <vscale x 1 x i1>, <vscale x 1 x double>)

define <vscale x 1 x double> @mgather_nxv1f64(<vscale x 1 x ptr> %ptrs, <vscale x 1 x i1> %m, <vscale x 1 x double> %passthru) {
; RV32-LABEL: mgather_nxv1f64:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu
; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t
; RV32-NEXT: vmv.v.v v8, v9
; RV32-NEXT: ret
;
; RV64-LABEL: mgather_nxv1f64:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a0, zero, e64, m1, ta, mu
; RV64-NEXT: vluxei64.v v9, (zero), v8, v0.t
; RV64-NEXT: vmv.v.v v8, v9
; RV64-NEXT: ret
  %v = call <vscale x 1 x double> @llvm.masked.gather.nxv1f64.nxv1p0(<vscale x 1 x ptr> %ptrs, i32 8, <vscale x 1 x i1> %m, <vscale x 1 x double> %passthru)
  ret <vscale x 1 x double> %v
}

declare <vscale x 2 x double> @llvm.masked.gather.nxv2f64.nxv2p0(<vscale x 2 x ptr>, i32, <vscale x 2 x i1>, <vscale x 2 x double>)

define <vscale x 2 x double> @mgather_nxv2f64(<vscale x 2 x ptr> %ptrs, <vscale x 2 x i1> %m, <vscale x 2 x double> %passthru) {
; RV32-LABEL: mgather_nxv2f64:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a0, zero, e64, m2, ta, mu
; RV32-NEXT: vluxei32.v v10, (zero), v8, v0.t
; RV32-NEXT: vmv.v.v v8, v10
; RV32-NEXT: ret
;
; RV64-LABEL: mgather_nxv2f64:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a0, zero, e64, m2, ta, mu
; RV64-NEXT: vluxei64.v v10, (zero), v8, v0.t
; RV64-NEXT: vmv.v.v v8, v10
; RV64-NEXT: ret
  %v = call <vscale x 2 x double> @llvm.masked.gather.nxv2f64.nxv2p0(<vscale x 2 x ptr> %ptrs, i32 8, <vscale x 2 x i1> %m, <vscale x 2 x double> %passthru)
  ret <vscale x 2 x double> %v
}

declare <vscale x 4 x double> @llvm.masked.gather.nxv4f64.nxv4p0(<vscale x 4 x ptr>, i32, <vscale x 4 x i1>, <vscale x 4 x double>)

define <vscale x 4 x double> @mgather_nxv4f64(<vscale x 4 x ptr> %ptrs, <vscale x 4 x i1> %m, <vscale x 4 x double> %passthru) {
; RV32-LABEL: mgather_nxv4f64:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a0, zero, e64, m4, ta, mu
; RV32-NEXT: vluxei32.v v12, (zero), v8, v0.t
; RV32-NEXT: vmv.v.v v8, v12
; RV32-NEXT: ret
;
; RV64-LABEL: mgather_nxv4f64:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a0, zero, e64, m4, ta, mu
; RV64-NEXT: vluxei64.v v12, (zero), v8, v0.t
; RV64-NEXT: vmv.v.v v8, v12
; RV64-NEXT: ret
  %v = call <vscale x 4 x double> @llvm.masked.gather.nxv4f64.nxv4p0(<vscale x 4 x ptr> %ptrs, i32 8, <vscale x 4 x i1> %m, <vscale x 4 x double> %passthru)
  ret <vscale x 4 x double> %v
}

define <vscale x 4 x double> @mgather_truemask_nxv4f64(<vscale x 4 x ptr> %ptrs, <vscale x 4 x double> %passthru) {
; RV32-LABEL: mgather_truemask_nxv4f64:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a0, zero, e64, m4, ta, ma
; RV32-NEXT: vluxei32.v v12, (zero), v8
; RV32-NEXT: vmv.v.v v8, v12
; RV32-NEXT: ret
;
; RV64-LABEL: mgather_truemask_nxv4f64:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a0, zero, e64, m4, ta, ma
; RV64-NEXT: vluxei64.v v8, (zero), v8
; RV64-NEXT: ret
  %v = call <vscale x 4 x double> @llvm.masked.gather.nxv4f64.nxv4p0(<vscale x 4 x ptr> %ptrs, i32 8, <vscale x 4 x i1> splat (i1 1), <vscale x 4 x double> %passthru)
  ret <vscale x 4 x double> %v
}

define <vscale x 4 x double> @mgather_falsemask_nxv4f64(<vscale x 4 x ptr> %ptrs, <vscale x 4 x double> %passthru) {
; CHECK-LABEL: mgather_falsemask_nxv4f64:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv4r.v v8, v12
; CHECK-NEXT: ret
  %v = call <vscale x 4 x double> @llvm.masked.gather.nxv4f64.nxv4p0(<vscale x 4 x ptr> %ptrs, i32 8, <vscale x 4 x i1> zeroinitializer, <vscale x 4 x double> %passthru)
  ret <vscale x 4 x double> %v
}

declare <vscale x 8 x double> @llvm.masked.gather.nxv8f64.nxv8p0(<vscale x 8 x ptr>, i32, <vscale x 8 x i1>, <vscale x 8 x double>)

define <vscale x 8 x double> @mgather_nxv8f64(<vscale x 8 x ptr> %ptrs, <vscale x 8 x i1> %m, <vscale x 8 x double> %passthru) {
; RV32-LABEL: mgather_nxv8f64:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu
; RV32-NEXT: vluxei32.v v16, (zero), v8, v0.t
; RV32-NEXT: vmv.v.v v8, v16
; RV32-NEXT: ret
;
; RV64-LABEL: mgather_nxv8f64:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a0, zero, e64, m8, ta, mu
; RV64-NEXT: vluxei64.v v16, (zero), v8, v0.t
; RV64-NEXT: vmv.v.v v8, v16
; RV64-NEXT: ret
  %v = call <vscale x 8 x double> @llvm.masked.gather.nxv8f64.nxv8p0(<vscale x 8 x ptr> %ptrs, i32 8, <vscale x 8 x i1> %m, <vscale x 8 x double> %passthru)
  ret <vscale x 8 x double> %v
}

define <vscale x 8 x double> @mgather_baseidx_nxv8i8_nxv8f64(ptr %base, <vscale x 8 x i8> %idxs, <vscale x 8 x i1> %m, <vscale x 8 x double> %passthru) {
; RV32-LABEL: mgather_baseidx_nxv8i8_nxv8f64:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, ma
; RV32-NEXT: vsext.vf4 v12, v8
; RV32-NEXT: vsll.vi v8, v12, 3
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; RV32-NEXT: vluxei32.v v16, (a0), v8, v0.t
; RV32-NEXT: vmv.v.v v8, v16
; RV32-NEXT: ret
;
; RV64-LABEL: mgather_baseidx_nxv8i8_nxv8f64:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; RV64-NEXT: vsext.vf8 v24, v8
; RV64-NEXT: vsll.vi v8, v24, 3
; RV64-NEXT: vluxei64.v v16, (a0), v8, v0.t
; RV64-NEXT: vmv.v.v v8, v16
; RV64-NEXT: ret
  %ptrs = getelementptr inbounds double, ptr %base, <vscale x 8 x i8> %idxs
  %v = call <vscale x 8 x double> @llvm.masked.gather.nxv8f64.nxv8p0(<vscale x 8 x ptr> %ptrs, i32 8, <vscale x 8 x i1> %m, <vscale x 8 x double> %passthru)
  ret <vscale x 8 x double> %v
}

define <vscale x 8 x double> @mgather_baseidx_sext_nxv8i8_nxv8f64(ptr %base, <vscale x 8 x i8> %idxs, <vscale x 8 x i1> %m, <vscale x 8 x double> %passthru) {
; RV32-LABEL: mgather_baseidx_sext_nxv8i8_nxv8f64:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, ma
; RV32-NEXT: vsext.vf4 v12, v8
; RV32-NEXT: vsll.vi v8, v12, 3
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; RV32-NEXT: vluxei32.v v16, (a0), v8, v0.t
; RV32-NEXT: vmv.v.v v8, v16
; RV32-NEXT: ret
;
; RV64-LABEL: mgather_baseidx_sext_nxv8i8_nxv8f64:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; RV64-NEXT: vsext.vf8 v24, v8
; RV64-NEXT: vsll.vi v8, v24, 3
; RV64-NEXT: vluxei64.v v16, (a0), v8, v0.t
; RV64-NEXT: vmv.v.v v8, v16
; RV64-NEXT: ret
  %eidxs = sext <vscale x 8 x i8> %idxs to <vscale x 8 x i64>
  %ptrs = getelementptr inbounds double, ptr %base, <vscale x 8 x i64> %eidxs
  %v = call <vscale x 8 x double> @llvm.masked.gather.nxv8f64.nxv8p0(<vscale x 8 x ptr> %ptrs, i32 8, <vscale x 8 x i1> %m, <vscale x 8 x double> %passthru)
  ret <vscale x 8 x double> %v
}

define <vscale x 8 x double> @mgather_baseidx_zext_nxv8i8_nxv8f64(ptr %base, <vscale x 8 x i8> %idxs, <vscale x 8 x i1> %m, <vscale x 8 x double> %passthru) {
; CHECK-LABEL: mgather_baseidx_zext_nxv8i8_nxv8f64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma
; CHECK-NEXT: vzext.vf2 v10, v8
; CHECK-NEXT: vsll.vi v8, v10, 3
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; CHECK-NEXT: vluxei16.v v16, (a0), v8, v0.t
; CHECK-NEXT: vmv.v.v v8, v16
; CHECK-NEXT: ret
  %eidxs = zext <vscale x 8 x i8> %idxs to <vscale x 8 x i64>
  %ptrs = getelementptr inbounds double, ptr %base, <vscale x 8 x i64> %eidxs
  %v = call <vscale x 8 x double> @llvm.masked.gather.nxv8f64.nxv8p0(<vscale x 8 x ptr> %ptrs, i32 8, <vscale x 8 x i1> %m, <vscale x 8 x double> %passthru)
  ret <vscale x 8 x double> %v
}

define <vscale x 8 x double> @mgather_baseidx_nxv8i16_nxv8f64(ptr %base, <vscale x 8 x i16> %idxs, <vscale x 8 x i1> %m, <vscale x 8 x double> %passthru) {
; RV32-LABEL: mgather_baseidx_nxv8i16_nxv8f64:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, ma
; RV32-NEXT: vsext.vf2 v12, v8
; RV32-NEXT: vsll.vi v8, v12, 3
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; RV32-NEXT: vluxei32.v v16, (a0), v8, v0.t
; RV32-NEXT: vmv.v.v v8, v16
; RV32-NEXT: ret
;
; RV64-LABEL: mgather_baseidx_nxv8i16_nxv8f64:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; RV64-NEXT: vsext.vf4 v24, v8
; RV64-NEXT: vsll.vi v8, v24, 3
; RV64-NEXT: vluxei64.v v16, (a0), v8, v0.t
; RV64-NEXT: vmv.v.v v8, v16
; RV64-NEXT: ret
  %ptrs = getelementptr inbounds double, ptr %base, <vscale x 8 x i16> %idxs
  %v = call <vscale x 8 x double> @llvm.masked.gather.nxv8f64.nxv8p0(<vscale x 8 x ptr> %ptrs, i32 8, <vscale x 8 x i1> %m, <vscale x 8 x double> %passthru)
  ret <vscale x 8 x double> %v
}

define <vscale x 8 x double> @mgather_baseidx_sext_nxv8i16_nxv8f64(ptr %base, <vscale x 8 x i16> %idxs, <vscale x 8 x i1> %m, <vscale x 8 x double> %passthru) {
; RV32-LABEL: mgather_baseidx_sext_nxv8i16_nxv8f64:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, ma
; RV32-NEXT: vsext.vf2 v12, v8
; RV32-NEXT: vsll.vi v8, v12, 3
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; RV32-NEXT: vluxei32.v v16, (a0), v8, v0.t
; RV32-NEXT: vmv.v.v v8, v16
; RV32-NEXT: ret
;
; RV64-LABEL: mgather_baseidx_sext_nxv8i16_nxv8f64:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; RV64-NEXT: vsext.vf4 v24, v8
; RV64-NEXT: vsll.vi v8, v24, 3
; RV64-NEXT: vluxei64.v v16, (a0), v8, v0.t
; RV64-NEXT: vmv.v.v v8, v16
; RV64-NEXT: ret
  %eidxs = sext <vscale x 8 x i16> %idxs to <vscale x 8 x i64>
  %ptrs = getelementptr inbounds double, ptr %base, <vscale x 8 x i64> %eidxs
  %v = call <vscale x 8 x double> @llvm.masked.gather.nxv8f64.nxv8p0(<vscale x 8 x ptr> %ptrs, i32 8, <vscale x 8 x i1> %m, <vscale x 8 x double> %passthru)
  ret <vscale x 8 x double> %v
}

define <vscale x 8 x double> @mgather_baseidx_zext_nxv8i16_nxv8f64(ptr %base, <vscale x 8 x i16> %idxs, <vscale x 8 x i1> %m, <vscale x 8 x double> %passthru) {
; CHECK-LABEL: mgather_baseidx_zext_nxv8i16_nxv8f64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma
; CHECK-NEXT: vzext.vf2 v12, v8
; CHECK-NEXT: vsll.vi v8, v12, 3
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; CHECK-NEXT: vluxei32.v v16, (a0), v8, v0.t
; CHECK-NEXT: vmv.v.v v8, v16
; CHECK-NEXT: ret
  %eidxs = zext <vscale x 8 x i16> %idxs to <vscale x 8 x i64>
  %ptrs = getelementptr inbounds double, ptr %base, <vscale x 8 x i64> %eidxs
  %v = call <vscale x 8 x double> @llvm.masked.gather.nxv8f64.nxv8p0(<vscale x 8 x ptr> %ptrs, i32 8, <vscale x 8 x i1> %m, <vscale x 8 x double> %passthru)
  ret <vscale x 8 x double> %v
}

define <vscale x 8 x double> @mgather_baseidx_nxv8i32_nxv8f64(ptr %base, <vscale x 8 x i32> %idxs, <vscale x 8 x i1> %m, <vscale x 8 x double> %passthru) {
; RV32-LABEL: mgather_baseidx_nxv8i32_nxv8f64:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, ma
; RV32-NEXT: vsll.vi v8, v8, 3
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; RV32-NEXT: vluxei32.v v16, (a0), v8, v0.t
; RV32-NEXT: vmv.v.v v8, v16
; RV32-NEXT: ret
;
; RV64-LABEL: mgather_baseidx_nxv8i32_nxv8f64:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; RV64-NEXT: vsext.vf2 v24, v8
; RV64-NEXT: vsll.vi v8, v24, 3
; RV64-NEXT: vluxei64.v v16, (a0), v8, v0.t
; RV64-NEXT: vmv.v.v v8, v16
; RV64-NEXT: ret
  %ptrs = getelementptr inbounds double, ptr %base, <vscale x 8 x i32> %idxs
  %v = call <vscale x 8 x double> @llvm.masked.gather.nxv8f64.nxv8p0(<vscale x 8 x ptr> %ptrs, i32 8, <vscale x 8 x i1> %m, <vscale x 8 x double> %passthru)
  ret <vscale x 8 x double> %v
}

define <vscale x 8 x double> @mgather_baseidx_sext_nxv8i32_nxv8f64(ptr %base, <vscale x 8 x i32> %idxs, <vscale x 8 x i1> %m, <vscale x 8 x double> %passthru) {
; RV32-LABEL: mgather_baseidx_sext_nxv8i32_nxv8f64:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, ma
; RV32-NEXT: vsll.vi v8, v8, 3
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; RV32-NEXT: vluxei32.v v16, (a0), v8, v0.t
; RV32-NEXT: vmv.v.v v8, v16
; RV32-NEXT: ret
;
; RV64-LABEL: mgather_baseidx_sext_nxv8i32_nxv8f64:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; RV64-NEXT: vsext.vf2 v24, v8
; RV64-NEXT: vsll.vi v8, v24, 3
; RV64-NEXT: vluxei64.v v16, (a0), v8, v0.t
; RV64-NEXT: vmv.v.v v8, v16
; RV64-NEXT: ret
  %eidxs = sext <vscale x 8 x i32> %idxs to <vscale x 8 x i64>
  %ptrs = getelementptr inbounds double, ptr %base, <vscale x 8 x i64> %eidxs
  %v = call <vscale x 8 x double> @llvm.masked.gather.nxv8f64.nxv8p0(<vscale x 8 x ptr> %ptrs, i32 8, <vscale x 8 x i1> %m, <vscale x 8 x double> %passthru)
  ret <vscale x 8 x double> %v
}

define <vscale x 8 x double> @mgather_baseidx_zext_nxv8i32_nxv8f64(ptr %base, <vscale x 8 x i32> %idxs, <vscale x 8 x i1> %m, <vscale x 8 x double> %passthru) {
; RV32-LABEL: mgather_baseidx_zext_nxv8i32_nxv8f64:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, ma
; RV32-NEXT: vsll.vi v8, v8, 3
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; RV32-NEXT: vluxei32.v v16, (a0), v8, v0.t
; RV32-NEXT: vmv.v.v v8, v16
; RV32-NEXT: ret
;
; RV64-LABEL: mgather_baseidx_zext_nxv8i32_nxv8f64:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; RV64-NEXT: vzext.vf2 v24, v8
; RV64-NEXT: vsll.vi v8, v24, 3
; RV64-NEXT: vluxei64.v v16, (a0), v8, v0.t
; RV64-NEXT: vmv.v.v v8, v16
; RV64-NEXT: ret
  %eidxs = zext <vscale x 8 x i32> %idxs to <vscale x 8 x i64>
  %ptrs = getelementptr inbounds double, ptr %base, <vscale x 8 x i64> %eidxs
  %v = call <vscale x 8 x double> @llvm.masked.gather.nxv8f64.nxv8p0(<vscale x 8 x ptr> %ptrs, i32 8, <vscale x 8 x i1> %m, <vscale x 8 x double> %passthru)
  ret <vscale x 8 x double> %v
}

define <vscale x 8 x double> @mgather_baseidx_nxv8f64(ptr %base, <vscale x 8 x i64> %idxs, <vscale x 8 x i1> %m, <vscale x 8 x double> %passthru) {
; RV32-LABEL: mgather_baseidx_nxv8f64:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, ma
; RV32-NEXT: vnsrl.wi v24, v8, 0
; RV32-NEXT: vsll.vi v8, v24, 3
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; RV32-NEXT: vluxei32.v v16, (a0), v8, v0.t
; RV32-NEXT: vmv.v.v v8, v16
; RV32-NEXT: ret
;
; RV64-LABEL: mgather_baseidx_nxv8f64:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; RV64-NEXT: vsll.vi v8, v8, 3
; RV64-NEXT: vluxei64.v v16, (a0), v8, v0.t
; RV64-NEXT: vmv.v.v v8, v16
; RV64-NEXT: ret
  %ptrs = getelementptr inbounds double, ptr %base, <vscale x 8 x i64> %idxs
  %v = call <vscale x 8 x double> @llvm.masked.gather.nxv8f64.nxv8p0(<vscale x 8 x ptr> %ptrs, i32 8, <vscale x 8 x i1> %m, <vscale x 8 x double> %passthru)
  ret <vscale x 8 x double> %v
}
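
; i8-element gathers are split where the extended index vector would not fit
; in a single LMUL=8 register group; the mask is advanced between the parts
; with vslidedown.vx.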
declare <vscale x 16 x i8> @llvm.masked.gather.nxv16i8.nxv16p0(<vscale x 16 x ptr>, i32, <vscale x 16 x i1>, <vscale x 16 x i8>)

define <vscale x 16 x i8> @mgather_baseidx_nxv16i8(ptr %base, <vscale x 16 x i8> %idxs, <vscale x 16 x i1> %m, <vscale x 16 x i8> %passthru) {
; RV32-LABEL: mgather_baseidx_nxv16i8:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a1, zero, e32, m8, ta, ma
; RV32-NEXT: vsext.vf4 v16, v8
; RV32-NEXT: vsetvli zero, zero, e8, m2, ta, mu
; RV32-NEXT: vluxei32.v v10, (a0), v16, v0.t
; RV32-NEXT: vmv.v.v v8, v10
; RV32-NEXT: ret
;
; RV64-LABEL: mgather_baseidx_nxv16i8:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma
; RV64-NEXT: vsext.vf8 v16, v8
; RV64-NEXT: vsetvli zero, zero, e8, m1, ta, mu
; RV64-NEXT: vluxei64.v v10, (a0), v16, v0.t
; RV64-NEXT: csrr a1, vlenb
; RV64-NEXT: srli a1, a1, 3
; RV64-NEXT: vsetvli a2, zero, e8, mf4, ta, ma
; RV64-NEXT: vslidedown.vx v0, v0, a1
; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma
; RV64-NEXT: vsext.vf8 v16, v9
; RV64-NEXT: vsetvli zero, zero, e8, m1, ta, mu
; RV64-NEXT: vluxei64.v v11, (a0), v16, v0.t
; RV64-NEXT: vmv2r.v v8, v10
; RV64-NEXT: ret
  %ptrs = getelementptr inbounds i8, ptr %base, <vscale x 16 x i8> %idxs
  %v = call <vscale x 16 x i8> @llvm.masked.gather.nxv16i8.nxv16p0(<vscale x 16 x ptr> %ptrs, i32 2, <vscale x 16 x i1> %m, <vscale x 16 x i8> %passthru)
  ret <vscale x 16 x i8> %v
}

declare <vscale x 32 x i8> @llvm.masked.gather.nxv32i8.nxv32p0(<vscale x 32 x ptr>, i32, <vscale x 32 x i1>, <vscale x 32 x i8>)

define <vscale x 32 x i8> @mgather_baseidx_nxv32i8(ptr %base, <vscale x 32 x i8> %idxs, <vscale x 32 x i1> %m, <vscale x 32 x i8> %passthru) {
; RV32-LABEL: mgather_baseidx_nxv32i8:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a1, zero, e32, m8, ta, ma
; RV32-NEXT: vsext.vf4 v16, v8
; RV32-NEXT: vsetvli zero, zero, e8, m2, ta, mu
; RV32-NEXT: vluxei32.v v12, (a0), v16, v0.t
; RV32-NEXT: csrr a1, vlenb
; RV32-NEXT: srli a1, a1, 2
; RV32-NEXT: vsetvli a2, zero, e8, mf2, ta, ma
; RV32-NEXT: vslidedown.vx v0, v0, a1
; RV32-NEXT: vsetvli a1, zero, e32, m8, ta, ma
; RV32-NEXT: vsext.vf4 v16, v10
; RV32-NEXT: vsetvli zero, zero, e8, m2, ta, mu
; RV32-NEXT: vluxei32.v v14, (a0), v16, v0.t
; RV32-NEXT: vmv4r.v v8, v12
; RV32-NEXT: ret
;
; RV64-LABEL: mgather_baseidx_nxv32i8:
; RV64: # %bb.0:
; RV64-NEXT: vmv1r.v v16, v0
; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma
; RV64-NEXT: vsext.vf8 v24, v8
; RV64-NEXT: csrr a1, vlenb
; RV64-NEXT: vsetvli zero, zero, e8, m1, ta, mu
; RV64-NEXT: vluxei64.v v12, (a0), v24, v0.t
; RV64-NEXT: srli a2, a1, 3
; RV64-NEXT: vsetvli a3, zero, e8, mf4, ta, ma
; RV64-NEXT: vslidedown.vx v0, v0, a2
; RV64-NEXT: vsetvli a3, zero, e64, m8, ta, ma
; RV64-NEXT: vsext.vf8 v24, v9
; RV64-NEXT: vsetvli zero, zero, e8, m1, ta, mu
; RV64-NEXT: vluxei64.v v13, (a0), v24, v0.t
; RV64-NEXT: srli a1, a1, 2
; RV64-NEXT: vsetvli a3, zero, e8, mf2, ta, ma
; RV64-NEXT: vslidedown.vx v8, v16, a1
; RV64-NEXT: vsetvli a1, zero, e8, mf4, ta, ma
; RV64-NEXT: vslidedown.vx v0, v8, a2
; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma
; RV64-NEXT: vsext.vf8 v16, v11
; RV64-NEXT: vsetvli zero, zero, e8, m1, ta, mu
; RV64-NEXT: vluxei64.v v15, (a0), v16, v0.t
; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; RV64-NEXT: vsext.vf8 v16, v10
; RV64-NEXT: vmv1r.v v0, v8
; RV64-NEXT: vsetvli zero, zero, e8, m1, ta, mu
; RV64-NEXT: vluxei64.v v14, (a0), v16, v0.t
; RV64-NEXT: vmv4r.v v8, v12
; RV64-NEXT: ret
  %ptrs = getelementptr inbounds i8, ptr %base, <vscale x 32 x i8> %idxs
  %v = call <vscale x 32 x i8> @llvm.masked.gather.nxv32i8.nxv32p0(<vscale x 32 x ptr> %ptrs, i32 2, <vscale x 32 x i1> %m, <vscale x 32 x i8> %passthru)
  ret <vscale x 32 x i8> %v
}
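
; Gather with i1 indices: the index vector is first zero-extended to i8
; (vmv.v.i/vmerge.vim) and then used as byte offsets for vluxei8.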
define <vscale x 1 x i8> @mgather_baseidx_zext_nxv1i1_nxv1i8(ptr %base, <vscale x 1 x i1> %idxs, <vscale x 1 x i1> %m, <vscale x 1 x i8> %passthru) {
; CHECK-LABEL: mgather_baseidx_zext_nxv1i1_nxv1i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, mu
; CHECK-NEXT: vmv.v.i v10, 0
; CHECK-NEXT: vmerge.vim v10, v10, 1, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vluxei8.v v9, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: ret
  %eidxs = zext <vscale x 1 x i1> %idxs to <vscale x 1 x i8>
  %ptrs = getelementptr inbounds i8, ptr %base, <vscale x 1 x i8> %eidxs
  %v = call <vscale x 1 x i8> @llvm.masked.gather.nxv1i8.nxv1p0(<vscale x 1 x ptr> %ptrs, i32 1, <vscale x 1 x i1> %m, <vscale x 1 x i8> %passthru)
  ret <vscale x 1 x i8> %v
}