; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh,+zvfh -verify-machineinstrs \
; RUN:   < %s | FileCheck %s

declare <vscale x 1 x i8> @llvm.riscv.vrgather.vv.nxv1i8.i32(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  i32)

define <vscale x 1 x i8> @intrinsic_vrgather_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vrgather.vv v10, v8, v9
; CHECK-NEXT:    vmv1r.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vrgather.vv.nxv1i8.i32(
    <vscale x 1 x i8> undef,
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    i32 %2)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vrgather.vv.mask.nxv1i8.i32(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i1>,
  i32,
  i32)

define <vscale x 1 x i8> @intrinsic_vrgather_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT:    vrgather.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vrgather.vv.mask.nxv1i8.i32(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    <vscale x 1 x i8> %2,
    <vscale x 1 x i1> %3,
    i32 %4, i32 1)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 2 x i8> @llvm.riscv.vrgather.vv.nxv2i8.i32(
  <vscale x 2 x i8>,
  <vscale x 2 x i8>,
  <vscale x 2 x i8>,
  i32)

define <vscale x 2 x i8> @intrinsic_vrgather_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vv_nxv2i8_nxv2i8_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT:    vrgather.vv v10, v8, v9
; CHECK-NEXT:    vmv1r.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vrgather.vv.nxv2i8.i32(
    <vscale x 2 x i8> undef,
    <vscale x 2 x i8> %0,
    <vscale x 2 x i8> %1,
    i32 %2)

  ret <vscale x 2 x i8> %a
}

declare <vscale x 2 x i8> @llvm.riscv.vrgather.vv.mask.nxv2i8.i32(
  <vscale x 2 x i8>,
  <vscale x 2 x i8>,
  <vscale x 2 x i8>,
  <vscale x 2 x i1>,
  i32,
  i32)

define <vscale x 2 x i8> @intrinsic_vrgather_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv2i8_nxv2i8_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
; CHECK-NEXT:    vrgather.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vrgather.vv.mask.nxv2i8.i32(
    <vscale x 2 x i8> %0,
    <vscale x 2 x i8> %1,
    <vscale x 2 x i8> %2,
    <vscale x 2 x i1> %3,
    i32 %4, i32 1)

  ret <vscale x 2 x i8> %a
}

declare <vscale x 4 x i8> @llvm.riscv.vrgather.vv.nxv4i8.i32(
  <vscale x 4 x i8>,
  <vscale x 4 x i8>,
  <vscale x 4 x i8>,
  i32)

define <vscale x 4 x i8> @intrinsic_vrgather_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vv_nxv4i8_nxv4i8_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT:    vrgather.vv v10, v8, v9
; CHECK-NEXT:    vmv1r.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vrgather.vv.nxv4i8.i32(
    <vscale x 4 x i8> undef,
    <vscale x 4 x i8> %0,
    <vscale x 4 x i8> %1,
    i32 %2)

  ret <vscale x 4 x i8> %a
}

declare <vscale x 4 x i8> @llvm.riscv.vrgather.vv.mask.nxv4i8.i32(
  <vscale x 4 x i8>,
  <vscale x 4 x i8>,
  <vscale x 4 x i8>,
  <vscale x 4 x i1>,
  i32,
  i32)

define <vscale x 4 x i8> @intrinsic_vrgather_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv4i8_nxv4i8_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
; CHECK-NEXT:    vrgather.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vrgather.vv.mask.nxv4i8.i32(
    <vscale x 4 x i8> %0,
    <vscale x 4 x i8> %1,
    <vscale x 4 x i8> %2,
    <vscale x 4 x i1> %3,
    i32 %4, i32 1)

  ret <vscale x 4 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vrgather.vv.nxv8i8.i32(
  <vscale x 8 x i8>,
  <vscale x 8 x i8>,
  <vscale x 8 x i8>,
  i32)

define <vscale x 8 x i8> @intrinsic_vrgather_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vv_nxv8i8_nxv8i8_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT:    vrgather.vv v10, v8, v9
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vrgather.vv.nxv8i8.i32(
    <vscale x 8 x i8> undef,
    <vscale x 8 x i8> %0,
    <vscale x 8 x i8> %1,
    i32 %2)

  ret <vscale x 8 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vrgather.vv.mask.nxv8i8.i32(
  <vscale x 8 x i8>,
  <vscale x 8 x i8>,
  <vscale x 8 x i8>,
  <vscale x 8 x i1>,
  i32,
  i32)

define <vscale x 8 x i8> @intrinsic_vrgather_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv8i8_nxv8i8_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
; CHECK-NEXT:    vrgather.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vrgather.vv.mask.nxv8i8.i32(
    <vscale x 8 x i8> %0,
    <vscale x 8 x i8> %1,
    <vscale x 8 x i8> %2,
    <vscale x 8 x i1> %3,
    i32 %4, i32 1)

  ret <vscale x 8 x i8> %a
}

declare <vscale x 16 x i8> @llvm.riscv.vrgather.vv.nxv16i8.i32(
  <vscale x 16 x i8>,
  <vscale x 16 x i8>,
  <vscale x 16 x i8>,
  i32)

define <vscale x 16 x i8> @intrinsic_vrgather_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vv_nxv16i8_nxv16i8_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT:    vrgather.vv v12, v8, v10
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vrgather.vv.nxv16i8.i32(
    <vscale x 16 x i8> undef,
    <vscale x 16 x i8> %0,
    <vscale x 16 x i8> %1,
    i32 %2)

  ret <vscale x 16 x i8> %a
}

declare <vscale x 16 x i8> @llvm.riscv.vrgather.vv.mask.nxv16i8.i32(
  <vscale x 16 x i8>,
  <vscale x 16 x i8>,
  <vscale x 16 x i8>,
  <vscale x 16 x i1>,
  i32,
  i32)

define <vscale x 16 x i8> @intrinsic_vrgather_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv16i8_nxv16i8_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
; CHECK-NEXT:    vrgather.vv v8, v10, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vrgather.vv.mask.nxv16i8.i32(
    <vscale x 16 x i8> %0,
    <vscale x 16 x i8> %1,
    <vscale x 16 x i8> %2,
    <vscale x 16 x i1> %3,
    i32 %4, i32 1)

  ret <vscale x 16 x i8> %a
}

declare <vscale x 32 x i8> @llvm.riscv.vrgather.vv.nxv32i8.i32(
  <vscale x 32 x i8>,
  <vscale x 32 x i8>,
  <vscale x 32 x i8>,
  i32)

define <vscale x 32 x i8> @intrinsic_vrgather_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vv_nxv32i8_nxv32i8_nxv32i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT:    vrgather.vv v16, v8, v12
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vrgather.vv.nxv32i8.i32(
    <vscale x 32 x i8> undef,
    <vscale x 32 x i8> %0,
    <vscale x 32 x i8> %1,
    i32 %2)

  ret <vscale x 32 x i8> %a
}

declare <vscale x 32 x i8> @llvm.riscv.vrgather.vv.mask.nxv32i8.i32(
  <vscale x 32 x i8>,
  <vscale x 32 x i8>,
  <vscale x 32 x i8>,
  <vscale x 32 x i1>,
  i32,
  i32)

define <vscale x 32 x i8> @intrinsic_vrgather_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv32i8_nxv32i8_nxv32i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
; CHECK-NEXT:    vrgather.vv v8, v12, v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vrgather.vv.mask.nxv32i8.i32(
    <vscale x 32 x i8> %0,
    <vscale x 32 x i8> %1,
    <vscale x 32 x i8> %2,
    <vscale x 32 x i1> %3,
    i32 %4, i32 1)

  ret <vscale x 32 x i8> %a
}

declare <vscale x 64 x i8> @llvm.riscv.vrgather.vv.nxv64i8.i32(
  <vscale x 64 x i8>,
  <vscale x 64 x i8>,
  <vscale x 64 x i8>,
  i32)

define <vscale x 64 x i8> @intrinsic_vrgather_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vv_nxv64i8_nxv64i8_nxv64i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT:    vrgather.vv v24, v8, v16
; CHECK-NEXT:    vmv.v.v v8, v24
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 64 x i8> @llvm.riscv.vrgather.vv.nxv64i8.i32(
    <vscale x 64 x i8> undef,
    <vscale x 64 x i8> %0,
    <vscale x 64 x i8> %1,
    i32 %2)

  ret <vscale x 64 x i8> %a
}

declare <vscale x 64 x i8> @llvm.riscv.vrgather.vv.mask.nxv64i8.i32(
  <vscale x 64 x i8>,
  <vscale x 64 x i8>,
  <vscale x 64 x i8>,
  <vscale x 64 x i1>,
  i32,
  i32)

define <vscale x 64 x i8> @intrinsic_vrgather_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv64i8_nxv64i8_nxv64i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vl8r.v v24, (a0)
; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
; CHECK-NEXT:    vrgather.vv v8, v16, v24, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 64 x i8> @llvm.riscv.vrgather.vv.mask.nxv64i8.i32(
    <vscale x 64 x i8> %0,
    <vscale x 64 x i8> %1,
    <vscale x 64 x i8> %2,
    <vscale x 64 x i1> %3,
    i32 %4, i32 1)

  ret <vscale x 64 x i8> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vrgather.vv.nxv1i16.i32(
  <vscale x 1 x i16>,
  <vscale x 1 x i16>,
  <vscale x 1 x i16>,
  i32)

define <vscale x 1 x i16> @intrinsic_vrgather_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vv_nxv1i16_nxv1i16_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vrgather.vv v10, v8, v9
; CHECK-NEXT:    vmv1r.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vrgather.vv.nxv1i16.i32(
    <vscale x 1 x i16> undef,
    <vscale x 1 x i16> %0,
    <vscale x 1 x i16> %1,
    i32 %2)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vrgather.vv.mask.nxv1i16.i32(
  <vscale x 1 x i16>,
  <vscale x 1 x i16>,
  <vscale x 1 x i16>,
  <vscale x 1 x i1>,
  i32,
  i32)

define <vscale x 1 x i16> @intrinsic_vrgather_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv1i16_nxv1i16_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT:    vrgather.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vrgather.vv.mask.nxv1i16.i32(
    <vscale x 1 x i16> %0,
    <vscale x 1 x i16> %1,
    <vscale x 1 x i16> %2,
    <vscale x 1 x i1> %3,
    i32 %4, i32 1)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vrgather.vv.nxv2i16.i32(
  <vscale x 2 x i16>,
  <vscale x 2 x i16>,
  <vscale x 2 x i16>,
  i32)

define <vscale x 2 x i16> @intrinsic_vrgather_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vv_nxv2i16_nxv2i16_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vrgather.vv v10, v8, v9
; CHECK-NEXT:    vmv1r.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vrgather.vv.nxv2i16.i32(
    <vscale x 2 x i16> undef,
    <vscale x 2 x i16> %0,
    <vscale x 2 x i16> %1,
    i32 %2)

  ret <vscale x 2 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vrgather.vv.mask.nxv2i16.i32(
  <vscale x 2 x i16>,
  <vscale x 2 x i16>,
  <vscale x 2 x i16>,
  <vscale x 2 x i1>,
  i32,
  i32)

define <vscale x 2 x i16> @intrinsic_vrgather_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv2i16_nxv2i16_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT:    vrgather.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vrgather.vv.mask.nxv2i16.i32(
    <vscale x 2 x i16> %0,
    <vscale x 2 x i16> %1,
    <vscale x 2 x i16> %2,
    <vscale x 2 x i1> %3,
    i32 %4, i32 1)

  ret <vscale x 2 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vrgather.vv.nxv4i16.i32(
  <vscale x 4 x i16>,
  <vscale x 4 x i16>,
  <vscale x 4 x i16>,
  i32)

define <vscale x 4 x i16> @intrinsic_vrgather_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vv_nxv4i16_nxv4i16_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vrgather.vv v10, v8, v9
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vrgather.vv.nxv4i16.i32(
    <vscale x 4 x i16> undef,
    <vscale x 4 x i16> %0,
    <vscale x 4 x i16> %1,
    i32 %2)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vrgather.vv.mask.nxv4i16.i32(
  <vscale x 4 x i16>,
  <vscale x 4 x i16>,
  <vscale x 4 x i16>,
  <vscale x 4 x i1>,
  i32,
  i32)

define <vscale x 4 x i16> @intrinsic_vrgather_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv4i16_nxv4i16_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT:    vrgather.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vrgather.vv.mask.nxv4i16.i32(
    <vscale x 4 x i16> %0,
    <vscale x 4 x i16> %1,
    <vscale x 4 x i16> %2,
    <vscale x 4 x i1> %3,
    i32 %4, i32 1)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vrgather.vv.nxv8i16.i32(
  <vscale x 8 x i16>,
  <vscale x 8 x i16>,
  <vscale x 8 x i16>,
  i32)

define <vscale x 8 x i16> @intrinsic_vrgather_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vv_nxv8i16_nxv8i16_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    vrgather.vv v12, v8, v10
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vrgather.vv.nxv8i16.i32(
    <vscale x 8 x i16> undef,
    <vscale x 8 x i16> %0,
    <vscale x 8 x i16> %1,
    i32 %2)

  ret <vscale x 8 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vrgather.vv.mask.nxv8i16.i32(
  <vscale x 8 x i16>,
  <vscale x 8 x i16>,
  <vscale x 8 x i16>,
  <vscale x 8 x i1>,
  i32,
  i32)

define <vscale x 8 x i16> @intrinsic_vrgather_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv8i16_nxv8i16_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT:    vrgather.vv v8, v10, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vrgather.vv.mask.nxv8i16.i32(
    <vscale x 8 x i16> %0,
    <vscale x 8 x i16> %1,
    <vscale x 8 x i16> %2,
    <vscale x 8 x i1> %3,
    i32 %4, i32 1)

  ret <vscale x 8 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vrgather.vv.nxv16i16.i32(
  <vscale x 16 x i16>,
  <vscale x 16 x i16>,
  <vscale x 16 x i16>,
  i32)

define <vscale x 16 x i16> @intrinsic_vrgather_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vv_nxv16i16_nxv16i16_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT:    vrgather.vv v16, v8, v12
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vrgather.vv.nxv16i16.i32(
    <vscale x 16 x i16> undef,
    <vscale x 16 x i16> %0,
    <vscale x 16 x i16> %1,
    i32 %2)

  ret <vscale x 16 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vrgather.vv.mask.nxv16i16.i32(
  <vscale x 16 x i16>,
  <vscale x 16 x i16>,
  <vscale x 16 x i16>,
  <vscale x 16 x i1>,
  i32,
  i32)

define <vscale x 16 x i16> @intrinsic_vrgather_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv16i16_nxv16i16_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT:    vrgather.vv v8, v12, v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vrgather.vv.mask.nxv16i16.i32(
    <vscale x 16 x i16> %0,
    <vscale x 16 x i16> %1,
    <vscale x 16 x i16> %2,
    <vscale x 16 x i1> %3,
    i32 %4, i32 1)

  ret <vscale x 16 x i16> %a
}

declare <vscale x 32 x i16> @llvm.riscv.vrgather.vv.nxv32i16.i32(
  <vscale x 32 x i16>,
  <vscale x 32 x i16>,
  <vscale x 32 x i16>,
  i32)

define <vscale x 32 x i16> @intrinsic_vrgather_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vv_nxv32i16_nxv32i16_nxv32i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT:    vrgather.vv v24, v8, v16
; CHECK-NEXT:    vmv.v.v v8, v24
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vrgather.vv.nxv32i16.i32(
    <vscale x 32 x i16> undef,
    <vscale x 32 x i16> %0,
    <vscale x 32 x i16> %1,
    i32 %2)

  ret <vscale x 32 x i16> %a
}

declare <vscale x 32 x i16> @llvm.riscv.vrgather.vv.mask.nxv32i16.i32(
  <vscale x 32 x i16>,
  <vscale x 32 x i16>,
  <vscale x 32 x i16>,
  <vscale x 32 x i1>,
  i32,
  i32)

define <vscale x 32 x i16> @intrinsic_vrgather_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv32i16_nxv32i16_nxv32i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vl8re16.v v24, (a0)
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
; CHECK-NEXT:    vrgather.vv v8, v16, v24, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vrgather.vv.mask.nxv32i16.i32(
    <vscale x 32 x i16> %0,
    <vscale x 32 x i16> %1,
    <vscale x 32 x i16> %2,
    <vscale x 32 x i1> %3,
    i32 %4, i32 1)

  ret <vscale x 32 x i16> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vrgather.vv.nxv1i32.i32(
  <vscale x 1 x i32>,
  <vscale x 1 x i32>,
  <vscale x 1 x i32>,
  i32)

define <vscale x 1 x i32> @intrinsic_vrgather_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vv_nxv1i32_nxv1i32_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vrgather.vv v10, v8, v9
; CHECK-NEXT:    vmv1r.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vrgather.vv.nxv1i32.i32(
    <vscale x 1 x i32> undef,
    <vscale x 1 x i32> %0,
    <vscale x 1 x i32> %1,
    i32 %2)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vrgather.vv.mask.nxv1i32.i32(
  <vscale x 1 x i32>,
  <vscale x 1 x i32>,
  <vscale x 1 x i32>,
  <vscale x 1 x i1>,
  i32,
  i32)

define <vscale x 1 x i32> @intrinsic_vrgather_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv1i32_nxv1i32_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT:    vrgather.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vrgather.vv.mask.nxv1i32.i32(
    <vscale x 1 x i32> %0,
    <vscale x 1 x i32> %1,
    <vscale x 1 x i32> %2,
    <vscale x 1 x i1> %3,
    i32 %4, i32 1)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vrgather.vv.nxv2i32.i32(
  <vscale x 2 x i32>,
  <vscale x 2 x i32>,
  <vscale x 2 x i32>,
  i32)

define <vscale x 2 x i32> @intrinsic_vrgather_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vv_nxv2i32_nxv2i32_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vrgather.vv v10, v8, v9
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vrgather.vv.nxv2i32.i32(
    <vscale x 2 x i32> undef,
    <vscale x 2 x i32> %0,
    <vscale x 2 x i32> %1,
    i32 %2)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vrgather.vv.mask.nxv2i32.i32(
  <vscale x 2 x i32>,
  <vscale x 2 x i32>,
  <vscale x 2 x i32>,
  <vscale x 2 x i1>,
  i32,
  i32)

define <vscale x 2 x i32> @intrinsic_vrgather_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv2i32_nxv2i32_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT:    vrgather.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vrgather.vv.mask.nxv2i32.i32(
    <vscale x 2 x i32> %0,
    <vscale x 2 x i32> %1,
    <vscale x 2 x i32> %2,
    <vscale x 2 x i1> %3,
    i32 %4, i32 1)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vrgather.vv.nxv4i32.i32(
  <vscale x 4 x i32>,
  <vscale x 4 x i32>,
  <vscale x 4 x i32>,
  i32)

define <vscale x 4 x i32> @intrinsic_vrgather_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vv_nxv4i32_nxv4i32_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vrgather.vv v12, v8, v10
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vrgather.vv.nxv4i32.i32(
    <vscale x 4 x i32> undef,
    <vscale x 4 x i32> %0,
    <vscale x 4 x i32> %1,
    i32 %2)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vrgather.vv.mask.nxv4i32.i32(
  <vscale x 4 x i32>,
  <vscale x 4 x i32>,
  <vscale x 4 x i32>,
  <vscale x 4 x i1>,
  i32,
  i32)

define <vscale x 4 x i32> @intrinsic_vrgather_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv4i32_nxv4i32_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT:    vrgather.vv v8, v10, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vrgather.vv.mask.nxv4i32.i32(
    <vscale x 4 x i32> %0,
    <vscale x 4 x i32> %1,
    <vscale x 4 x i32> %2,
    <vscale x 4 x i1> %3,
    i32 %4, i32 1)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vrgather.vv.nxv8i32.i32(
  <vscale x 8 x i32>,
  <vscale x 8 x i32>,
  <vscale x 8 x i32>,
  i32)

define <vscale x 8 x i32> @intrinsic_vrgather_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vv_nxv8i32_nxv8i32_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vrgather.vv v16, v8, v12
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vrgather.vv.nxv8i32.i32(
    <vscale x 8 x i32> undef,
    <vscale x 8 x i32> %0,
    <vscale x 8 x i32> %1,
    i32 %2)

  ret <vscale x 8 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vrgather.vv.mask.nxv8i32.i32(
  <vscale x 8 x i32>,
  <vscale x 8 x i32>,
  <vscale x 8 x i32>,
  <vscale x 8 x i1>,
  i32,
  i32)

define <vscale x 8 x i32> @intrinsic_vrgather_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv8i32_nxv8i32_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT:    vrgather.vv v8, v12, v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vrgather.vv.mask.nxv8i32.i32(
    <vscale x 8 x i32> %0,
    <vscale x 8 x i32> %1,
    <vscale x 8 x i32> %2,
    <vscale x 8 x i1> %3,
    i32 %4, i32 1)

  ret <vscale x 8 x i32> %a
}

declare <vscale x 16 x i32> @llvm.riscv.vrgather.vv.nxv16i32.i32(
  <vscale x 16 x i32>,
  <vscale x 16 x i32>,
  <vscale x 16 x i32>,
  i32)

define <vscale x 16 x i32> @intrinsic_vrgather_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vv_nxv16i32_nxv16i32_nxv16i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT:    vrgather.vv v24, v8, v16
; CHECK-NEXT:    vmv.v.v v8, v24
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vrgather.vv.nxv16i32.i32(
    <vscale x 16 x i32> undef,
    <vscale x 16 x i32> %0,
    <vscale x 16 x i32> %1,
    i32 %2)

  ret <vscale x 16 x i32> %a
}

declare <vscale x 16 x i32> @llvm.riscv.vrgather.vv.mask.nxv16i32.i32(
  <vscale x 16 x i32>,
  <vscale x 16 x i32>,
  <vscale x 16 x i32>,
  <vscale x 16 x i1>,
  i32,
  i32)

define <vscale x 16 x i32> @intrinsic_vrgather_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv16i32_nxv16i32_nxv16i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vl8re32.v v24, (a0)
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
; CHECK-NEXT:    vrgather.vv v8, v16, v24, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vrgather.vv.mask.nxv16i32.i32(
    <vscale x 16 x i32> %0,
    <vscale x 16 x i32> %1,
    <vscale x 16 x i32> %2,
    <vscale x 16 x i1> %3,
    i32 %4, i32 1)

  ret <vscale x 16 x i32> %a
}

declare <vscale x 1 x half> @llvm.riscv.vrgather.vv.nxv1f16.i32(
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  <vscale x 1 x i16>,
  i32)

define <vscale x 1 x half> @intrinsic_vrgather_vv_nxv1f16_nxv1f16_nxv1i16(<vscale x 1 x half> %0, <vscale x 1 x i16> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vv_nxv1f16_nxv1f16_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vrgather.vv v10, v8, v9
; CHECK-NEXT:    vmv1r.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vrgather.vv.nxv1f16.i32(
    <vscale x 1 x half> undef,
    <vscale x 1 x half> %0,
    <vscale x 1 x i16> %1,
    i32 %2)

  ret <vscale x 1 x half> %a
}

declare <vscale x 1 x half> @llvm.riscv.vrgather.vv.mask.nxv1f16.i32(
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  <vscale x 1 x i16>,
  <vscale x 1 x i1>,
  i32,
  i32)

define <vscale x 1 x half> @intrinsic_vrgather_mask_vv_nxv1f16_nxv1f16_nxv1i16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv1f16_nxv1f16_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT:    vrgather.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vrgather.vv.mask.nxv1f16.i32(
    <vscale x 1 x half> %0,
    <vscale x 1 x half> %1,
    <vscale x 1 x i16> %2,
    <vscale x 1 x i1> %3,
    i32 %4, i32 1)

  ret <vscale x 1 x half> %a
}

declare <vscale x 2 x half> @llvm.riscv.vrgather.vv.nxv2f16.i32(
  <vscale x 2 x half>,
  <vscale x 2 x half>,
  <vscale x 2 x i16>,
  i32)

define <vscale x 2 x half> @intrinsic_vrgather_vv_nxv2f16_nxv2f16_nxv2i16(<vscale x 2 x half> %0, <vscale x 2 x i16> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vv_nxv2f16_nxv2f16_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vrgather.vv v10, v8, v9
; CHECK-NEXT:    vmv1r.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x half> @llvm.riscv.vrgather.vv.nxv2f16.i32(
    <vscale x 2 x half> undef,
    <vscale x 2 x half> %0,
    <vscale x 2 x i16> %1,
    i32 %2)

  ret <vscale x 2 x half> %a
}

declare <vscale x 2 x half> @llvm.riscv.vrgather.vv.mask.nxv2f16.i32(
  <vscale x 2 x half>,
  <vscale x 2 x half>,
  <vscale x 2 x i16>,
  <vscale x 2 x i1>,
  i32,
  i32)

define <vscale x 2 x half> @intrinsic_vrgather_mask_vv_nxv2f16_nxv2f16_nxv2i16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv2f16_nxv2f16_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT:    vrgather.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x half> @llvm.riscv.vrgather.vv.mask.nxv2f16.i32(
    <vscale x 2 x half> %0,
    <vscale x 2 x half> %1,
    <vscale x 2 x i16> %2,
    <vscale x 2 x i1> %3,
    i32 %4, i32 1)

  ret <vscale x 2 x half> %a
}

declare <vscale x 4 x half> @llvm.riscv.vrgather.vv.nxv4f16.i32(
  <vscale x 4 x half>,
  <vscale x 4 x half>,
  <vscale x 4 x i16>,
  i32)

define <vscale x 4 x half> @intrinsic_vrgather_vv_nxv4f16_nxv4f16_nxv4i16(<vscale x 4 x half> %0, <vscale x 4 x i16> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vv_nxv4f16_nxv4f16_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vrgather.vv v10, v8, v9
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x half> @llvm.riscv.vrgather.vv.nxv4f16.i32(
    <vscale x 4 x half> undef,
    <vscale x 4 x half> %0,
    <vscale x 4 x i16> %1,
    i32 %2)

  ret <vscale x 4 x half> %a
}

declare <vscale x 4 x half> @llvm.riscv.vrgather.vv.mask.nxv4f16.i32(
  <vscale x 4 x half>,
  <vscale x 4 x half>,
  <vscale x 4 x i16>,
  <vscale x 4 x i1>,
  i32,
  i32)

define <vscale x 4 x half> @intrinsic_vrgather_mask_vv_nxv4f16_nxv4f16_nxv4i16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv4f16_nxv4f16_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT:    vrgather.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x half> @llvm.riscv.vrgather.vv.mask.nxv4f16.i32(
    <vscale x 4 x half> %0,
    <vscale x 4 x half> %1,
    <vscale x 4 x i16> %2,
    <vscale x 4 x i1> %3,
    i32 %4, i32 1)

  ret <vscale x 4 x half> %a
}

declare <vscale x 8 x half> @llvm.riscv.vrgather.vv.nxv8f16.i32(
  <vscale x 8 x half>,
  <vscale x 8 x half>,
  <vscale x 8 x i16>,
  i32)

define <vscale x 8 x half> @intrinsic_vrgather_vv_nxv8f16_nxv8f16_nxv8i16(<vscale x 8 x half> %0, <vscale x 8 x i16> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vv_nxv8f16_nxv8f16_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    vrgather.vv v12, v8, v10
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x half> @llvm.riscv.vrgather.vv.nxv8f16.i32(
    <vscale x 8 x half> undef,
    <vscale x 8 x half> %0,
    <vscale x 8 x i16> %1,
    i32 %2)

  ret <vscale x 8 x half> %a
}

declare <vscale x 8 x half> @llvm.riscv.vrgather.vv.mask.nxv8f16.i32(
  <vscale x 8 x half>,
  <vscale x 8 x half>,
  <vscale x 8 x i16>,
  <vscale x 8 x i1>,
  i32,
  i32)

define <vscale x 8 x half> @intrinsic_vrgather_mask_vv_nxv8f16_nxv8f16_nxv8i16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv8f16_nxv8f16_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT:    vrgather.vv v8, v10, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x half> @llvm.riscv.vrgather.vv.mask.nxv8f16.i32(
    <vscale x 8 x half> %0,
    <vscale x 8 x half> %1,
    <vscale x 8 x i16> %2,
    <vscale x 8 x i1> %3,
    i32 %4, i32 1)

  ret <vscale x 8 x half> %a
}

declare <vscale x 16 x half> @llvm.riscv.vrgather.vv.nxv16f16.i32(
  <vscale x 16 x half>,
  <vscale x 16 x half>,
  <vscale x 16 x i16>,
  i32)

define <vscale x 16 x half> @intrinsic_vrgather_vv_nxv16f16_nxv16f16_nxv16i16(<vscale x 16 x half> %0, <vscale x 16 x i16> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vv_nxv16f16_nxv16f16_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT:    vrgather.vv v16, v8, v12
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x half> @llvm.riscv.vrgather.vv.nxv16f16.i32(
    <vscale x 16 x half> undef,
    <vscale x 16 x half> %0,
    <vscale x 16 x i16> %1,
    i32 %2)

  ret <vscale x 16 x half> %a
}

declare <vscale x 16 x half> @llvm.riscv.vrgather.vv.mask.nxv16f16.i32(
  <vscale x 16 x half>,
  <vscale x 16 x half>,
  <vscale x 16 x i16>,
  <vscale x 16 x i1>,
  i32,
  i32)

define <vscale x 16 x half> @intrinsic_vrgather_mask_vv_nxv16f16_nxv16f16_nxv16i16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv16f16_nxv16f16_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT:    vrgather.vv v8, v12, v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x half> @llvm.riscv.vrgather.vv.mask.nxv16f16.i32(
    <vscale x 16 x half> %0,
    <vscale x 16 x half> %1,
    <vscale x 16 x i16> %2,
    <vscale x 16 x i1> %3,
    i32 %4, i32 1)

  ret <vscale x 16 x half> %a
}

declare <vscale x 32 x half> @llvm.riscv.vrgather.vv.nxv32f16.i32(
  <vscale x 32 x half>,
  <vscale x 32 x half>,
  <vscale x 32 x i16>,
  i32)

define <vscale x 32 x half> @intrinsic_vrgather_vv_nxv32f16_nxv32f16_nxv32i16(<vscale x 32 x half> %0, <vscale x 32 x i16> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vv_nxv32f16_nxv32f16_nxv32i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT:    vrgather.vv v24, v8, v16
; CHECK-NEXT:    vmv.v.v v8, v24
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x half> @llvm.riscv.vrgather.vv.nxv32f16.i32(
    <vscale x 32 x half> undef,
    <vscale x 32 x half> %0,
    <vscale x 32 x i16> %1,
    i32 %2)

  ret <vscale x 32 x half> %a
}

declare <vscale x 32 x half> @llvm.riscv.vrgather.vv.mask.nxv32f16.i32(
  <vscale x 32 x half>,
  <vscale x 32 x half>,
  <vscale x 32 x i16>,
  <vscale x 32 x i1>,
  i32,
  i32)

define <vscale x 32 x half> @intrinsic_vrgather_mask_vv_nxv32f16_nxv32f16_nxv32i16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv32f16_nxv32f16_nxv32i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vl8re16.v v24, (a0)
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
; CHECK-NEXT:    vrgather.vv v8, v16, v24, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x half> @llvm.riscv.vrgather.vv.mask.nxv32f16.i32(
    <vscale x 32 x half> %0,
    <vscale x 32 x half> %1,
    <vscale x 32 x i16> %2,
    <vscale x 32 x i1> %3,
    i32 %4, i32 1)

  ret <vscale x 32 x half> %a
}

declare <vscale x 1 x float> @llvm.riscv.vrgather.vv.nxv1f32.i32(
  <vscale x 1 x float>,
  <vscale x 1 x float>,
  <vscale x 1 x i32>,
  i32)

define <vscale x 1 x float> @intrinsic_vrgather_vv_nxv1f32_nxv1f32_nxv1i32(<vscale x 1 x float> %0, <vscale x 1 x i32> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vv_nxv1f32_nxv1f32_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vrgather.vv v10, v8, v9
; CHECK-NEXT:    vmv1r.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vrgather.vv.nxv1f32.i32(
    <vscale x 1 x float> undef,
    <vscale x 1 x float> %0,
    <vscale x 1 x i32> %1,
    i32 %2)

  ret <vscale x 1 x float> %a
}

declare <vscale x 1 x float> @llvm.riscv.vrgather.vv.mask.nxv1f32.i32(
  <vscale x 1 x float>,
  <vscale x 1 x float>,
  <vscale x 1 x i32>,
  <vscale x 1 x i1>,
  i32,
  i32)

define <vscale x 1 x float> @intrinsic_vrgather_mask_vv_nxv1f32_nxv1f32_nxv1i32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv1f32_nxv1f32_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT:    vrgather.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vrgather.vv.mask.nxv1f32.i32(
    <vscale x 1 x float> %0,
    <vscale x 1 x float> %1,
    <vscale x 1 x i32> %2,
    <vscale x 1 x i1> %3,
    i32 %4, i32 1)

  ret <vscale x 1 x float> %a
}

declare <vscale x 2 x float> @llvm.riscv.vrgather.vv.nxv2f32.i32(
  <vscale x 2 x float>,
  <vscale x 2 x float>,
  <vscale x 2 x i32>,
  i32)

define <vscale x 2 x float> @intrinsic_vrgather_vv_nxv2f32_nxv2f32_nxv2i32(<vscale x 2 x float> %0, <vscale x 2 x i32> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vv_nxv2f32_nxv2f32_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vrgather.vv v10, v8, v9
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vrgather.vv.nxv2f32.i32(
    <vscale x 2 x float> undef,
    <vscale x 2 x float> %0,
    <vscale x 2 x i32> %1,
    i32 %2)

  ret <vscale x 2 x float> %a
}

declare <vscale x 2 x float> @llvm.riscv.vrgather.vv.mask.nxv2f32.i32(
  <vscale x 2 x float>,
  <vscale x 2 x float>,
  <vscale x 2 x i32>,
  <vscale x 2 x i1>,
  i32,
  i32)

define <vscale x 2 x float> @intrinsic_vrgather_mask_vv_nxv2f32_nxv2f32_nxv2i32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv2f32_nxv2f32_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT:    vrgather.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vrgather.vv.mask.nxv2f32.i32(
    <vscale x 2 x float> %0,
    <vscale x 2 x float> %1,
    <vscale x 2 x i32> %2,
    <vscale x 2 x i1> %3,
    i32 %4, i32 1)

  ret <vscale x 2 x float> %a
}

declare <vscale x 4 x float> @llvm.riscv.vrgather.vv.nxv4f32.i32(
  <vscale x 4 x float>,
  <vscale x 4 x float>,
  <vscale x 4 x i32>,
  i32)

define <vscale x 4 x float> @intrinsic_vrgather_vv_nxv4f32_nxv4f32_nxv4i32(<vscale x 4 x float> %0, <vscale x 4 x i32> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vv_nxv4f32_nxv4f32_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vrgather.vv v12, v8, v10
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x float> @llvm.riscv.vrgather.vv.nxv4f32.i32(
    <vscale x 4 x float> undef,
    <vscale x 4 x float> %0,
    <vscale x 4 x i32> %1,
    i32 %2)

  ret <vscale x 4 x float> %a
}

declare <vscale x 4 x float> @llvm.riscv.vrgather.vv.mask.nxv4f32.i32(
  <vscale x 4 x float>,
  <vscale x 4 x float>,
  <vscale x 4 x i32>,
  <vscale x 4 x i1>,
  i32,
  i32)

define <vscale x 4 x float> @intrinsic_vrgather_mask_vv_nxv4f32_nxv4f32_nxv4i32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv4f32_nxv4f32_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT:    vrgather.vv v8, v10, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x float> @llvm.riscv.vrgather.vv.mask.nxv4f32.i32(
    <vscale x 4 x float> %0,
    <vscale x 4 x float> %1,
    <vscale x 4 x i32> %2,
    <vscale x 4 x i1> %3,
    i32 %4, i32 1)

  ret <vscale x 4 x float> %a
}

declare <vscale x 8 x float> @llvm.riscv.vrgather.vv.nxv8f32.i32(
  <vscale x 8 x float>,
  <vscale x 8 x float>,
  <vscale x 8 x i32>,
  i32)

define <vscale x 8 x float> @intrinsic_vrgather_vv_nxv8f32_nxv8f32_nxv8i32(<vscale x 8 x float> %0, <vscale x 8 x i32> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vv_nxv8f32_nxv8f32_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vrgather.vv v16, v8, v12
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x float> @llvm.riscv.vrgather.vv.nxv8f32.i32(
    <vscale x 8 x float> undef,
    <vscale x 8 x float> %0,
    <vscale x 8 x i32> %1,
    i32 %2)

  ret <vscale x 8 x float> %a
}

declare <vscale x 8 x float> @llvm.riscv.vrgather.vv.mask.nxv8f32.i32(
  <vscale x 8 x float>,
  <vscale x 8 x float>,
  <vscale x 8 x i32>,
  <vscale x 8 x i1>,
  i32,
  i32)

define <vscale x 8 x float> @intrinsic_vrgather_mask_vv_nxv8f32_nxv8f32_nxv8i32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv8f32_nxv8f32_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT:    vrgather.vv v8, v12, v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x float> @llvm.riscv.vrgather.vv.mask.nxv8f32.i32(
    <vscale x 8 x float> %0,
    <vscale x 8 x float> %1,
    <vscale x 8 x i32> %2,
    <vscale x 8 x i1> %3,
    i32 %4, i32 1)

  ret <vscale x 8 x float> %a
}

declare <vscale x 16 x float> @llvm.riscv.vrgather.vv.nxv16f32.i32(
  <vscale x 16 x float>,
  <vscale x 16 x float>,
  <vscale x 16 x i32>,
  i32)

define <vscale x 16 x float> @intrinsic_vrgather_vv_nxv16f32_nxv16f32_nxv16i32(<vscale x 16 x float> %0, <vscale x 16 x i32> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vv_nxv16f32_nxv16f32_nxv16i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT:    vrgather.vv v24, v8, v16
; CHECK-NEXT:    vmv.v.v v8, v24
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x float> @llvm.riscv.vrgather.vv.nxv16f32.i32(
    <vscale x 16 x float> undef,
    <vscale x 16 x float> %0,
    <vscale x 16 x i32> %1,
    i32 %2)

  ret <vscale x 16 x float> %a
}

declare <vscale x 16 x float> @llvm.riscv.vrgather.vv.mask.nxv16f32.i32(
  <vscale x 16 x float>,
  <vscale x 16 x float>,
  <vscale x 16 x i32>,
  <vscale x 16 x i1>,
  i32,
  i32)

define <vscale x 16 x float> @intrinsic_vrgather_mask_vv_nxv16f32_nxv16f32_nxv16i32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv16f32_nxv16f32_nxv16i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vl8re32.v v24, (a0)
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
; CHECK-NEXT:    vrgather.vv v8, v16, v24, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x float> @llvm.riscv.vrgather.vv.mask.nxv16f32.i32(
    <vscale x 16 x float> %0,
    <vscale x 16 x float> %1,
    <vscale x 16 x i32> %2,
    <vscale x 16 x i1> %3,
    i32 %4, i32 1)

  ret <vscale x 16 x float> %a
}

declare <vscale x 1 x double> @llvm.riscv.vrgather.vv.nxv1f64.i32(
  <vscale x 1 x double>,
  <vscale x 1 x double>,
  <vscale x 1 x i64>,
  i32)

define <vscale x 1 x double> @intrinsic_vrgather_vv_nxv1f64_nxv1f64_nxv1i64(<vscale x 1 x double> %0, <vscale x 1 x i64> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vv_nxv1f64_nxv1f64_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vrgather.vv v10, v8, v9
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vrgather.vv.nxv1f64.i32(
    <vscale x 1 x double> undef,
    <vscale x 1 x double> %0,
    <vscale x 1 x i64> %1,
    i32 %2)

  ret <vscale x 1 x double> %a
}

declare <vscale x 1 x double> @llvm.riscv.vrgather.vv.mask.nxv1f64.i32(
  <vscale x 1 x double>,
  <vscale x 1 x double>,
  <vscale x 1 x i64>,
  <vscale x 1 x i1>,
  i32,
  i32)

define <vscale x 1 x double> @intrinsic_vrgather_mask_vv_nxv1f64_nxv1f64_nxv1i64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv1f64_nxv1f64_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT:    vrgather.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vrgather.vv.mask.nxv1f64.i32(
    <vscale x 1 x double> %0,
    <vscale x 1 x double> %1,
    <vscale x 1 x i64> %2,
    <vscale x 1 x i1> %3,
    i32 %4, i32 1)

  ret <vscale x 1 x double> %a
}

declare <vscale x 2 x double> @llvm.riscv.vrgather.vv.nxv2f64.i32(
  <vscale x 2 x double>,
  <vscale x 2 x double>,
  <vscale x 2 x i64>,
  i32)

define <vscale x 2 x double> @intrinsic_vrgather_vv_nxv2f64_nxv2f64_nxv2i64(<vscale x 2 x double> %0, <vscale x 2 x i64> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vv_nxv2f64_nxv2f64_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT:    vrgather.vv v12, v8, v10
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x double> @llvm.riscv.vrgather.vv.nxv2f64.i32(
    <vscale x 2 x double> undef,
    <vscale x 2 x double> %0,
    <vscale x 2 x i64> %1,
    i32 %2)

  ret <vscale x 2 x double> %a
}

declare <vscale x 2 x double> @llvm.riscv.vrgather.vv.mask.nxv2f64.i32(
  <vscale x 2 x double>,
  <vscale x 2 x double>,
  <vscale x 2 x i64>,
  <vscale x 2 x i1>,
  i32,
  i32)

define <vscale x 2 x double> @intrinsic_vrgather_mask_vv_nxv2f64_nxv2f64_nxv2i64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv2f64_nxv2f64_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
; CHECK-NEXT:    vrgather.vv v8, v10, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x double> @llvm.riscv.vrgather.vv.mask.nxv2f64.i32(
    <vscale x 2 x double> %0,
    <vscale x 2 x double> %1,
    <vscale x 2 x i64> %2,
    <vscale x 2 x i1> %3,
    i32 %4, i32 1)

  ret <vscale x 2 x double> %a
}

declare <vscale x 4 x double> @llvm.riscv.vrgather.vv.nxv4f64.i32(
  <vscale x 4 x double>,
  <vscale x 4 x double>,
  <vscale x 4 x i64>,
  i32)

define <vscale x 4 x double> @intrinsic_vrgather_vv_nxv4f64_nxv4f64_nxv4i64(<vscale x 4 x double> %0, <vscale x 4 x i64> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vv_nxv4f64_nxv4f64_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT:    vrgather.vv v16, v8, v12
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x double> @llvm.riscv.vrgather.vv.nxv4f64.i32(
    <vscale x 4 x double> undef,
    <vscale x 4 x double> %0,
    <vscale x 4 x i64> %1,
    i32 %2)

  ret <vscale x 4 x double> %a
}

declare <vscale x 4 x double> @llvm.riscv.vrgather.vv.mask.nxv4f64.i32(
  <vscale x 4 x double>,
  <vscale x 4 x double>,
  <vscale x 4 x i64>,
  <vscale x 4 x i1>,
  i32,
  i32)

define <vscale x 4 x double> @intrinsic_vrgather_mask_vv_nxv4f64_nxv4f64_nxv4i64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv4f64_nxv4f64_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
; CHECK-NEXT:    vrgather.vv v8, v12, v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x double> @llvm.riscv.vrgather.vv.mask.nxv4f64.i32(
    <vscale x 4 x double> %0,
    <vscale x 4 x double> %1,
    <vscale x 4 x i64> %2,
    <vscale x 4 x i1> %3,
    i32 %4, i32 1)

  ret <vscale x 4 x double> %a
}

declare <vscale x 8 x double> @llvm.riscv.vrgather.vv.nxv8f64.i32(
  <vscale x 8 x double>,
  <vscale x 8 x double>,
  <vscale x 8 x i64>,
  i32)

define <vscale x 8 x double> @intrinsic_vrgather_vv_nxv8f64_nxv8f64_nxv8i64(<vscale x 8 x double> %0, <vscale x 8 x i64> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vv_nxv8f64_nxv8f64_nxv8i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vrgather.vv v24, v8, v16
; CHECK-NEXT:    vmv.v.v v8, v24
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x double> @llvm.riscv.vrgather.vv.nxv8f64.i32(
    <vscale x 8 x double> undef,
    <vscale x 8 x double> %0,
    <vscale x 8 x i64> %1,
    i32 %2)

  ret <vscale x 8 x double> %a
}

declare <vscale x 8 x double> @llvm.riscv.vrgather.vv.mask.nxv8f64.i32(
  <vscale x 8 x double>,
  <vscale x 8 x double>,
  <vscale x 8 x i64>,
  <vscale x 8 x i1>,
  i32,
  i32)

define <vscale x 8 x double> @intrinsic_vrgather_mask_vv_nxv8f64_nxv8f64_nxv8i64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv8f64_nxv8f64_nxv8i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vl8re64.v v24, (a0)
; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
; CHECK-NEXT:    vrgather.vv v8, v16, v24, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x double> @llvm.riscv.vrgather.vv.mask.nxv8f64.i32(
    <vscale x 8 x double> %0,
    <vscale x 8 x double> %1,
    <vscale x 8 x i64> %2,
    <vscale x 8 x i1> %3,
    i32 %4, i32 1)

  ret <vscale x 8 x double> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vrgather.vx.nxv1i8.i32(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  i32,
  i32)

define <vscale x 1 x i8> @intrinsic_vrgather_vx_nxv1i8_nxv1i8_i32(<vscale x 1 x i8> %0, i32 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vx_nxv1i8_nxv1i8_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT:    vrgather.vx v9, v8, a0
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vrgather.vx.nxv1i8.i32(
    <vscale x 1 x i8> undef,
    <vscale x 1 x i8> %0,
    i32 %1,
    i32 %2)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vrgather.vx.mask.nxv1i8.i32(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  i32,
  <vscale x 1 x i1>,
  i32,
  i32)

define <vscale x 1 x i8> @intrinsic_vrgather_mask_vx_nxv1i8_nxv1i8_i32(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv1i8_nxv1i8_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT:    vrgather.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vrgather.vx.mask.nxv1i8.i32(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    i32 %2,
    <vscale x 1 x i1> %3,
    i32 %4, i32 1)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 2 x i8> @llvm.riscv.vrgather.vx.nxv2i8.i32(
  <vscale x 2 x i8>,
  <vscale x 2 x i8>,
  i32,
  i32)

define <vscale x 2 x i8> @intrinsic_vrgather_vx_nxv2i8_nxv2i8_i32(<vscale x 2 x i8> %0, i32 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vx_nxv2i8_nxv2i8_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT:    vrgather.vx v9, v8, a0
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vrgather.vx.nxv2i8.i32(
    <vscale x 2 x i8> undef,
    <vscale x 2 x i8> %0,
    i32 %1,
    i32 %2)

  ret <vscale x 2 x i8> %a
}

declare <vscale x 2 x i8> @llvm.riscv.vrgather.vx.mask.nxv2i8.i32(
  <vscale x 2 x i8>,
  <vscale x 2 x i8>,
  i32,
  <vscale x 2 x i1>,
  i32,
  i32)

define <vscale x 2 x i8> @intrinsic_vrgather_mask_vx_nxv2i8_nxv2i8_i32(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv2i8_nxv2i8_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT:    vrgather.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vrgather.vx.mask.nxv2i8.i32(
    <vscale x 2 x i8> %0,
    <vscale x 2 x i8> %1,
    i32 %2,
    <vscale x 2 x i1> %3,
    i32 %4, i32 1)

  ret <vscale x 2 x i8> %a
}

declare <vscale x 4 x i8> @llvm.riscv.vrgather.vx.nxv4i8.i32(
  <vscale x 4 x i8>,
  <vscale x 4 x i8>,
  i32,
  i32)

define <vscale x 4 x i8> @intrinsic_vrgather_vx_nxv4i8_nxv4i8_i32(<vscale x 4 x i8> %0, i32 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vx_nxv4i8_nxv4i8_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT:    vrgather.vx v9, v8, a0
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vrgather.vx.nxv4i8.i32(
    <vscale x 4 x i8> undef,
    <vscale x 4 x i8> %0,
    i32 %1,
    i32 %2)

  ret <vscale x 4 x i8> %a
}

declare <vscale x 4 x i8> @llvm.riscv.vrgather.vx.mask.nxv4i8.i32(
  <vscale x 4 x i8>,
  <vscale x 4 x i8>,
  i32,
  <vscale x 4 x i1>,
  i32,
  i32)

define <vscale x 4 x i8> @intrinsic_vrgather_mask_vx_nxv4i8_nxv4i8_i32(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv4i8_nxv4i8_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT:    vrgather.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vrgather.vx.mask.nxv4i8.i32(
    <vscale x 4 x i8> %0,
    <vscale x 4 x i8> %1,
    i32 %2,
    <vscale x 4 x i1> %3,
    i32 %4, i32 1)

  ret <vscale x 4 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vrgather.vx.nxv8i8.i32(
  <vscale x 8 x i8>,
  <vscale x 8 x i8>,
  i32,
  i32)

define <vscale x 8 x i8> @intrinsic_vrgather_vx_nxv8i8_nxv8i8_i32(<vscale x 8 x i8> %0, i32 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vx_nxv8i8_nxv8i8_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT:    vrgather.vx v9, v8, a0
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vrgather.vx.nxv8i8.i32(
    <vscale x 8 x i8> undef,
    <vscale x 8 x i8> %0,
    i32 %1,
    i32 %2)

  ret <vscale x 8 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vrgather.vx.mask.nxv8i8.i32(
  <vscale x 8 x i8>,
  <vscale x 8 x i8>,
  i32,
  <vscale x 8 x i1>,
  i32,
  i32)

define <vscale x 8 x i8> @intrinsic_vrgather_mask_vx_nxv8i8_nxv8i8_i32(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv8i8_nxv8i8_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT:    vrgather.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vrgather.vx.mask.nxv8i8.i32(
    <vscale x 8 x i8> %0,
    <vscale x 8 x i8> %1,
    i32 %2,
    <vscale x 8 x i1> %3,
    i32 %4, i32 1)

  ret <vscale x 8 x i8> %a
}

declare <vscale x 16 x i8> @llvm.riscv.vrgather.vx.nxv16i8.i32(
  <vscale x 16 x i8>,
  <vscale x 16 x i8>,
  i32,
  i32)

define <vscale x 16 x i8> @intrinsic_vrgather_vx_nxv16i8_nxv16i8_i32(<vscale x 16 x i8> %0, i32 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vx_nxv16i8_nxv16i8_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT:    vrgather.vx v10, v8, a0
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vrgather.vx.nxv16i8.i32(
    <vscale x 16 x i8> undef,
    <vscale x 16 x i8> %0,
    i32 %1,
    i32 %2)

  ret <vscale x 16 x i8> %a
}

declare <vscale x 16 x i8> @llvm.riscv.vrgather.vx.mask.nxv16i8.i32(
  <vscale x 16 x i8>,
  <vscale x 16 x i8>,
  i32,
  <vscale x 16 x i1>,
  i32,
  i32)

define <vscale x 16 x i8> @intrinsic_vrgather_mask_vx_nxv16i8_nxv16i8_i32(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv16i8_nxv16i8_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT:    vrgather.vx v8, v10, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vrgather.vx.mask.nxv16i8.i32(
    <vscale x 16 x i8> %0,
    <vscale x 16 x i8> %1,
    i32 %2,
    <vscale x 16 x i1> %3,
    i32 %4, i32 1)

  ret <vscale x 16 x i8> %a
}

declare <vscale x 32 x i8> @llvm.riscv.vrgather.vx.nxv32i8.i32(
  <vscale x 32 x i8>,
  <vscale x 32 x i8>,
  i32,
  i32)

define <vscale x 32 x i8> @intrinsic_vrgather_vx_nxv32i8_nxv32i8_i32(<vscale x 32 x i8> %0, i32 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vx_nxv32i8_nxv32i8_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
; CHECK-NEXT:    vrgather.vx v12, v8, a0
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vrgather.vx.nxv32i8.i32(
    <vscale x 32 x i8> undef,
    <vscale x 32 x i8> %0,
    i32 %1,
    i32 %2)

  ret <vscale x 32 x i8> %a
}

declare <vscale x 32 x i8> @llvm.riscv.vrgather.vx.mask.nxv32i8.i32(
  <vscale x 32 x i8>,
  <vscale x 32 x i8>,
  i32,
  <vscale x 32 x i1>,
  i32,
  i32)

define <vscale x 32 x i8> @intrinsic_vrgather_mask_vx_nxv32i8_nxv32i8_i32(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i32 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv32i8_nxv32i8_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT:    vrgather.vx v8, v12, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vrgather.vx.mask.nxv32i8.i32(
    <vscale x 32 x i8> %0,
    <vscale x 32 x i8> %1,
    i32 %2,
    <vscale x 32 x i1> %3,
    i32 %4, i32 1)

  ret <vscale x 32 x i8> %a
}

declare <vscale x 64 x i8> @llvm.riscv.vrgather.vx.nxv64i8.i32(
  <vscale x 64 x i8>,
  <vscale x 64 x i8>,
  i32,
  i32)

define <vscale x 64 x i8> @intrinsic_vrgather_vx_nxv64i8_nxv64i8_i32(<vscale x 64 x i8> %0, i32 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vx_nxv64i8_nxv64i8_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT:    vrgather.vx v16, v8, a0
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 64 x i8> @llvm.riscv.vrgather.vx.nxv64i8.i32(
    <vscale x 64 x i8> undef,
    <vscale x 64 x i8> %0,
    i32 %1,
    i32 %2)

  ret <vscale x 64 x i8> %a
}

declare <vscale x 64 x i8> @llvm.riscv.vrgather.vx.mask.nxv64i8.i32(
  <vscale x 64 x i8>,
  <vscale x 64 x i8>,
  i32,
  <vscale x 64 x i1>,
  i32,
  i32)

define <vscale x 64 x i8> @intrinsic_vrgather_mask_vx_nxv64i8_nxv64i8_i32(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i32 %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv64i8_nxv64i8_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
; CHECK-NEXT:    vrgather.vx v8, v16, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 64 x i8> @llvm.riscv.vrgather.vx.mask.nxv64i8.i32(
    <vscale x 64 x i8> %0,
    <vscale x 64 x i8> %1,
    i32 %2,
    <vscale x 64 x i1> %3,
    i32 %4, i32 1)

  ret <vscale x 64 x i8> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vrgather.vx.nxv1i16.i32(
  <vscale x 1 x i16>,
  <vscale x 1 x i16>,
  i32,
  i32)

define <vscale x 1 x i16> @intrinsic_vrgather_vx_nxv1i16_nxv1i16_i32(<vscale x 1 x i16> %0, i32 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vx_nxv1i16_nxv1i16_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vrgather.vx v9, v8, a0
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vrgather.vx.nxv1i16.i32(
    <vscale x 1 x i16> undef,
    <vscale x 1 x i16> %0,
    i32 %1,
    i32 %2)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vrgather.vx.mask.nxv1i16.i32(
  <vscale x 1 x i16>,
  <vscale x 1 x i16>,
  i32,
  <vscale x 1 x i1>,
  i32,
  i32)

define <vscale x 1 x i16> @intrinsic_vrgather_mask_vx_nxv1i16_nxv1i16_i32(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv1i16_nxv1i16_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT:    vrgather.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vrgather.vx.mask.nxv1i16.i32(
    <vscale x 1 x i16> %0,
    <vscale x 1 x i16> %1,
    i32 %2,
    <vscale x 1 x i1> %3,
    i32 %4, i32 1)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vrgather.vx.nxv2i16.i32(
  <vscale x 2 x i16>,
  <vscale x 2 x i16>,
  i32,
  i32)

define <vscale x 2 x i16> @intrinsic_vrgather_vx_nxv2i16_nxv2i16_i32(<vscale x 2 x i16> %0, i32 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vx_nxv2i16_nxv2i16_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT:    vrgather.vx v9, v8, a0
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vrgather.vx.nxv2i16.i32(
    <vscale x 2 x i16> undef,
    <vscale x 2 x i16> %0,
    i32 %1,
    i32 %2)

  ret <vscale x 2 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vrgather.vx.mask.nxv2i16.i32(
  <vscale x 2 x i16>,
  <vscale x 2 x i16>,
  i32,
  <vscale x 2 x i1>,
  i32,
  i32)

define <vscale x 2 x i16> @intrinsic_vrgather_mask_vx_nxv2i16_nxv2i16_i32(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv2i16_nxv2i16_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT:    vrgather.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vrgather.vx.mask.nxv2i16.i32(
    <vscale x 2 x i16> %0,
    <vscale x 2 x i16> %1,
    i32 %2,
    <vscale x 2 x i1> %3,
    i32 %4, i32 1)

  ret <vscale x 2 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vrgather.vx.nxv4i16.i32(
  <vscale x 4 x i16>,
  <vscale x 4 x i16>,
  i32,
  i32)

define <vscale x 4 x i16> @intrinsic_vrgather_vx_nxv4i16_nxv4i16_i32(<vscale x 4 x i16> %0, i32 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vx_nxv4i16_nxv4i16_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT:    vrgather.vx v9, v8, a0
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vrgather.vx.nxv4i16.i32(
    <vscale x 4 x i16> undef,
    <vscale x 4 x i16> %0,
    i32 %1,
    i32 %2)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vrgather.vx.mask.nxv4i16.i32(
  <vscale x 4 x i16>,
  <vscale x 4 x i16>,
  i32,
  <vscale x 4 x i1>,
  i32,
  i32)

define <vscale x 4 x i16> @intrinsic_vrgather_mask_vx_nxv4i16_nxv4i16_i32(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv4i16_nxv4i16_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT:    vrgather.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vrgather.vx.mask.nxv4i16.i32(
    <vscale x 4 x i16> %0,
    <vscale x 4 x i16> %1,
    i32 %2,
    <vscale x 4 x i1> %3,
    i32 %4, i32 1)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vrgather.vx.nxv8i16.i32(
  <vscale x 8 x i16>,
  <vscale x 8 x i16>,
  i32,
  i32)

define <vscale x 8 x i16> @intrinsic_vrgather_vx_nxv8i16_nxv8i16_i32(<vscale x 8 x i16> %0, i32 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vx_nxv8i16_nxv8i16_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT:    vrgather.vx v10, v8, a0
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vrgather.vx.nxv8i16.i32(
    <vscale x 8 x i16> undef,
    <vscale x 8 x i16> %0,
    i32 %1,
    i32 %2)

  ret <vscale x 8 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vrgather.vx.mask.nxv8i16.i32(
  <vscale x 8 x i16>,
  <vscale x 8 x i16>,
  i32,
  <vscale x 8 x i1>,
  i32,
  i32)

define <vscale x 8 x i16> @intrinsic_vrgather_mask_vx_nxv8i16_nxv8i16_i32(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv8i16_nxv8i16_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT:    vrgather.vx v8, v10, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vrgather.vx.mask.nxv8i16.i32(
    <vscale x 8 x i16> %0,
    <vscale x 8 x i16> %1,
    i32 %2,
    <vscale x 8 x i1> %3,
    i32 %4, i32 1)

  ret <vscale x 8 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vrgather.vx.nxv16i16.i32(
  <vscale x 16 x i16>,
  <vscale x 16 x i16>,
  i32,
  i32)

define <vscale x 16 x i16> @intrinsic_vrgather_vx_nxv16i16_nxv16i16_i32(<vscale x 16 x i16> %0, i32 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vx_nxv16i16_nxv16i16_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT:    vrgather.vx v12, v8, a0
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vrgather.vx.nxv16i16.i32(
    <vscale x 16 x i16> undef,
    <vscale x 16 x i16> %0,
    i32 %1,
    i32 %2)

  ret <vscale x 16 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vrgather.vx.mask.nxv16i16.i32(
  <vscale x 16 x i16>,
  <vscale x 16 x i16>,
  i32,
  <vscale x 16 x i1>,
  i32,
  i32)

define <vscale x 16 x i16> @intrinsic_vrgather_mask_vx_nxv16i16_nxv16i16_i32(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv16i16_nxv16i16_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT:    vrgather.vx v8, v12, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vrgather.vx.mask.nxv16i16.i32(
    <vscale x 16 x i16> %0,
    <vscale x 16 x i16> %1,
    i32 %2,
    <vscale x 16 x i1> %3,
    i32 %4, i32 1)

  ret <vscale x 16 x i16> %a
}

2171 declare <vscale x 32 x i16> @llvm.riscv.vrgather.vx.nxv32i16.i32(
2172 <vscale x 32 x i16>,
2173 <vscale x 32 x i16>,
2177 define <vscale x 32 x i16> @intrinsic_vrgather_vx_nxv32i16_nxv32i16_i32(<vscale x 32 x i16> %0, i32 %1, i32 %2) nounwind {
2178 ; CHECK-LABEL: intrinsic_vrgather_vx_nxv32i16_nxv32i16_i32:
2179 ; CHECK: # %bb.0: # %entry
2180 ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
2181 ; CHECK-NEXT: vrgather.vx v16, v8, a0
2182 ; CHECK-NEXT: vmv.v.v v8, v16
2185 %a = call <vscale x 32 x i16> @llvm.riscv.vrgather.vx.nxv32i16.i32(
2186 <vscale x 32 x i16> undef,
2187 <vscale x 32 x i16> %0,
2191 ret <vscale x 32 x i16> %a
2194 declare <vscale x 32 x i16> @llvm.riscv.vrgather.vx.mask.nxv32i16.i32(
2195 <vscale x 32 x i16>,
2196 <vscale x 32 x i16>,
2202 define <vscale x 32 x i16> @intrinsic_vrgather_mask_vx_nxv32i16_nxv32i16_i32(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i32 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
2203 ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv32i16_nxv32i16_i32:
2204 ; CHECK: # %bb.0: # %entry
2205 ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu
2206 ; CHECK-NEXT: vrgather.vx v8, v16, a0, v0.t
2209 %a = call <vscale x 32 x i16> @llvm.riscv.vrgather.vx.mask.nxv32i16.i32(
2210 <vscale x 32 x i16> %0,
2211 <vscale x 32 x i16> %1,
2213 <vscale x 32 x i1> %3,
2216 ret <vscale x 32 x i16> %a

declare <vscale x 1 x i32> @llvm.riscv.vrgather.vx.nxv1i32.i32(
  <vscale x 1 x i32>,
  <vscale x 1 x i32>,
  i32,
  i32);

define <vscale x 1 x i32> @intrinsic_vrgather_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vx_nxv1i32_nxv1i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT:    vrgather.vx v9, v8, a0
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vrgather.vx.nxv1i32.i32(
    <vscale x 1 x i32> undef,
    <vscale x 1 x i32> %0,
    i32 %1,
    i32 %2)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vrgather.vx.mask.nxv1i32.i32(
  <vscale x 1 x i32>,
  <vscale x 1 x i32>,
  i32,
  <vscale x 1 x i1>,
  i32,
  i32);

define <vscale x 1 x i32> @intrinsic_vrgather_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv1i32_nxv1i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT:    vrgather.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vrgather.vx.mask.nxv1i32.i32(
    <vscale x 1 x i32> %0,
    <vscale x 1 x i32> %1,
    i32 %2,
    <vscale x 1 x i1> %3,
    i32 %4, i32 1)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vrgather.vx.nxv2i32.i32(
  <vscale x 2 x i32>,
  <vscale x 2 x i32>,
  i32,
  i32);

define <vscale x 2 x i32> @intrinsic_vrgather_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vx_nxv2i32_nxv2i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT:    vrgather.vx v9, v8, a0
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vrgather.vx.nxv2i32.i32(
    <vscale x 2 x i32> undef,
    <vscale x 2 x i32> %0,
    i32 %1,
    i32 %2)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vrgather.vx.mask.nxv2i32.i32(
  <vscale x 2 x i32>,
  <vscale x 2 x i32>,
  i32,
  <vscale x 2 x i1>,
  i32,
  i32);

define <vscale x 2 x i32> @intrinsic_vrgather_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv2i32_nxv2i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT:    vrgather.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vrgather.vx.mask.nxv2i32.i32(
    <vscale x 2 x i32> %0,
    <vscale x 2 x i32> %1,
    i32 %2,
    <vscale x 2 x i1> %3,
    i32 %4, i32 1)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vrgather.vx.nxv4i32.i32(
  <vscale x 4 x i32>,
  <vscale x 4 x i32>,
  i32,
  i32);

define <vscale x 4 x i32> @intrinsic_vrgather_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vx_nxv4i32_nxv4i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT:    vrgather.vx v10, v8, a0
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vrgather.vx.nxv4i32.i32(
    <vscale x 4 x i32> undef,
    <vscale x 4 x i32> %0,
    i32 %1,
    i32 %2)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vrgather.vx.mask.nxv4i32.i32(
  <vscale x 4 x i32>,
  <vscale x 4 x i32>,
  i32,
  <vscale x 4 x i1>,
  i32,
  i32);

define <vscale x 4 x i32> @intrinsic_vrgather_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv4i32_nxv4i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT:    vrgather.vx v8, v10, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vrgather.vx.mask.nxv4i32.i32(
    <vscale x 4 x i32> %0,
    <vscale x 4 x i32> %1,
    i32 %2,
    <vscale x 4 x i1> %3,
    i32 %4, i32 1)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vrgather.vx.nxv8i32.i32(
  <vscale x 8 x i32>,
  <vscale x 8 x i32>,
  i32,
  i32);

define <vscale x 8 x i32> @intrinsic_vrgather_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vx_nxv8i32_nxv8i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT:    vrgather.vx v12, v8, a0
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vrgather.vx.nxv8i32.i32(
    <vscale x 8 x i32> undef,
    <vscale x 8 x i32> %0,
    i32 %1,
    i32 %2)

  ret <vscale x 8 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vrgather.vx.mask.nxv8i32.i32(
  <vscale x 8 x i32>,
  <vscale x 8 x i32>,
  i32,
  <vscale x 8 x i1>,
  i32,
  i32);

define <vscale x 8 x i32> @intrinsic_vrgather_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv8i32_nxv8i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT:    vrgather.vx v8, v12, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vrgather.vx.mask.nxv8i32.i32(
    <vscale x 8 x i32> %0,
    <vscale x 8 x i32> %1,
    i32 %2,
    <vscale x 8 x i1> %3,
    i32 %4, i32 1)

  ret <vscale x 8 x i32> %a
}

declare <vscale x 16 x i32> @llvm.riscv.vrgather.vx.nxv16i32.i32(
  <vscale x 16 x i32>,
  <vscale x 16 x i32>,
  i32,
  i32);

define <vscale x 16 x i32> @intrinsic_vrgather_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vx_nxv16i32_nxv16i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT:    vrgather.vx v16, v8, a0
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vrgather.vx.nxv16i32.i32(
    <vscale x 16 x i32> undef,
    <vscale x 16 x i32> %0,
    i32 %1,
    i32 %2)

  ret <vscale x 16 x i32> %a
}

declare <vscale x 16 x i32> @llvm.riscv.vrgather.vx.mask.nxv16i32.i32(
  <vscale x 16 x i32>,
  <vscale x 16 x i32>,
  i32,
  <vscale x 16 x i1>,
  i32,
  i32);

define <vscale x 16 x i32> @intrinsic_vrgather_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv16i32_nxv16i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
; CHECK-NEXT:    vrgather.vx v8, v16, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vrgather.vx.mask.nxv16i32.i32(
    <vscale x 16 x i32> %0,
    <vscale x 16 x i32> %1,
    i32 %2,
    <vscale x 16 x i1> %3,
    i32 %4, i32 1)

  ret <vscale x 16 x i32> %a
}
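
; The floating-point gathers below follow the same pattern as the integer
; ones: only the element type of the data operands changes (half/float/
; double); the index operand stays i32.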

declare <vscale x 1 x half> @llvm.riscv.vrgather.vx.nxv1f16.i32(
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  i32,
  i32);

define <vscale x 1 x half> @intrinsic_vrgather_vx_nxv1f16_nxv1f16_i32(<vscale x 1 x half> %0, i32 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vx_nxv1f16_nxv1f16_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vrgather.vx v9, v8, a0
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vrgather.vx.nxv1f16.i32(
    <vscale x 1 x half> undef,
    <vscale x 1 x half> %0,
    i32 %1,
    i32 %2)

  ret <vscale x 1 x half> %a
}

declare <vscale x 1 x half> @llvm.riscv.vrgather.vx.mask.nxv1f16.i32(
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  i32,
  <vscale x 1 x i1>,
  i32,
  i32);

define <vscale x 1 x half> @intrinsic_vrgather_mask_vx_nxv1f16_nxv1f16_i32(<vscale x 1 x half> %0, <vscale x 1 x half> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv1f16_nxv1f16_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT:    vrgather.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vrgather.vx.mask.nxv1f16.i32(
    <vscale x 1 x half> %0,
    <vscale x 1 x half> %1,
    i32 %2,
    <vscale x 1 x i1> %3,
    i32 %4, i32 1)

  ret <vscale x 1 x half> %a
}

declare <vscale x 2 x half> @llvm.riscv.vrgather.vx.nxv2f16.i32(
  <vscale x 2 x half>,
  <vscale x 2 x half>,
  i32,
  i32);

define <vscale x 2 x half> @intrinsic_vrgather_vx_nxv2f16_nxv2f16_i32(<vscale x 2 x half> %0, i32 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vx_nxv2f16_nxv2f16_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT:    vrgather.vx v9, v8, a0
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x half> @llvm.riscv.vrgather.vx.nxv2f16.i32(
    <vscale x 2 x half> undef,
    <vscale x 2 x half> %0,
    i32 %1,
    i32 %2)

  ret <vscale x 2 x half> %a
}

declare <vscale x 2 x half> @llvm.riscv.vrgather.vx.mask.nxv2f16.i32(
  <vscale x 2 x half>,
  <vscale x 2 x half>,
  i32,
  <vscale x 2 x i1>,
  i32,
  i32);

define <vscale x 2 x half> @intrinsic_vrgather_mask_vx_nxv2f16_nxv2f16_i32(<vscale x 2 x half> %0, <vscale x 2 x half> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv2f16_nxv2f16_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT:    vrgather.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x half> @llvm.riscv.vrgather.vx.mask.nxv2f16.i32(
    <vscale x 2 x half> %0,
    <vscale x 2 x half> %1,
    i32 %2,
    <vscale x 2 x i1> %3,
    i32 %4, i32 1)

  ret <vscale x 2 x half> %a
}

declare <vscale x 4 x half> @llvm.riscv.vrgather.vx.nxv4f16.i32(
  <vscale x 4 x half>,
  <vscale x 4 x half>,
  i32,
  i32);

define <vscale x 4 x half> @intrinsic_vrgather_vx_nxv4f16_nxv4f16_i32(<vscale x 4 x half> %0, i32 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vx_nxv4f16_nxv4f16_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT:    vrgather.vx v9, v8, a0
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x half> @llvm.riscv.vrgather.vx.nxv4f16.i32(
    <vscale x 4 x half> undef,
    <vscale x 4 x half> %0,
    i32 %1,
    i32 %2)

  ret <vscale x 4 x half> %a
}

declare <vscale x 4 x half> @llvm.riscv.vrgather.vx.mask.nxv4f16.i32(
  <vscale x 4 x half>,
  <vscale x 4 x half>,
  i32,
  <vscale x 4 x i1>,
  i32,
  i32);

define <vscale x 4 x half> @intrinsic_vrgather_mask_vx_nxv4f16_nxv4f16_i32(<vscale x 4 x half> %0, <vscale x 4 x half> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv4f16_nxv4f16_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT:    vrgather.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x half> @llvm.riscv.vrgather.vx.mask.nxv4f16.i32(
    <vscale x 4 x half> %0,
    <vscale x 4 x half> %1,
    i32 %2,
    <vscale x 4 x i1> %3,
    i32 %4, i32 1)

  ret <vscale x 4 x half> %a
}

declare <vscale x 8 x half> @llvm.riscv.vrgather.vx.nxv8f16.i32(
  <vscale x 8 x half>,
  <vscale x 8 x half>,
  i32,
  i32);

define <vscale x 8 x half> @intrinsic_vrgather_vx_nxv8f16_nxv8f16_i32(<vscale x 8 x half> %0, i32 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vx_nxv8f16_nxv8f16_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT:    vrgather.vx v10, v8, a0
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x half> @llvm.riscv.vrgather.vx.nxv8f16.i32(
    <vscale x 8 x half> undef,
    <vscale x 8 x half> %0,
    i32 %1,
    i32 %2)

  ret <vscale x 8 x half> %a
}

declare <vscale x 8 x half> @llvm.riscv.vrgather.vx.mask.nxv8f16.i32(
  <vscale x 8 x half>,
  <vscale x 8 x half>,
  i32,
  <vscale x 8 x i1>,
  i32,
  i32);

define <vscale x 8 x half> @intrinsic_vrgather_mask_vx_nxv8f16_nxv8f16_i32(<vscale x 8 x half> %0, <vscale x 8 x half> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv8f16_nxv8f16_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT:    vrgather.vx v8, v10, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x half> @llvm.riscv.vrgather.vx.mask.nxv8f16.i32(
    <vscale x 8 x half> %0,
    <vscale x 8 x half> %1,
    i32 %2,
    <vscale x 8 x i1> %3,
    i32 %4, i32 1)

  ret <vscale x 8 x half> %a
}

declare <vscale x 16 x half> @llvm.riscv.vrgather.vx.nxv16f16.i32(
  <vscale x 16 x half>,
  <vscale x 16 x half>,
  i32,
  i32);

define <vscale x 16 x half> @intrinsic_vrgather_vx_nxv16f16_nxv16f16_i32(<vscale x 16 x half> %0, i32 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vx_nxv16f16_nxv16f16_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT:    vrgather.vx v12, v8, a0
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x half> @llvm.riscv.vrgather.vx.nxv16f16.i32(
    <vscale x 16 x half> undef,
    <vscale x 16 x half> %0,
    i32 %1,
    i32 %2)

  ret <vscale x 16 x half> %a
}

declare <vscale x 16 x half> @llvm.riscv.vrgather.vx.mask.nxv16f16.i32(
  <vscale x 16 x half>,
  <vscale x 16 x half>,
  i32,
  <vscale x 16 x i1>,
  i32,
  i32);

define <vscale x 16 x half> @intrinsic_vrgather_mask_vx_nxv16f16_nxv16f16_i32(<vscale x 16 x half> %0, <vscale x 16 x half> %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv16f16_nxv16f16_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT:    vrgather.vx v8, v12, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x half> @llvm.riscv.vrgather.vx.mask.nxv16f16.i32(
    <vscale x 16 x half> %0,
    <vscale x 16 x half> %1,
    i32 %2,
    <vscale x 16 x i1> %3,
    i32 %4, i32 1)

  ret <vscale x 16 x half> %a
}

declare <vscale x 32 x half> @llvm.riscv.vrgather.vx.nxv32f16.i32(
  <vscale x 32 x half>,
  <vscale x 32 x half>,
  i32,
  i32);

define <vscale x 32 x half> @intrinsic_vrgather_vx_nxv32f16_nxv32f16_i32(<vscale x 32 x half> %0, i32 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vx_nxv32f16_nxv32f16_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
; CHECK-NEXT:    vrgather.vx v16, v8, a0
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x half> @llvm.riscv.vrgather.vx.nxv32f16.i32(
    <vscale x 32 x half> undef,
    <vscale x 32 x half> %0,
    i32 %1,
    i32 %2)

  ret <vscale x 32 x half> %a
}

declare <vscale x 32 x half> @llvm.riscv.vrgather.vx.mask.nxv32f16.i32(
  <vscale x 32 x half>,
  <vscale x 32 x half>,
  i32,
  <vscale x 32 x i1>,
  i32,
  i32);

define <vscale x 32 x half> @intrinsic_vrgather_mask_vx_nxv32f16_nxv32f16_i32(<vscale x 32 x half> %0, <vscale x 32 x half> %1, i32 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv32f16_nxv32f16_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
; CHECK-NEXT:    vrgather.vx v8, v16, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x half> @llvm.riscv.vrgather.vx.mask.nxv32f16.i32(
    <vscale x 32 x half> %0,
    <vscale x 32 x half> %1,
    i32 %2,
    <vscale x 32 x i1> %3,
    i32 %4, i32 1)

  ret <vscale x 32 x half> %a
}

declare <vscale x 1 x float> @llvm.riscv.vrgather.vx.nxv1f32.i32(
  <vscale x 1 x float>,
  <vscale x 1 x float>,
  i32,
  i32);

define <vscale x 1 x float> @intrinsic_vrgather_vx_nxv1f32_nxv1f32_i32(<vscale x 1 x float> %0, i32 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vx_nxv1f32_nxv1f32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT:    vrgather.vx v9, v8, a0
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vrgather.vx.nxv1f32.i32(
    <vscale x 1 x float> undef,
    <vscale x 1 x float> %0,
    i32 %1,
    i32 %2)

  ret <vscale x 1 x float> %a
}

declare <vscale x 1 x float> @llvm.riscv.vrgather.vx.mask.nxv1f32.i32(
  <vscale x 1 x float>,
  <vscale x 1 x float>,
  i32,
  <vscale x 1 x i1>,
  i32,
  i32);

define <vscale x 1 x float> @intrinsic_vrgather_mask_vx_nxv1f32_nxv1f32_i32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv1f32_nxv1f32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT:    vrgather.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vrgather.vx.mask.nxv1f32.i32(
    <vscale x 1 x float> %0,
    <vscale x 1 x float> %1,
    i32 %2,
    <vscale x 1 x i1> %3,
    i32 %4, i32 1)

  ret <vscale x 1 x float> %a
}

declare <vscale x 2 x float> @llvm.riscv.vrgather.vx.nxv2f32.i32(
  <vscale x 2 x float>,
  <vscale x 2 x float>,
  i32,
  i32);

define <vscale x 2 x float> @intrinsic_vrgather_vx_nxv2f32_nxv2f32_i32(<vscale x 2 x float> %0, i32 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vx_nxv2f32_nxv2f32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT:    vrgather.vx v9, v8, a0
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vrgather.vx.nxv2f32.i32(
    <vscale x 2 x float> undef,
    <vscale x 2 x float> %0,
    i32 %1,
    i32 %2)

  ret <vscale x 2 x float> %a
}

declare <vscale x 2 x float> @llvm.riscv.vrgather.vx.mask.nxv2f32.i32(
  <vscale x 2 x float>,
  <vscale x 2 x float>,
  i32,
  <vscale x 2 x i1>,
  i32,
  i32);

define <vscale x 2 x float> @intrinsic_vrgather_mask_vx_nxv2f32_nxv2f32_i32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv2f32_nxv2f32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT:    vrgather.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vrgather.vx.mask.nxv2f32.i32(
    <vscale x 2 x float> %0,
    <vscale x 2 x float> %1,
    i32 %2,
    <vscale x 2 x i1> %3,
    i32 %4, i32 1)

  ret <vscale x 2 x float> %a
}

declare <vscale x 4 x float> @llvm.riscv.vrgather.vx.nxv4f32.i32(
  <vscale x 4 x float>,
  <vscale x 4 x float>,
  i32,
  i32);

define <vscale x 4 x float> @intrinsic_vrgather_vx_nxv4f32_nxv4f32_i32(<vscale x 4 x float> %0, i32 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vx_nxv4f32_nxv4f32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT:    vrgather.vx v10, v8, a0
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x float> @llvm.riscv.vrgather.vx.nxv4f32.i32(
    <vscale x 4 x float> undef,
    <vscale x 4 x float> %0,
    i32 %1,
    i32 %2)

  ret <vscale x 4 x float> %a
}

declare <vscale x 4 x float> @llvm.riscv.vrgather.vx.mask.nxv4f32.i32(
  <vscale x 4 x float>,
  <vscale x 4 x float>,
  i32,
  <vscale x 4 x i1>,
  i32,
  i32);

define <vscale x 4 x float> @intrinsic_vrgather_mask_vx_nxv4f32_nxv4f32_i32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv4f32_nxv4f32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT:    vrgather.vx v8, v10, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x float> @llvm.riscv.vrgather.vx.mask.nxv4f32.i32(
    <vscale x 4 x float> %0,
    <vscale x 4 x float> %1,
    i32 %2,
    <vscale x 4 x i1> %3,
    i32 %4, i32 1)

  ret <vscale x 4 x float> %a
}

declare <vscale x 8 x float> @llvm.riscv.vrgather.vx.nxv8f32.i32(
  <vscale x 8 x float>,
  <vscale x 8 x float>,
  i32,
  i32);

define <vscale x 8 x float> @intrinsic_vrgather_vx_nxv8f32_nxv8f32_i32(<vscale x 8 x float> %0, i32 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vx_nxv8f32_nxv8f32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT:    vrgather.vx v12, v8, a0
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x float> @llvm.riscv.vrgather.vx.nxv8f32.i32(
    <vscale x 8 x float> undef,
    <vscale x 8 x float> %0,
    i32 %1,
    i32 %2)

  ret <vscale x 8 x float> %a
}

declare <vscale x 8 x float> @llvm.riscv.vrgather.vx.mask.nxv8f32.i32(
  <vscale x 8 x float>,
  <vscale x 8 x float>,
  i32,
  <vscale x 8 x i1>,
  i32,
  i32);

define <vscale x 8 x float> @intrinsic_vrgather_mask_vx_nxv8f32_nxv8f32_i32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv8f32_nxv8f32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT:    vrgather.vx v8, v12, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x float> @llvm.riscv.vrgather.vx.mask.nxv8f32.i32(
    <vscale x 8 x float> %0,
    <vscale x 8 x float> %1,
    i32 %2,
    <vscale x 8 x i1> %3,
    i32 %4, i32 1)

  ret <vscale x 8 x float> %a
}

declare <vscale x 16 x float> @llvm.riscv.vrgather.vx.nxv16f32.i32(
  <vscale x 16 x float>,
  <vscale x 16 x float>,
  i32,
  i32);

define <vscale x 16 x float> @intrinsic_vrgather_vx_nxv16f32_nxv16f32_i32(<vscale x 16 x float> %0, i32 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vx_nxv16f32_nxv16f32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT:    vrgather.vx v16, v8, a0
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x float> @llvm.riscv.vrgather.vx.nxv16f32.i32(
    <vscale x 16 x float> undef,
    <vscale x 16 x float> %0,
    i32 %1,
    i32 %2)

  ret <vscale x 16 x float> %a
}

declare <vscale x 16 x float> @llvm.riscv.vrgather.vx.mask.nxv16f32.i32(
  <vscale x 16 x float>,
  <vscale x 16 x float>,
  i32,
  <vscale x 16 x i1>,
  i32,
  i32);

define <vscale x 16 x float> @intrinsic_vrgather_mask_vx_nxv16f32_nxv16f32_i32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv16f32_nxv16f32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
; CHECK-NEXT:    vrgather.vx v8, v16, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x float> @llvm.riscv.vrgather.vx.mask.nxv16f32.i32(
    <vscale x 16 x float> %0,
    <vscale x 16 x float> %1,
    i32 %2,
    <vscale x 16 x i1> %3,
    i32 %4, i32 1)

  ret <vscale x 16 x float> %a
}

declare <vscale x 1 x double> @llvm.riscv.vrgather.vx.nxv1f64.i32(
  <vscale x 1 x double>,
  <vscale x 1 x double>,
  i32,
  i32);

define <vscale x 1 x double> @intrinsic_vrgather_vx_nxv1f64_nxv1f64_i32(<vscale x 1 x double> %0, i32 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vx_nxv1f64_nxv1f64_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT:    vrgather.vx v9, v8, a0
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vrgather.vx.nxv1f64.i32(
    <vscale x 1 x double> undef,
    <vscale x 1 x double> %0,
    i32 %1,
    i32 %2)

  ret <vscale x 1 x double> %a
}

declare <vscale x 1 x double> @llvm.riscv.vrgather.vx.mask.nxv1f64.i32(
  <vscale x 1 x double>,
  <vscale x 1 x double>,
  i32,
  <vscale x 1 x i1>,
  i32,
  i32);

define <vscale x 1 x double> @intrinsic_vrgather_mask_vx_nxv1f64_nxv1f64_i32(<vscale x 1 x double> %0, <vscale x 1 x double> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv1f64_nxv1f64_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT:    vrgather.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vrgather.vx.mask.nxv1f64.i32(
    <vscale x 1 x double> %0,
    <vscale x 1 x double> %1,
    i32 %2,
    <vscale x 1 x i1> %3,
    i32 %4, i32 1)

  ret <vscale x 1 x double> %a
}

declare <vscale x 2 x double> @llvm.riscv.vrgather.vx.nxv2f64.i32(
  <vscale x 2 x double>,
  <vscale x 2 x double>,
  i32,
  i32);

define <vscale x 2 x double> @intrinsic_vrgather_vx_nxv2f64_nxv2f64_i32(<vscale x 2 x double> %0, i32 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vx_nxv2f64_nxv2f64_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT:    vrgather.vx v10, v8, a0
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x double> @llvm.riscv.vrgather.vx.nxv2f64.i32(
    <vscale x 2 x double> undef,
    <vscale x 2 x double> %0,
    i32 %1,
    i32 %2)

  ret <vscale x 2 x double> %a
}

declare <vscale x 2 x double> @llvm.riscv.vrgather.vx.mask.nxv2f64.i32(
  <vscale x 2 x double>,
  <vscale x 2 x double>,
  i32,
  <vscale x 2 x i1>,
  i32,
  i32);

define <vscale x 2 x double> @intrinsic_vrgather_mask_vx_nxv2f64_nxv2f64_i32(<vscale x 2 x double> %0, <vscale x 2 x double> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv2f64_nxv2f64_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT:    vrgather.vx v8, v10, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x double> @llvm.riscv.vrgather.vx.mask.nxv2f64.i32(
    <vscale x 2 x double> %0,
    <vscale x 2 x double> %1,
    i32 %2,
    <vscale x 2 x i1> %3,
    i32 %4, i32 1)

  ret <vscale x 2 x double> %a
}

declare <vscale x 4 x double> @llvm.riscv.vrgather.vx.nxv4f64.i32(
  <vscale x 4 x double>,
  <vscale x 4 x double>,
  i32,
  i32);

define <vscale x 4 x double> @intrinsic_vrgather_vx_nxv4f64_nxv4f64_i32(<vscale x 4 x double> %0, i32 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vx_nxv4f64_nxv4f64_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
; CHECK-NEXT:    vrgather.vx v12, v8, a0
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x double> @llvm.riscv.vrgather.vx.nxv4f64.i32(
    <vscale x 4 x double> undef,
    <vscale x 4 x double> %0,
    i32 %1,
    i32 %2)

  ret <vscale x 4 x double> %a
}

declare <vscale x 4 x double> @llvm.riscv.vrgather.vx.mask.nxv4f64.i32(
  <vscale x 4 x double>,
  <vscale x 4 x double>,
  i32,
  <vscale x 4 x i1>,
  i32,
  i32);

define <vscale x 4 x double> @intrinsic_vrgather_mask_vx_nxv4f64_nxv4f64_i32(<vscale x 4 x double> %0, <vscale x 4 x double> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv4f64_nxv4f64_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
; CHECK-NEXT:    vrgather.vx v8, v12, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x double> @llvm.riscv.vrgather.vx.mask.nxv4f64.i32(
    <vscale x 4 x double> %0,
    <vscale x 4 x double> %1,
    i32 %2,
    <vscale x 4 x i1> %3,
    i32 %4, i32 1)

  ret <vscale x 4 x double> %a
}

declare <vscale x 8 x double> @llvm.riscv.vrgather.vx.nxv8f64.i32(
  <vscale x 8 x double>,
  <vscale x 8 x double>,
  i32,
  i32);

define <vscale x 8 x double> @intrinsic_vrgather_vx_nxv8f64_nxv8f64_i32(<vscale x 8 x double> %0, i32 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vx_nxv8f64_nxv8f64_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
; CHECK-NEXT:    vrgather.vx v16, v8, a0
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x double> @llvm.riscv.vrgather.vx.nxv8f64.i32(
    <vscale x 8 x double> undef,
    <vscale x 8 x double> %0,
    i32 %1,
    i32 %2)

  ret <vscale x 8 x double> %a
}

declare <vscale x 8 x double> @llvm.riscv.vrgather.vx.mask.nxv8f64.i32(
  <vscale x 8 x double>,
  <vscale x 8 x double>,
  i32,
  <vscale x 8 x i1>,
  i32,
  i32);

define <vscale x 8 x double> @intrinsic_vrgather_mask_vx_nxv8f64_nxv8f64_i32(<vscale x 8 x double> %0, <vscale x 8 x double> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv8f64_nxv8f64_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
; CHECK-NEXT:    vrgather.vx v8, v16, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x double> @llvm.riscv.vrgather.vx.mask.nxv8f64.i32(
    <vscale x 8 x double> %0,
    <vscale x 8 x double> %1,
    i32 %2,
    <vscale x 8 x i1> %3,
    i32 %4, i32 1)

  ret <vscale x 8 x double> %a
}
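
; The .vi tests below exercise the immediate form: they call the same
; vrgather.vx intrinsics with a constant index of 9, which lowers to
; vrgather.vi.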
define <vscale x 1 x i8> @intrinsic_vrgather_vi_nxv1i8_nxv1i8_i32(<vscale x 1 x i8> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vi_nxv1i8_nxv1i8_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vrgather.vi v9, v8, 9
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vrgather.vx.nxv1i8.i32(
    <vscale x 1 x i8> undef,
    <vscale x 1 x i8> %0,
    i32 9,
    i32 %1)

  ret <vscale x 1 x i8> %a
}

define <vscale x 1 x i8> @intrinsic_vrgather_mask_vi_nxv1i8_nxv1i8_i32(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv1i8_nxv1i8_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT:    vrgather.vi v8, v9, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vrgather.vx.mask.nxv1i8.i32(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    i32 9,
    <vscale x 1 x i1> %2,
    i32 %3, i32 1)

  ret <vscale x 1 x i8> %a
}

define <vscale x 2 x i8> @intrinsic_vrgather_vi_nxv2i8_nxv2i8_i32(<vscale x 2 x i8> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vi_nxv2i8_nxv2i8_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT:    vrgather.vi v9, v8, 9
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vrgather.vx.nxv2i8.i32(
    <vscale x 2 x i8> undef,
    <vscale x 2 x i8> %0,
    i32 9,
    i32 %1)

  ret <vscale x 2 x i8> %a
}

define <vscale x 2 x i8> @intrinsic_vrgather_mask_vi_nxv2i8_nxv2i8_i32(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv2i8_nxv2i8_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
; CHECK-NEXT:    vrgather.vi v8, v9, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vrgather.vx.mask.nxv2i8.i32(
    <vscale x 2 x i8> %0,
    <vscale x 2 x i8> %1,
    i32 9,
    <vscale x 2 x i1> %2,
    i32 %3, i32 1)

  ret <vscale x 2 x i8> %a
}

define <vscale x 4 x i8> @intrinsic_vrgather_vi_nxv4i8_nxv4i8_i32(<vscale x 4 x i8> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vi_nxv4i8_nxv4i8_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT:    vrgather.vi v9, v8, 9
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vrgather.vx.nxv4i8.i32(
    <vscale x 4 x i8> undef,
    <vscale x 4 x i8> %0,
    i32 9,
    i32 %1)

  ret <vscale x 4 x i8> %a
}

define <vscale x 4 x i8> @intrinsic_vrgather_mask_vi_nxv4i8_nxv4i8_i32(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv4i8_nxv4i8_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
; CHECK-NEXT:    vrgather.vi v8, v9, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vrgather.vx.mask.nxv4i8.i32(
    <vscale x 4 x i8> %0,
    <vscale x 4 x i8> %1,
    i32 9,
    <vscale x 4 x i1> %2,
    i32 %3, i32 1)

  ret <vscale x 4 x i8> %a
}

define <vscale x 8 x i8> @intrinsic_vrgather_vi_nxv8i8_nxv8i8_i32(<vscale x 8 x i8> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vi_nxv8i8_nxv8i8_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT:    vrgather.vi v9, v8, 9
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vrgather.vx.nxv8i8.i32(
    <vscale x 8 x i8> undef,
    <vscale x 8 x i8> %0,
    i32 9,
    i32 %1)

  ret <vscale x 8 x i8> %a
}

define <vscale x 8 x i8> @intrinsic_vrgather_mask_vi_nxv8i8_nxv8i8_i32(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv8i8_nxv8i8_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
; CHECK-NEXT:    vrgather.vi v8, v9, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vrgather.vx.mask.nxv8i8.i32(
    <vscale x 8 x i8> %0,
    <vscale x 8 x i8> %1,
    i32 9,
    <vscale x 8 x i1> %2,
    i32 %3, i32 1)

  ret <vscale x 8 x i8> %a
}

define <vscale x 16 x i8> @intrinsic_vrgather_vi_nxv16i8_nxv16i8_i32(<vscale x 16 x i8> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vi_nxv16i8_nxv16i8_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT:    vrgather.vi v10, v8, 9
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vrgather.vx.nxv16i8.i32(
    <vscale x 16 x i8> undef,
    <vscale x 16 x i8> %0,
    i32 9,
    i32 %1)

  ret <vscale x 16 x i8> %a
}

define <vscale x 16 x i8> @intrinsic_vrgather_mask_vi_nxv16i8_nxv16i8_i32(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv16i8_nxv16i8_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
; CHECK-NEXT:    vrgather.vi v8, v10, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vrgather.vx.mask.nxv16i8.i32(
    <vscale x 16 x i8> %0,
    <vscale x 16 x i8> %1,
    i32 9,
    <vscale x 16 x i1> %2,
    i32 %3, i32 1)

  ret <vscale x 16 x i8> %a
}

define <vscale x 32 x i8> @intrinsic_vrgather_vi_nxv32i8_nxv32i8_i32(<vscale x 32 x i8> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vi_nxv32i8_nxv32i8_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT:    vrgather.vi v12, v8, 9
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vrgather.vx.nxv32i8.i32(
    <vscale x 32 x i8> undef,
    <vscale x 32 x i8> %0,
    i32 9,
    i32 %1)

  ret <vscale x 32 x i8> %a
}

define <vscale x 32 x i8> @intrinsic_vrgather_mask_vi_nxv32i8_nxv32i8_i32(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv32i8_nxv32i8_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
; CHECK-NEXT:    vrgather.vi v8, v12, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vrgather.vx.mask.nxv32i8.i32(
    <vscale x 32 x i8> %0,
    <vscale x 32 x i8> %1,
    i32 9,
    <vscale x 32 x i1> %2,
    i32 %3, i32 1)

  ret <vscale x 32 x i8> %a
}

define <vscale x 64 x i8> @intrinsic_vrgather_vi_nxv64i8_nxv64i8_i32(<vscale x 64 x i8> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vi_nxv64i8_nxv64i8_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT:    vrgather.vi v16, v8, 9
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 64 x i8> @llvm.riscv.vrgather.vx.nxv64i8.i32(
    <vscale x 64 x i8> undef,
    <vscale x 64 x i8> %0,
    i32 9,
    i32 %1)

  ret <vscale x 64 x i8> %a
}

define <vscale x 64 x i8> @intrinsic_vrgather_mask_vi_nxv64i8_nxv64i8_i32(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv64i8_nxv64i8_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, mu
; CHECK-NEXT:    vrgather.vi v8, v16, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 64 x i8> @llvm.riscv.vrgather.vx.mask.nxv64i8.i32(
    <vscale x 64 x i8> %0,
    <vscale x 64 x i8> %1,
    i32 9,
    <vscale x 64 x i1> %2,
    i32 %3, i32 1)

  ret <vscale x 64 x i8> %a
}

define <vscale x 1 x i16> @intrinsic_vrgather_vi_nxv1i16_nxv1i16_i32(<vscale x 1 x i16> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vi_nxv1i16_nxv1i16_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vrgather.vi v9, v8, 9
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vrgather.vx.nxv1i16.i32(
    <vscale x 1 x i16> undef,
    <vscale x 1 x i16> %0,
    i32 9,
    i32 %1)

  ret <vscale x 1 x i16> %a
}

define <vscale x 1 x i16> @intrinsic_vrgather_mask_vi_nxv1i16_nxv1i16_i32(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv1i16_nxv1i16_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT:    vrgather.vi v8, v9, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vrgather.vx.mask.nxv1i16.i32(
    <vscale x 1 x i16> %0,
    <vscale x 1 x i16> %1,
    i32 9,
    <vscale x 1 x i1> %2,
    i32 %3, i32 1)

  ret <vscale x 1 x i16> %a
}

define <vscale x 2 x i16> @intrinsic_vrgather_vi_nxv2i16_nxv2i16_i32(<vscale x 2 x i16> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vi_nxv2i16_nxv2i16_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vrgather.vi v9, v8, 9
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vrgather.vx.nxv2i16.i32(
    <vscale x 2 x i16> undef,
    <vscale x 2 x i16> %0,
    i32 9,
    i32 %1)

  ret <vscale x 2 x i16> %a
}

define <vscale x 2 x i16> @intrinsic_vrgather_mask_vi_nxv2i16_nxv2i16_i32(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv2i16_nxv2i16_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT:    vrgather.vi v8, v9, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vrgather.vx.mask.nxv2i16.i32(
    <vscale x 2 x i16> %0,
    <vscale x 2 x i16> %1,
    i32 9,
    <vscale x 2 x i1> %2,
    i32 %3, i32 1)

  ret <vscale x 2 x i16> %a
}

define <vscale x 4 x i16> @intrinsic_vrgather_vi_nxv4i16_nxv4i16_i32(<vscale x 4 x i16> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vi_nxv4i16_nxv4i16_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vrgather.vi v9, v8, 9
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vrgather.vx.nxv4i16.i32(
    <vscale x 4 x i16> undef,
    <vscale x 4 x i16> %0,
    i32 9,
    i32 %1)

  ret <vscale x 4 x i16> %a
}

define <vscale x 4 x i16> @intrinsic_vrgather_mask_vi_nxv4i16_nxv4i16_i32(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv4i16_nxv4i16_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT:    vrgather.vi v8, v9, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vrgather.vx.mask.nxv4i16.i32(
    <vscale x 4 x i16> %0,
    <vscale x 4 x i16> %1,
    i32 9,
    <vscale x 4 x i1> %2,
    i32 %3, i32 1)

  ret <vscale x 4 x i16> %a
}

define <vscale x 8 x i16> @intrinsic_vrgather_vi_nxv8i16_nxv8i16_i32(<vscale x 8 x i16> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vi_nxv8i16_nxv8i16_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    vrgather.vi v10, v8, 9
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vrgather.vx.nxv8i16.i32(
    <vscale x 8 x i16> undef,
    <vscale x 8 x i16> %0,
    i32 9,
    i32 %1)

  ret <vscale x 8 x i16> %a
}

define <vscale x 8 x i16> @intrinsic_vrgather_mask_vi_nxv8i16_nxv8i16_i32(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv8i16_nxv8i16_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT:    vrgather.vi v8, v10, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vrgather.vx.mask.nxv8i16.i32(
    <vscale x 8 x i16> %0,
    <vscale x 8 x i16> %1,
    i32 9,
    <vscale x 8 x i1> %2,
    i32 %3, i32 1)

  ret <vscale x 8 x i16> %a
}

define <vscale x 16 x i16> @intrinsic_vrgather_vi_nxv16i16_nxv16i16_i32(<vscale x 16 x i16> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vi_nxv16i16_nxv16i16_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT:    vrgather.vi v12, v8, 9
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vrgather.vx.nxv16i16.i32(
    <vscale x 16 x i16> undef,
    <vscale x 16 x i16> %0,
    i32 9,
    i32 %1)

  ret <vscale x 16 x i16> %a
}

define <vscale x 16 x i16> @intrinsic_vrgather_mask_vi_nxv16i16_nxv16i16_i32(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv16i16_nxv16i16_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT:    vrgather.vi v8, v12, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vrgather.vx.mask.nxv16i16.i32(
    <vscale x 16 x i16> %0,
    <vscale x 16 x i16> %1,
    i32 9,
    <vscale x 16 x i1> %2,
    i32 %3, i32 1)

  ret <vscale x 16 x i16> %a
}

define <vscale x 32 x i16> @intrinsic_vrgather_vi_nxv32i16_nxv32i16_i32(<vscale x 32 x i16> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vi_nxv32i16_nxv32i16_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT:    vrgather.vi v16, v8, 9
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vrgather.vx.nxv32i16.i32(
    <vscale x 32 x i16> undef,
    <vscale x 32 x i16> %0,
    i32 9,
    i32 %1)

  ret <vscale x 32 x i16> %a
}

define <vscale x 32 x i16> @intrinsic_vrgather_mask_vi_nxv32i16_nxv32i16_i32(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv32i16_nxv32i16_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
; CHECK-NEXT:    vrgather.vi v8, v16, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vrgather.vx.mask.nxv32i16.i32(
    <vscale x 32 x i16> %0,
    <vscale x 32 x i16> %1,
    i32 9,
    <vscale x 32 x i1> %2,
    i32 %3, i32 1)

  ret <vscale x 32 x i16> %a
}

define <vscale x 1 x i32> @intrinsic_vrgather_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vi_nxv1i32_nxv1i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vrgather.vi v9, v8, 9
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vrgather.vx.nxv1i32.i32(
    <vscale x 1 x i32> undef,
    <vscale x 1 x i32> %0,
    i32 9,
    i32 %1)

  ret <vscale x 1 x i32> %a
}

define <vscale x 1 x i32> @intrinsic_vrgather_mask_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv1i32_nxv1i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT:    vrgather.vi v8, v9, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vrgather.vx.mask.nxv1i32.i32(
    <vscale x 1 x i32> %0,
    <vscale x 1 x i32> %1,
    i32 9,
    <vscale x 1 x i1> %2,
    i32 %3, i32 1)

  ret <vscale x 1 x i32> %a
}

define <vscale x 2 x i32> @intrinsic_vrgather_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vi_nxv2i32_nxv2i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vrgather.vi v9, v8, 9
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vrgather.vx.nxv2i32.i32(
    <vscale x 2 x i32> undef,
    <vscale x 2 x i32> %0,
    i32 9,
    i32 %1)

  ret <vscale x 2 x i32> %a
}

define <vscale x 2 x i32> @intrinsic_vrgather_mask_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv2i32_nxv2i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT:    vrgather.vi v8, v9, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vrgather.vx.mask.nxv2i32.i32(
    <vscale x 2 x i32> %0,
    <vscale x 2 x i32> %1,
    i32 9,
    <vscale x 2 x i1> %2,
    i32 %3, i32 1)

  ret <vscale x 2 x i32> %a
}

define <vscale x 4 x i32> @intrinsic_vrgather_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vi_nxv4i32_nxv4i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vrgather.vi v10, v8, 9
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vrgather.vx.nxv4i32.i32(
    <vscale x 4 x i32> undef,
    <vscale x 4 x i32> %0,
    i32 9,
    i32 %1)

  ret <vscale x 4 x i32> %a
}

define <vscale x 4 x i32> @intrinsic_vrgather_mask_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv4i32_nxv4i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT:    vrgather.vi v8, v10, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vrgather.vx.mask.nxv4i32.i32(
    <vscale x 4 x i32> %0,
    <vscale x 4 x i32> %1,
    i32 9,
    <vscale x 4 x i1> %2,
    i32 %3, i32 1)

  ret <vscale x 4 x i32> %a
}

define <vscale x 8 x i32> @intrinsic_vrgather_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vi_nxv8i32_nxv8i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vrgather.vi v12, v8, 9
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vrgather.vx.nxv8i32.i32(
    <vscale x 8 x i32> undef,
    <vscale x 8 x i32> %0,
    i32 9,
    i32 %1)

  ret <vscale x 8 x i32> %a
}

define <vscale x 8 x i32> @intrinsic_vrgather_mask_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv8i32_nxv8i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT:    vrgather.vi v8, v12, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vrgather.vx.mask.nxv8i32.i32(
    <vscale x 8 x i32> %0,
    <vscale x 8 x i32> %1,
    i32 9,
    <vscale x 8 x i1> %2,
    i32 %3, i32 1)

  ret <vscale x 8 x i32> %a
}

define <vscale x 16 x i32> @intrinsic_vrgather_vi_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vi_nxv16i32_nxv16i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT:    vrgather.vi v16, v8, 9
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vrgather.vx.nxv16i32.i32(
    <vscale x 16 x i32> undef,
    <vscale x 16 x i32> %0,
    i32 9,
    i32 %1)

  ret <vscale x 16 x i32> %a
}

define <vscale x 16 x i32> @intrinsic_vrgather_mask_vi_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv16i32_nxv16i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
; CHECK-NEXT:    vrgather.vi v8, v16, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vrgather.vx.mask.nxv16i32.i32(
    <vscale x 16 x i32> %0,
    <vscale x 16 x i32> %1,
    i32 9,
    <vscale x 16 x i1> %2,
    i32 %3, i32 1)

  ret <vscale x 16 x i32> %a
}

define <vscale x 1 x half> @intrinsic_vrgather_vi_nxv1f16_nxv1f16_i32(<vscale x 1 x half> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vi_nxv1f16_nxv1f16_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vrgather.vi v9, v8, 9
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vrgather.vx.nxv1f16.i32(
    <vscale x 1 x half> undef,
    <vscale x 1 x half> %0,
    i32 9,
    i32 %1)

  ret <vscale x 1 x half> %a
}

define <vscale x 1 x half> @intrinsic_vrgather_mask_vi_nxv1f16_nxv1f16_i32(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv1f16_nxv1f16_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT:    vrgather.vi v8, v9, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vrgather.vx.mask.nxv1f16.i32(
    <vscale x 1 x half> %0,
    <vscale x 1 x half> %1,
    i32 9,
    <vscale x 1 x i1> %2,
    i32 %3, i32 1)

  ret <vscale x 1 x half> %a
}

define <vscale x 2 x half> @intrinsic_vrgather_vi_nxv2f16_nxv2f16_i32(<vscale x 2 x half> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vi_nxv2f16_nxv2f16_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vrgather.vi v9, v8, 9
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x half> @llvm.riscv.vrgather.vx.nxv2f16.i32(
    <vscale x 2 x half> undef,
    <vscale x 2 x half> %0,
    i32 9,
    i32 %1)

  ret <vscale x 2 x half> %a
}

define <vscale x 2 x half> @intrinsic_vrgather_mask_vi_nxv2f16_nxv2f16_i32(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv2f16_nxv2f16_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT:    vrgather.vi v8, v9, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x half> @llvm.riscv.vrgather.vx.mask.nxv2f16.i32(
    <vscale x 2 x half> %0,
    <vscale x 2 x half> %1,
    i32 9,
    <vscale x 2 x i1> %2,
    i32 %3, i32 1)

  ret <vscale x 2 x half> %a
}

define <vscale x 4 x half> @intrinsic_vrgather_vi_nxv4f16_nxv4f16_i32(<vscale x 4 x half> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vi_nxv4f16_nxv4f16_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vrgather.vi v9, v8, 9
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x half> @llvm.riscv.vrgather.vx.nxv4f16.i32(
    <vscale x 4 x half> undef,
    <vscale x 4 x half> %0,
    i32 9,
    i32 %1)

  ret <vscale x 4 x half> %a
}

define <vscale x 4 x half> @intrinsic_vrgather_mask_vi_nxv4f16_nxv4f16_i32(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv4f16_nxv4f16_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT:    vrgather.vi v8, v9, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x half> @llvm.riscv.vrgather.vx.mask.nxv4f16.i32(
    <vscale x 4 x half> %0,
    <vscale x 4 x half> %1,
    i32 9,
    <vscale x 4 x i1> %2,
    i32 %3, i32 1)

  ret <vscale x 4 x half> %a
}

define <vscale x 8 x half> @intrinsic_vrgather_vi_nxv8f16_nxv8f16_i32(<vscale x 8 x half> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vi_nxv8f16_nxv8f16_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    vrgather.vi v10, v8, 9
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x half> @llvm.riscv.vrgather.vx.nxv8f16.i32(
    <vscale x 8 x half> undef,
    <vscale x 8 x half> %0,
    i32 9,
    i32 %1)

  ret <vscale x 8 x half> %a
}

define <vscale x 8 x half> @intrinsic_vrgather_mask_vi_nxv8f16_nxv8f16_i32(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv8f16_nxv8f16_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT:    vrgather.vi v8, v10, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x half> @llvm.riscv.vrgather.vx.mask.nxv8f16.i32(
    <vscale x 8 x half> %0,
    <vscale x 8 x half> %1,
    i32 9,
    <vscale x 8 x i1> %2,
    i32 %3, i32 1)

  ret <vscale x 8 x half> %a
}

define <vscale x 16 x half> @intrinsic_vrgather_vi_nxv16f16_nxv16f16_i32(<vscale x 16 x half> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vi_nxv16f16_nxv16f16_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT:    vrgather.vi v12, v8, 9
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x half> @llvm.riscv.vrgather.vx.nxv16f16.i32(
    <vscale x 16 x half> undef,
    <vscale x 16 x half> %0,
    i32 9,
    i32 %1)

  ret <vscale x 16 x half> %a
}

define <vscale x 16 x half> @intrinsic_vrgather_mask_vi_nxv16f16_nxv16f16_i32(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv16f16_nxv16f16_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT:    vrgather.vi v8, v12, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x half> @llvm.riscv.vrgather.vx.mask.nxv16f16.i32(
    <vscale x 16 x half> %0,
    <vscale x 16 x half> %1,
    i32 9,
    <vscale x 16 x i1> %2,
    i32 %3, i32 1)

  ret <vscale x 16 x half> %a
}

define <vscale x 32 x half> @intrinsic_vrgather_vi_nxv32f16_nxv32f16_i32(<vscale x 32 x half> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vi_nxv32f16_nxv32f16_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT:    vrgather.vi v16, v8, 9
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x half> @llvm.riscv.vrgather.vx.nxv32f16.i32(
    <vscale x 32 x half> undef,
    <vscale x 32 x half> %0,
    i32 9,
    i32 %1)

  ret <vscale x 32 x half> %a
}

define <vscale x 32 x half> @intrinsic_vrgather_mask_vi_nxv32f16_nxv32f16_i32(<vscale x 32 x half> %0, <vscale x 32 x half> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv32f16_nxv32f16_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
; CHECK-NEXT:    vrgather.vi v8, v16, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x half> @llvm.riscv.vrgather.vx.mask.nxv32f16.i32(
    <vscale x 32 x half> %0,
    <vscale x 32 x half> %1,
    i32 9,
    <vscale x 32 x i1> %2,
    i32 %3, i32 1)

  ret <vscale x 32 x half> %a
}
define <vscale x 1 x float> @intrinsic_vrgather_vi_nxv1f32_nxv1f32_i32(<vscale x 1 x float> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vi_nxv1f32_nxv1f32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vrgather.vi v9, v8, 9
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vrgather.vx.nxv1f32.i32(
    <vscale x 1 x float> undef,
    <vscale x 1 x float> %0,
    i32 9,
    i32 %1)

  ret <vscale x 1 x float> %a
}

define <vscale x 1 x float> @intrinsic_vrgather_mask_vi_nxv1f32_nxv1f32_i32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv1f32_nxv1f32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT: vrgather.vi v8, v9, 9, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vrgather.vx.mask.nxv1f32.i32(
    <vscale x 1 x float> %0,
    <vscale x 1 x float> %1,
    i32 9,
    <vscale x 1 x i1> %2,
    i32 %3, i32 1)

  ret <vscale x 1 x float> %a
}

define <vscale x 2 x float> @intrinsic_vrgather_vi_nxv2f32_nxv2f32_i32(<vscale x 2 x float> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vi_nxv2f32_nxv2f32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vrgather.vi v9, v8, 9
; CHECK-NEXT: vmv.v.v v8, v9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vrgather.vx.nxv2f32.i32(
    <vscale x 2 x float> undef,
    <vscale x 2 x float> %0,
    i32 9,
    i32 %1)

  ret <vscale x 2 x float> %a
}

define <vscale x 2 x float> @intrinsic_vrgather_mask_vi_nxv2f32_nxv2f32_i32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv2f32_nxv2f32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT: vrgather.vi v8, v9, 9, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vrgather.vx.mask.nxv2f32.i32(
    <vscale x 2 x float> %0,
    <vscale x 2 x float> %1,
    i32 9,
    <vscale x 2 x i1> %2,
    i32 %3, i32 1)

  ret <vscale x 2 x float> %a
}

define <vscale x 4 x float> @intrinsic_vrgather_vi_nxv4f32_nxv4f32_i32(<vscale x 4 x float> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vi_nxv4f32_nxv4f32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vrgather.vi v10, v8, 9
; CHECK-NEXT: vmv.v.v v8, v10
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 4 x float> @llvm.riscv.vrgather.vx.nxv4f32.i32(
    <vscale x 4 x float> undef,
    <vscale x 4 x float> %0,
    i32 9,
    i32 %1)

  ret <vscale x 4 x float> %a
}

define <vscale x 4 x float> @intrinsic_vrgather_mask_vi_nxv4f32_nxv4f32_i32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv4f32_nxv4f32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT: vrgather.vi v8, v10, 9, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 4 x float> @llvm.riscv.vrgather.vx.mask.nxv4f32.i32(
    <vscale x 4 x float> %0,
    <vscale x 4 x float> %1,
    i32 9,
    <vscale x 4 x i1> %2,
    i32 %3, i32 1)

  ret <vscale x 4 x float> %a
}

define <vscale x 8 x float> @intrinsic_vrgather_vi_nxv8f32_nxv8f32_i32(<vscale x 8 x float> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vi_nxv8f32_nxv8f32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vrgather.vi v12, v8, 9
; CHECK-NEXT: vmv.v.v v8, v12
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 8 x float> @llvm.riscv.vrgather.vx.nxv8f32.i32(
    <vscale x 8 x float> undef,
    <vscale x 8 x float> %0,
    i32 9,
    i32 %1)

  ret <vscale x 8 x float> %a
}

define <vscale x 8 x float> @intrinsic_vrgather_mask_vi_nxv8f32_nxv8f32_i32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv8f32_nxv8f32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT: vrgather.vi v8, v12, 9, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 8 x float> @llvm.riscv.vrgather.vx.mask.nxv8f32.i32(
    <vscale x 8 x float> %0,
    <vscale x 8 x float> %1,
    i32 9,
    <vscale x 8 x i1> %2,
    i32 %3, i32 1)

  ret <vscale x 8 x float> %a
}

define <vscale x 16 x float> @intrinsic_vrgather_vi_nxv16f32_nxv16f32_i32(<vscale x 16 x float> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vi_nxv16f32_nxv16f32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vrgather.vi v16, v8, 9
; CHECK-NEXT: vmv.v.v v8, v16
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 16 x float> @llvm.riscv.vrgather.vx.nxv16f32.i32(
    <vscale x 16 x float> undef,
    <vscale x 16 x float> %0,
    i32 9,
    i32 %1)

  ret <vscale x 16 x float> %a
}

define <vscale x 16 x float> @intrinsic_vrgather_mask_vi_nxv16f32_nxv16f32_i32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv16f32_nxv16f32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu
; CHECK-NEXT: vrgather.vi v8, v16, 9, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 16 x float> @llvm.riscv.vrgather.vx.mask.nxv16f32.i32(
    <vscale x 16 x float> %0,
    <vscale x 16 x float> %1,
    i32 9,
    <vscale x 16 x i1> %2,
    i32 %3, i32 1)

  ret <vscale x 16 x float> %a
}

define <vscale x 1 x double> @intrinsic_vrgather_vi_nxv1f64_nxv1f64_i32(<vscale x 1 x double> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vi_nxv1f64_nxv1f64_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vrgather.vi v9, v8, 9
; CHECK-NEXT: vmv.v.v v8, v9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vrgather.vx.nxv1f64.i32(
    <vscale x 1 x double> undef,
    <vscale x 1 x double> %0,
    i32 9,
    i32 %1)

  ret <vscale x 1 x double> %a
}

define <vscale x 1 x double> @intrinsic_vrgather_mask_vi_nxv1f64_nxv1f64_i32(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv1f64_nxv1f64_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT: vrgather.vi v8, v9, 9, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vrgather.vx.mask.nxv1f64.i32(
    <vscale x 1 x double> %0,
    <vscale x 1 x double> %1,
    i32 9,
    <vscale x 1 x i1> %2,
    i32 %3, i32 1)

  ret <vscale x 1 x double> %a
}

define <vscale x 2 x double> @intrinsic_vrgather_vi_nxv2f64_nxv2f64_i32(<vscale x 2 x double> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vi_nxv2f64_nxv2f64_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vrgather.vi v10, v8, 9
; CHECK-NEXT: vmv.v.v v8, v10
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 2 x double> @llvm.riscv.vrgather.vx.nxv2f64.i32(
    <vscale x 2 x double> undef,
    <vscale x 2 x double> %0,
    i32 9,
    i32 %1)

  ret <vscale x 2 x double> %a
}

define <vscale x 2 x double> @intrinsic_vrgather_mask_vi_nxv2f64_nxv2f64_i32(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv2f64_nxv2f64_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
; CHECK-NEXT: vrgather.vi v8, v10, 9, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 2 x double> @llvm.riscv.vrgather.vx.mask.nxv2f64.i32(
    <vscale x 2 x double> %0,
    <vscale x 2 x double> %1,
    i32 9,
    <vscale x 2 x i1> %2,
    i32 %3, i32 1)

  ret <vscale x 2 x double> %a
}

define <vscale x 4 x double> @intrinsic_vrgather_vi_nxv4f64_nxv4f64_i32(<vscale x 4 x double> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vi_nxv4f64_nxv4f64_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vrgather.vi v12, v8, 9
; CHECK-NEXT: vmv.v.v v8, v12
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 4 x double> @llvm.riscv.vrgather.vx.nxv4f64.i32(
    <vscale x 4 x double> undef,
    <vscale x 4 x double> %0,
    i32 9,
    i32 %1)

  ret <vscale x 4 x double> %a
}

define <vscale x 4 x double> @intrinsic_vrgather_mask_vi_nxv4f64_nxv4f64_i32(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv4f64_nxv4f64_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
; CHECK-NEXT: vrgather.vi v8, v12, 9, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 4 x double> @llvm.riscv.vrgather.vx.mask.nxv4f64.i32(
    <vscale x 4 x double> %0,
    <vscale x 4 x double> %1,
    i32 9,
    <vscale x 4 x i1> %2,
    i32 %3, i32 1)

  ret <vscale x 4 x double> %a
}

define <vscale x 8 x double> @intrinsic_vrgather_vi_nxv8f64_nxv8f64_i32(<vscale x 8 x double> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vi_nxv8f64_nxv8f64_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vrgather.vi v16, v8, 9
; CHECK-NEXT: vmv.v.v v8, v16
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 8 x double> @llvm.riscv.vrgather.vx.nxv8f64.i32(
    <vscale x 8 x double> undef,
    <vscale x 8 x double> %0,
    i32 9,
    i32 %1)

  ret <vscale x 8 x double> %a
}

define <vscale x 8 x double> @intrinsic_vrgather_mask_vi_nxv8f64_nxv8f64_i32(<vscale x 8 x double> %0, <vscale x 8 x double> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv8f64_nxv8f64_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
; CHECK-NEXT: vrgather.vi v8, v16, 9, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 8 x double> @llvm.riscv.vrgather.vx.mask.nxv8f64.i32(
    <vscale x 8 x double> %0,
    <vscale x 8 x double> %1,
    i32 9,
    <vscale x 8 x i1> %2,
    i32 %3, i32 1)

  ret <vscale x 8 x double> %a
}