; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh,+zvfh -verify-machineinstrs \
; RUN:   < %s | FileCheck %s
declare <vscale x 1 x i8> @llvm.riscv.vrgather.vv.nxv1i8.i64(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  i64)

define <vscale x 1 x i8> @intrinsic_vrgather_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vrgather.vv v10, v8, v9
; CHECK-NEXT:    vmv1r.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vrgather.vv.nxv1i8.i64(
    <vscale x 1 x i8> undef,
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    i64 %2)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vrgather.vv.mask.nxv1i8.i64(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i1>,
  i64,
  i64)

define <vscale x 1 x i8> @intrinsic_vrgather_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT:    vrgather.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vrgather.vv.mask.nxv1i8.i64(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    <vscale x 1 x i8> %2,
    <vscale x 1 x i1> %3,
    i64 %4, i64 1)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 2 x i8> @llvm.riscv.vrgather.vv.nxv2i8.i64(
  <vscale x 2 x i8>,
  <vscale x 2 x i8>,
  <vscale x 2 x i8>,
  i64)

define <vscale x 2 x i8> @intrinsic_vrgather_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vv_nxv2i8_nxv2i8_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT:    vrgather.vv v10, v8, v9
; CHECK-NEXT:    vmv1r.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vrgather.vv.nxv2i8.i64(
    <vscale x 2 x i8> undef,
    <vscale x 2 x i8> %0,
    <vscale x 2 x i8> %1,
    i64 %2)

  ret <vscale x 2 x i8> %a
}

declare <vscale x 2 x i8> @llvm.riscv.vrgather.vv.mask.nxv2i8.i64(
  <vscale x 2 x i8>,
  <vscale x 2 x i8>,
  <vscale x 2 x i8>,
  <vscale x 2 x i1>,
  i64,
  i64)

define <vscale x 2 x i8> @intrinsic_vrgather_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv2i8_nxv2i8_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
; CHECK-NEXT:    vrgather.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vrgather.vv.mask.nxv2i8.i64(
    <vscale x 2 x i8> %0,
    <vscale x 2 x i8> %1,
    <vscale x 2 x i8> %2,
    <vscale x 2 x i1> %3,
    i64 %4, i64 1)

  ret <vscale x 2 x i8> %a
}

declare <vscale x 4 x i8> @llvm.riscv.vrgather.vv.nxv4i8.i64(
  <vscale x 4 x i8>,
  <vscale x 4 x i8>,
  <vscale x 4 x i8>,
  i64)

define <vscale x 4 x i8> @intrinsic_vrgather_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vv_nxv4i8_nxv4i8_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT:    vrgather.vv v10, v8, v9
; CHECK-NEXT:    vmv1r.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vrgather.vv.nxv4i8.i64(
    <vscale x 4 x i8> undef,
    <vscale x 4 x i8> %0,
    <vscale x 4 x i8> %1,
    i64 %2)

  ret <vscale x 4 x i8> %a
}

declare <vscale x 4 x i8> @llvm.riscv.vrgather.vv.mask.nxv4i8.i64(
  <vscale x 4 x i8>,
  <vscale x 4 x i8>,
  <vscale x 4 x i8>,
  <vscale x 4 x i1>,
  i64,
  i64)

define <vscale x 4 x i8> @intrinsic_vrgather_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv4i8_nxv4i8_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
; CHECK-NEXT:    vrgather.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vrgather.vv.mask.nxv4i8.i64(
    <vscale x 4 x i8> %0,
    <vscale x 4 x i8> %1,
    <vscale x 4 x i8> %2,
    <vscale x 4 x i1> %3,
    i64 %4, i64 1)

  ret <vscale x 4 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vrgather.vv.nxv8i8.i64(
  <vscale x 8 x i8>,
  <vscale x 8 x i8>,
  <vscale x 8 x i8>,
  i64)

define <vscale x 8 x i8> @intrinsic_vrgather_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vv_nxv8i8_nxv8i8_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT:    vrgather.vv v10, v8, v9
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vrgather.vv.nxv8i8.i64(
    <vscale x 8 x i8> undef,
    <vscale x 8 x i8> %0,
    <vscale x 8 x i8> %1,
    i64 %2)

  ret <vscale x 8 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vrgather.vv.mask.nxv8i8.i64(
  <vscale x 8 x i8>,
  <vscale x 8 x i8>,
  <vscale x 8 x i8>,
  <vscale x 8 x i1>,
  i64,
  i64)

define <vscale x 8 x i8> @intrinsic_vrgather_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv8i8_nxv8i8_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
; CHECK-NEXT:    vrgather.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vrgather.vv.mask.nxv8i8.i64(
    <vscale x 8 x i8> %0,
    <vscale x 8 x i8> %1,
    <vscale x 8 x i8> %2,
    <vscale x 8 x i1> %3,
    i64 %4, i64 1)

  ret <vscale x 8 x i8> %a
}

declare <vscale x 16 x i8> @llvm.riscv.vrgather.vv.nxv16i8.i64(
  <vscale x 16 x i8>,
  <vscale x 16 x i8>,
  <vscale x 16 x i8>,
  i64)

define <vscale x 16 x i8> @intrinsic_vrgather_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vv_nxv16i8_nxv16i8_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT:    vrgather.vv v12, v8, v10
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vrgather.vv.nxv16i8.i64(
    <vscale x 16 x i8> undef,
    <vscale x 16 x i8> %0,
    <vscale x 16 x i8> %1,
    i64 %2)

  ret <vscale x 16 x i8> %a
}

declare <vscale x 16 x i8> @llvm.riscv.vrgather.vv.mask.nxv16i8.i64(
  <vscale x 16 x i8>,
  <vscale x 16 x i8>,
  <vscale x 16 x i8>,
  <vscale x 16 x i1>,
  i64,
  i64)

define <vscale x 16 x i8> @intrinsic_vrgather_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv16i8_nxv16i8_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
; CHECK-NEXT:    vrgather.vv v8, v10, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vrgather.vv.mask.nxv16i8.i64(
    <vscale x 16 x i8> %0,
    <vscale x 16 x i8> %1,
    <vscale x 16 x i8> %2,
    <vscale x 16 x i1> %3,
    i64 %4, i64 1)

  ret <vscale x 16 x i8> %a
}

declare <vscale x 32 x i8> @llvm.riscv.vrgather.vv.nxv32i8.i64(
  <vscale x 32 x i8>,
  <vscale x 32 x i8>,
  <vscale x 32 x i8>,
  i64)

define <vscale x 32 x i8> @intrinsic_vrgather_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vv_nxv32i8_nxv32i8_nxv32i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT:    vrgather.vv v16, v8, v12
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vrgather.vv.nxv32i8.i64(
    <vscale x 32 x i8> undef,
    <vscale x 32 x i8> %0,
    <vscale x 32 x i8> %1,
    i64 %2)

  ret <vscale x 32 x i8> %a
}

declare <vscale x 32 x i8> @llvm.riscv.vrgather.vv.mask.nxv32i8.i64(
  <vscale x 32 x i8>,
  <vscale x 32 x i8>,
  <vscale x 32 x i8>,
  <vscale x 32 x i1>,
  i64,
  i64)

define <vscale x 32 x i8> @intrinsic_vrgather_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv32i8_nxv32i8_nxv32i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
; CHECK-NEXT:    vrgather.vv v8, v12, v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vrgather.vv.mask.nxv32i8.i64(
    <vscale x 32 x i8> %0,
    <vscale x 32 x i8> %1,
    <vscale x 32 x i8> %2,
    <vscale x 32 x i1> %3,
    i64 %4, i64 1)

  ret <vscale x 32 x i8> %a
}

declare <vscale x 64 x i8> @llvm.riscv.vrgather.vv.nxv64i8.i64(
  <vscale x 64 x i8>,
  <vscale x 64 x i8>,
  <vscale x 64 x i8>,
  i64)

define <vscale x 64 x i8> @intrinsic_vrgather_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vv_nxv64i8_nxv64i8_nxv64i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT:    vrgather.vv v24, v8, v16
; CHECK-NEXT:    vmv.v.v v8, v24
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 64 x i8> @llvm.riscv.vrgather.vv.nxv64i8.i64(
    <vscale x 64 x i8> undef,
    <vscale x 64 x i8> %0,
    <vscale x 64 x i8> %1,
    i64 %2)

  ret <vscale x 64 x i8> %a
}

declare <vscale x 64 x i8> @llvm.riscv.vrgather.vv.mask.nxv64i8.i64(
  <vscale x 64 x i8>,
  <vscale x 64 x i8>,
  <vscale x 64 x i8>,
  <vscale x 64 x i1>,
  i64,
  i64)

define <vscale x 64 x i8> @intrinsic_vrgather_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv64i8_nxv64i8_nxv64i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vl8r.v v24, (a0)
; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
; CHECK-NEXT:    vrgather.vv v8, v16, v24, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 64 x i8> @llvm.riscv.vrgather.vv.mask.nxv64i8.i64(
    <vscale x 64 x i8> %0,
    <vscale x 64 x i8> %1,
    <vscale x 64 x i8> %2,
    <vscale x 64 x i1> %3,
    i64 %4, i64 1)

  ret <vscale x 64 x i8> %a
}
declare <vscale x 1 x i16> @llvm.riscv.vrgather.vv.nxv1i16.i64(
  <vscale x 1 x i16>,
  <vscale x 1 x i16>,
  <vscale x 1 x i16>,
  i64)

define <vscale x 1 x i16> @intrinsic_vrgather_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vv_nxv1i16_nxv1i16_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vrgather.vv v10, v8, v9
; CHECK-NEXT:    vmv1r.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vrgather.vv.nxv1i16.i64(
    <vscale x 1 x i16> undef,
    <vscale x 1 x i16> %0,
    <vscale x 1 x i16> %1,
    i64 %2)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vrgather.vv.mask.nxv1i16.i64(
  <vscale x 1 x i16>,
  <vscale x 1 x i16>,
  <vscale x 1 x i16>,
  <vscale x 1 x i1>,
  i64,
  i64)

define <vscale x 1 x i16> @intrinsic_vrgather_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv1i16_nxv1i16_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT:    vrgather.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vrgather.vv.mask.nxv1i16.i64(
    <vscale x 1 x i16> %0,
    <vscale x 1 x i16> %1,
    <vscale x 1 x i16> %2,
    <vscale x 1 x i1> %3,
    i64 %4, i64 1)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vrgather.vv.nxv2i16.i64(
  <vscale x 2 x i16>,
  <vscale x 2 x i16>,
  <vscale x 2 x i16>,
  i64)

define <vscale x 2 x i16> @intrinsic_vrgather_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vv_nxv2i16_nxv2i16_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vrgather.vv v10, v8, v9
; CHECK-NEXT:    vmv1r.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vrgather.vv.nxv2i16.i64(
    <vscale x 2 x i16> undef,
    <vscale x 2 x i16> %0,
    <vscale x 2 x i16> %1,
    i64 %2)

  ret <vscale x 2 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vrgather.vv.mask.nxv2i16.i64(
  <vscale x 2 x i16>,
  <vscale x 2 x i16>,
  <vscale x 2 x i16>,
  <vscale x 2 x i1>,
  i64,
  i64)

define <vscale x 2 x i16> @intrinsic_vrgather_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv2i16_nxv2i16_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT:    vrgather.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vrgather.vv.mask.nxv2i16.i64(
    <vscale x 2 x i16> %0,
    <vscale x 2 x i16> %1,
    <vscale x 2 x i16> %2,
    <vscale x 2 x i1> %3,
    i64 %4, i64 1)

  ret <vscale x 2 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vrgather.vv.nxv4i16.i64(
  <vscale x 4 x i16>,
  <vscale x 4 x i16>,
  <vscale x 4 x i16>,
  i64)

define <vscale x 4 x i16> @intrinsic_vrgather_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vv_nxv4i16_nxv4i16_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vrgather.vv v10, v8, v9
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vrgather.vv.nxv4i16.i64(
    <vscale x 4 x i16> undef,
    <vscale x 4 x i16> %0,
    <vscale x 4 x i16> %1,
    i64 %2)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vrgather.vv.mask.nxv4i16.i64(
  <vscale x 4 x i16>,
  <vscale x 4 x i16>,
  <vscale x 4 x i16>,
  <vscale x 4 x i1>,
  i64,
  i64)

define <vscale x 4 x i16> @intrinsic_vrgather_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv4i16_nxv4i16_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT:    vrgather.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vrgather.vv.mask.nxv4i16.i64(
    <vscale x 4 x i16> %0,
    <vscale x 4 x i16> %1,
    <vscale x 4 x i16> %2,
    <vscale x 4 x i1> %3,
    i64 %4, i64 1)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vrgather.vv.nxv8i16.i64(
  <vscale x 8 x i16>,
  <vscale x 8 x i16>,
  <vscale x 8 x i16>,
  i64)

define <vscale x 8 x i16> @intrinsic_vrgather_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vv_nxv8i16_nxv8i16_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    vrgather.vv v12, v8, v10
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vrgather.vv.nxv8i16.i64(
    <vscale x 8 x i16> undef,
    <vscale x 8 x i16> %0,
    <vscale x 8 x i16> %1,
    i64 %2)

  ret <vscale x 8 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vrgather.vv.mask.nxv8i16.i64(
  <vscale x 8 x i16>,
  <vscale x 8 x i16>,
  <vscale x 8 x i16>,
  <vscale x 8 x i1>,
  i64,
  i64)

define <vscale x 8 x i16> @intrinsic_vrgather_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv8i16_nxv8i16_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT:    vrgather.vv v8, v10, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vrgather.vv.mask.nxv8i16.i64(
    <vscale x 8 x i16> %0,
    <vscale x 8 x i16> %1,
    <vscale x 8 x i16> %2,
    <vscale x 8 x i1> %3,
    i64 %4, i64 1)

  ret <vscale x 8 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vrgather.vv.nxv16i16.i64(
  <vscale x 16 x i16>,
  <vscale x 16 x i16>,
  <vscale x 16 x i16>,
  i64)

define <vscale x 16 x i16> @intrinsic_vrgather_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vv_nxv16i16_nxv16i16_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT:    vrgather.vv v16, v8, v12
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vrgather.vv.nxv16i16.i64(
    <vscale x 16 x i16> undef,
    <vscale x 16 x i16> %0,
    <vscale x 16 x i16> %1,
    i64 %2)

  ret <vscale x 16 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vrgather.vv.mask.nxv16i16.i64(
  <vscale x 16 x i16>,
  <vscale x 16 x i16>,
  <vscale x 16 x i16>,
  <vscale x 16 x i1>,
  i64,
  i64)

define <vscale x 16 x i16> @intrinsic_vrgather_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv16i16_nxv16i16_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT:    vrgather.vv v8, v12, v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vrgather.vv.mask.nxv16i16.i64(
    <vscale x 16 x i16> %0,
    <vscale x 16 x i16> %1,
    <vscale x 16 x i16> %2,
    <vscale x 16 x i1> %3,
    i64 %4, i64 1)

  ret <vscale x 16 x i16> %a
}

declare <vscale x 32 x i16> @llvm.riscv.vrgather.vv.nxv32i16.i64(
  <vscale x 32 x i16>,
  <vscale x 32 x i16>,
  <vscale x 32 x i16>,
  i64)

define <vscale x 32 x i16> @intrinsic_vrgather_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vv_nxv32i16_nxv32i16_nxv32i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT:    vrgather.vv v24, v8, v16
; CHECK-NEXT:    vmv.v.v v8, v24
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vrgather.vv.nxv32i16.i64(
    <vscale x 32 x i16> undef,
    <vscale x 32 x i16> %0,
    <vscale x 32 x i16> %1,
    i64 %2)

  ret <vscale x 32 x i16> %a
}

declare <vscale x 32 x i16> @llvm.riscv.vrgather.vv.mask.nxv32i16.i64(
  <vscale x 32 x i16>,
  <vscale x 32 x i16>,
  <vscale x 32 x i16>,
  <vscale x 32 x i1>,
  i64,
  i64)

define <vscale x 32 x i16> @intrinsic_vrgather_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv32i16_nxv32i16_nxv32i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vl8re16.v v24, (a0)
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
; CHECK-NEXT:    vrgather.vv v8, v16, v24, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vrgather.vv.mask.nxv32i16.i64(
    <vscale x 32 x i16> %0,
    <vscale x 32 x i16> %1,
    <vscale x 32 x i16> %2,
    <vscale x 32 x i1> %3,
    i64 %4, i64 1)

  ret <vscale x 32 x i16> %a
}
declare <vscale x 1 x i32> @llvm.riscv.vrgather.vv.nxv1i32.i64(
  <vscale x 1 x i32>,
  <vscale x 1 x i32>,
  <vscale x 1 x i32>,
  i64)

define <vscale x 1 x i32> @intrinsic_vrgather_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vv_nxv1i32_nxv1i32_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vrgather.vv v10, v8, v9
; CHECK-NEXT:    vmv1r.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vrgather.vv.nxv1i32.i64(
    <vscale x 1 x i32> undef,
    <vscale x 1 x i32> %0,
    <vscale x 1 x i32> %1,
    i64 %2)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vrgather.vv.mask.nxv1i32.i64(
  <vscale x 1 x i32>,
  <vscale x 1 x i32>,
  <vscale x 1 x i32>,
  <vscale x 1 x i1>,
  i64,
  i64)

define <vscale x 1 x i32> @intrinsic_vrgather_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv1i32_nxv1i32_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT:    vrgather.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vrgather.vv.mask.nxv1i32.i64(
    <vscale x 1 x i32> %0,
    <vscale x 1 x i32> %1,
    <vscale x 1 x i32> %2,
    <vscale x 1 x i1> %3,
    i64 %4, i64 1)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vrgather.vv.nxv2i32.i64(
  <vscale x 2 x i32>,
  <vscale x 2 x i32>,
  <vscale x 2 x i32>,
  i64)

define <vscale x 2 x i32> @intrinsic_vrgather_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vv_nxv2i32_nxv2i32_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vrgather.vv v10, v8, v9
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vrgather.vv.nxv2i32.i64(
    <vscale x 2 x i32> undef,
    <vscale x 2 x i32> %0,
    <vscale x 2 x i32> %1,
    i64 %2)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vrgather.vv.mask.nxv2i32.i64(
  <vscale x 2 x i32>,
  <vscale x 2 x i32>,
  <vscale x 2 x i32>,
  <vscale x 2 x i1>,
  i64,
  i64)

define <vscale x 2 x i32> @intrinsic_vrgather_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv2i32_nxv2i32_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT:    vrgather.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vrgather.vv.mask.nxv2i32.i64(
    <vscale x 2 x i32> %0,
    <vscale x 2 x i32> %1,
    <vscale x 2 x i32> %2,
    <vscale x 2 x i1> %3,
    i64 %4, i64 1)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vrgather.vv.nxv4i32.i64(
  <vscale x 4 x i32>,
  <vscale x 4 x i32>,
  <vscale x 4 x i32>,
  i64)

define <vscale x 4 x i32> @intrinsic_vrgather_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vv_nxv4i32_nxv4i32_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vrgather.vv v12, v8, v10
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vrgather.vv.nxv4i32.i64(
    <vscale x 4 x i32> undef,
    <vscale x 4 x i32> %0,
    <vscale x 4 x i32> %1,
    i64 %2)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vrgather.vv.mask.nxv4i32.i64(
  <vscale x 4 x i32>,
  <vscale x 4 x i32>,
  <vscale x 4 x i32>,
  <vscale x 4 x i1>,
  i64,
  i64)

define <vscale x 4 x i32> @intrinsic_vrgather_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv4i32_nxv4i32_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT:    vrgather.vv v8, v10, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vrgather.vv.mask.nxv4i32.i64(
    <vscale x 4 x i32> %0,
    <vscale x 4 x i32> %1,
    <vscale x 4 x i32> %2,
    <vscale x 4 x i1> %3,
    i64 %4, i64 1)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vrgather.vv.nxv8i32.i64(
  <vscale x 8 x i32>,
  <vscale x 8 x i32>,
  <vscale x 8 x i32>,
  i64)

define <vscale x 8 x i32> @intrinsic_vrgather_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vv_nxv8i32_nxv8i32_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vrgather.vv v16, v8, v12
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vrgather.vv.nxv8i32.i64(
    <vscale x 8 x i32> undef,
    <vscale x 8 x i32> %0,
    <vscale x 8 x i32> %1,
    i64 %2)

  ret <vscale x 8 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vrgather.vv.mask.nxv8i32.i64(
  <vscale x 8 x i32>,
  <vscale x 8 x i32>,
  <vscale x 8 x i32>,
  <vscale x 8 x i1>,
  i64,
  i64)

define <vscale x 8 x i32> @intrinsic_vrgather_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv8i32_nxv8i32_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT:    vrgather.vv v8, v12, v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vrgather.vv.mask.nxv8i32.i64(
    <vscale x 8 x i32> %0,
    <vscale x 8 x i32> %1,
    <vscale x 8 x i32> %2,
    <vscale x 8 x i1> %3,
    i64 %4, i64 1)

  ret <vscale x 8 x i32> %a
}

declare <vscale x 16 x i32> @llvm.riscv.vrgather.vv.nxv16i32.i64(
  <vscale x 16 x i32>,
  <vscale x 16 x i32>,
  <vscale x 16 x i32>,
  i64)

define <vscale x 16 x i32> @intrinsic_vrgather_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vv_nxv16i32_nxv16i32_nxv16i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT:    vrgather.vv v24, v8, v16
; CHECK-NEXT:    vmv.v.v v8, v24
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vrgather.vv.nxv16i32.i64(
    <vscale x 16 x i32> undef,
    <vscale x 16 x i32> %0,
    <vscale x 16 x i32> %1,
    i64 %2)

  ret <vscale x 16 x i32> %a
}

declare <vscale x 16 x i32> @llvm.riscv.vrgather.vv.mask.nxv16i32.i64(
  <vscale x 16 x i32>,
  <vscale x 16 x i32>,
  <vscale x 16 x i32>,
  <vscale x 16 x i1>,
  i64,
  i64)

define <vscale x 16 x i32> @intrinsic_vrgather_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv16i32_nxv16i32_nxv16i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vl8re32.v v24, (a0)
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
; CHECK-NEXT:    vrgather.vv v8, v16, v24, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vrgather.vv.mask.nxv16i32.i64(
    <vscale x 16 x i32> %0,
    <vscale x 16 x i32> %1,
    <vscale x 16 x i32> %2,
    <vscale x 16 x i1> %3,
    i64 %4, i64 1)

  ret <vscale x 16 x i32> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vrgather.vv.nxv1i64.i64(
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  i64)

define <vscale x 1 x i64> @intrinsic_vrgather_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vv_nxv1i64_nxv1i64_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vrgather.vv v10, v8, v9
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vrgather.vv.nxv1i64.i64(
    <vscale x 1 x i64> undef,
    <vscale x 1 x i64> %0,
    <vscale x 1 x i64> %1,
    i64 %2)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vrgather.vv.mask.nxv1i64.i64(
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  <vscale x 1 x i1>,
  i64,
  i64)

define <vscale x 1 x i64> @intrinsic_vrgather_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv1i64_nxv1i64_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT:    vrgather.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vrgather.vv.mask.nxv1i64.i64(
    <vscale x 1 x i64> %0,
    <vscale x 1 x i64> %1,
    <vscale x 1 x i64> %2,
    <vscale x 1 x i1> %3,
    i64 %4, i64 1)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.vrgather.vv.nxv2i64.i64(
  <vscale x 2 x i64>,
  <vscale x 2 x i64>,
  <vscale x 2 x i64>,
  i64)

define <vscale x 2 x i64> @intrinsic_vrgather_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vv_nxv2i64_nxv2i64_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT:    vrgather.vv v12, v8, v10
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vrgather.vv.nxv2i64.i64(
    <vscale x 2 x i64> undef,
    <vscale x 2 x i64> %0,
    <vscale x 2 x i64> %1,
    i64 %2)

  ret <vscale x 2 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.vrgather.vv.mask.nxv2i64.i64(
  <vscale x 2 x i64>,
  <vscale x 2 x i64>,
  <vscale x 2 x i64>,
  <vscale x 2 x i1>,
  i64,
  i64)

define <vscale x 2 x i64> @intrinsic_vrgather_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv2i64_nxv2i64_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
; CHECK-NEXT:    vrgather.vv v8, v10, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vrgather.vv.mask.nxv2i64.i64(
    <vscale x 2 x i64> %0,
    <vscale x 2 x i64> %1,
    <vscale x 2 x i64> %2,
    <vscale x 2 x i1> %3,
    i64 %4, i64 1)

  ret <vscale x 2 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vrgather.vv.nxv4i64.i64(
  <vscale x 4 x i64>,
  <vscale x 4 x i64>,
  <vscale x 4 x i64>,
  i64)

define <vscale x 4 x i64> @intrinsic_vrgather_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vv_nxv4i64_nxv4i64_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT:    vrgather.vv v16, v8, v12
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vrgather.vv.nxv4i64.i64(
    <vscale x 4 x i64> undef,
    <vscale x 4 x i64> %0,
    <vscale x 4 x i64> %1,
    i64 %2)

  ret <vscale x 4 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vrgather.vv.mask.nxv4i64.i64(
  <vscale x 4 x i64>,
  <vscale x 4 x i64>,
  <vscale x 4 x i64>,
  <vscale x 4 x i1>,
  i64,
  i64)

define <vscale x 4 x i64> @intrinsic_vrgather_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv4i64_nxv4i64_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
; CHECK-NEXT:    vrgather.vv v8, v12, v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vrgather.vv.mask.nxv4i64.i64(
    <vscale x 4 x i64> %0,
    <vscale x 4 x i64> %1,
    <vscale x 4 x i64> %2,
    <vscale x 4 x i1> %3,
    i64 %4, i64 1)

  ret <vscale x 4 x i64> %a
}

declare <vscale x 8 x i64> @llvm.riscv.vrgather.vv.nxv8i64.i64(
  <vscale x 8 x i64>,
  <vscale x 8 x i64>,
  <vscale x 8 x i64>,
  i64)

define <vscale x 8 x i64> @intrinsic_vrgather_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vv_nxv8i64_nxv8i64_nxv8i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vrgather.vv v24, v8, v16
; CHECK-NEXT:    vmv.v.v v8, v24
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vrgather.vv.nxv8i64.i64(
    <vscale x 8 x i64> undef,
    <vscale x 8 x i64> %0,
    <vscale x 8 x i64> %1,
    i64 %2)

  ret <vscale x 8 x i64> %a
}

declare <vscale x 8 x i64> @llvm.riscv.vrgather.vv.mask.nxv8i64.i64(
  <vscale x 8 x i64>,
  <vscale x 8 x i64>,
  <vscale x 8 x i64>,
  <vscale x 8 x i1>,
  i64,
  i64)

define <vscale x 8 x i64> @intrinsic_vrgather_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv8i64_nxv8i64_nxv8i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vl8re64.v v24, (a0)
; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
; CHECK-NEXT:    vrgather.vv v8, v16, v24, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vrgather.vv.mask.nxv8i64.i64(
    <vscale x 8 x i64> %0,
    <vscale x 8 x i64> %1,
    <vscale x 8 x i64> %2,
    <vscale x 8 x i1> %3,
    i64 %4, i64 1)

  ret <vscale x 8 x i64> %a
}
declare <vscale x 1 x half> @llvm.riscv.vrgather.vv.nxv1f16.i64(
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  <vscale x 1 x i16>,
  i64)

define <vscale x 1 x half> @intrinsic_vrgather_vv_nxv1f16_nxv1f16_nxv1i16(<vscale x 1 x half> %0, <vscale x 1 x i16> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vv_nxv1f16_nxv1f16_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vrgather.vv v10, v8, v9
; CHECK-NEXT:    vmv1r.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vrgather.vv.nxv1f16.i64(
    <vscale x 1 x half> undef,
    <vscale x 1 x half> %0,
    <vscale x 1 x i16> %1,
    i64 %2)

  ret <vscale x 1 x half> %a
}

declare <vscale x 1 x half> @llvm.riscv.vrgather.vv.mask.nxv1f16.i64(
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  <vscale x 1 x i16>,
  <vscale x 1 x i1>,
  i64,
  i64)

define <vscale x 1 x half> @intrinsic_vrgather_mask_vv_nxv1f16_nxv1f16_nxv1i16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv1f16_nxv1f16_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT:    vrgather.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vrgather.vv.mask.nxv1f16.i64(
    <vscale x 1 x half> %0,
    <vscale x 1 x half> %1,
    <vscale x 1 x i16> %2,
    <vscale x 1 x i1> %3,
    i64 %4, i64 1)

  ret <vscale x 1 x half> %a
}

declare <vscale x 2 x half> @llvm.riscv.vrgather.vv.nxv2f16.i64(
  <vscale x 2 x half>,
  <vscale x 2 x half>,
  <vscale x 2 x i16>,
  i64)

define <vscale x 2 x half> @intrinsic_vrgather_vv_nxv2f16_nxv2f16_nxv2i16(<vscale x 2 x half> %0, <vscale x 2 x i16> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vv_nxv2f16_nxv2f16_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vrgather.vv v10, v8, v9
; CHECK-NEXT:    vmv1r.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x half> @llvm.riscv.vrgather.vv.nxv2f16.i64(
    <vscale x 2 x half> undef,
    <vscale x 2 x half> %0,
    <vscale x 2 x i16> %1,
    i64 %2)

  ret <vscale x 2 x half> %a
}

declare <vscale x 2 x half> @llvm.riscv.vrgather.vv.mask.nxv2f16.i64(
  <vscale x 2 x half>,
  <vscale x 2 x half>,
  <vscale x 2 x i16>,
  <vscale x 2 x i1>,
  i64,
  i64)

define <vscale x 2 x half> @intrinsic_vrgather_mask_vv_nxv2f16_nxv2f16_nxv2i16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv2f16_nxv2f16_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT:    vrgather.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x half> @llvm.riscv.vrgather.vv.mask.nxv2f16.i64(
    <vscale x 2 x half> %0,
    <vscale x 2 x half> %1,
    <vscale x 2 x i16> %2,
    <vscale x 2 x i1> %3,
    i64 %4, i64 1)

  ret <vscale x 2 x half> %a
}

declare <vscale x 4 x half> @llvm.riscv.vrgather.vv.nxv4f16.i64(
  <vscale x 4 x half>,
  <vscale x 4 x half>,
  <vscale x 4 x i16>,
  i64)

define <vscale x 4 x half> @intrinsic_vrgather_vv_nxv4f16_nxv4f16_nxv4i16(<vscale x 4 x half> %0, <vscale x 4 x i16> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vv_nxv4f16_nxv4f16_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vrgather.vv v10, v8, v9
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x half> @llvm.riscv.vrgather.vv.nxv4f16.i64(
    <vscale x 4 x half> undef,
    <vscale x 4 x half> %0,
    <vscale x 4 x i16> %1,
    i64 %2)

  ret <vscale x 4 x half> %a
}

declare <vscale x 4 x half> @llvm.riscv.vrgather.vv.mask.nxv4f16.i64(
  <vscale x 4 x half>,
  <vscale x 4 x half>,
  <vscale x 4 x i16>,
  <vscale x 4 x i1>,
  i64,
  i64)

define <vscale x 4 x half> @intrinsic_vrgather_mask_vv_nxv4f16_nxv4f16_nxv4i16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv4f16_nxv4f16_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT:    vrgather.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x half> @llvm.riscv.vrgather.vv.mask.nxv4f16.i64(
    <vscale x 4 x half> %0,
    <vscale x 4 x half> %1,
    <vscale x 4 x i16> %2,
    <vscale x 4 x i1> %3,
    i64 %4, i64 1)

  ret <vscale x 4 x half> %a
}

declare <vscale x 8 x half> @llvm.riscv.vrgather.vv.nxv8f16.i64(
  <vscale x 8 x half>,
  <vscale x 8 x half>,
  <vscale x 8 x i16>,
  i64)

define <vscale x 8 x half> @intrinsic_vrgather_vv_nxv8f16_nxv8f16_nxv8i16(<vscale x 8 x half> %0, <vscale x 8 x i16> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vv_nxv8f16_nxv8f16_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    vrgather.vv v12, v8, v10
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x half> @llvm.riscv.vrgather.vv.nxv8f16.i64(
    <vscale x 8 x half> undef,
    <vscale x 8 x half> %0,
    <vscale x 8 x i16> %1,
    i64 %2)

  ret <vscale x 8 x half> %a
}

declare <vscale x 8 x half> @llvm.riscv.vrgather.vv.mask.nxv8f16.i64(
  <vscale x 8 x half>,
  <vscale x 8 x half>,
  <vscale x 8 x i16>,
  <vscale x 8 x i1>,
  i64,
  i64)

define <vscale x 8 x half> @intrinsic_vrgather_mask_vv_nxv8f16_nxv8f16_nxv8i16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv8f16_nxv8f16_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT:    vrgather.vv v8, v10, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x half> @llvm.riscv.vrgather.vv.mask.nxv8f16.i64(
    <vscale x 8 x half> %0,
    <vscale x 8 x half> %1,
    <vscale x 8 x i16> %2,
    <vscale x 8 x i1> %3,
    i64 %4, i64 1)

  ret <vscale x 8 x half> %a
}

declare <vscale x 16 x half> @llvm.riscv.vrgather.vv.nxv16f16.i64(
  <vscale x 16 x half>,
  <vscale x 16 x half>,
  <vscale x 16 x i16>,
  i64)

define <vscale x 16 x half> @intrinsic_vrgather_vv_nxv16f16_nxv16f16_nxv16i16(<vscale x 16 x half> %0, <vscale x 16 x i16> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vv_nxv16f16_nxv16f16_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT:    vrgather.vv v16, v8, v12
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x half> @llvm.riscv.vrgather.vv.nxv16f16.i64(
    <vscale x 16 x half> undef,
    <vscale x 16 x half> %0,
    <vscale x 16 x i16> %1,
    i64 %2)

  ret <vscale x 16 x half> %a
}

declare <vscale x 16 x half> @llvm.riscv.vrgather.vv.mask.nxv16f16.i64(
  <vscale x 16 x half>,
  <vscale x 16 x half>,
  <vscale x 16 x i16>,
  <vscale x 16 x i1>,
  i64,
  i64)

define <vscale x 16 x half> @intrinsic_vrgather_mask_vv_nxv16f16_nxv16f16_nxv16i16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv16f16_nxv16f16_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT:    vrgather.vv v8, v12, v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x half> @llvm.riscv.vrgather.vv.mask.nxv16f16.i64(
    <vscale x 16 x half> %0,
    <vscale x 16 x half> %1,
    <vscale x 16 x i16> %2,
    <vscale x 16 x i1> %3,
    i64 %4, i64 1)

  ret <vscale x 16 x half> %a
}

declare <vscale x 32 x half> @llvm.riscv.vrgather.vv.nxv32f16.i64(
  <vscale x 32 x half>,
  <vscale x 32 x half>,
  <vscale x 32 x i16>,
  i64)

define <vscale x 32 x half> @intrinsic_vrgather_vv_nxv32f16_nxv32f16_nxv32i16(<vscale x 32 x half> %0, <vscale x 32 x i16> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vv_nxv32f16_nxv32f16_nxv32i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT:    vrgather.vv v24, v8, v16
; CHECK-NEXT:    vmv.v.v v8, v24
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x half> @llvm.riscv.vrgather.vv.nxv32f16.i64(
    <vscale x 32 x half> undef,
    <vscale x 32 x half> %0,
    <vscale x 32 x i16> %1,
    i64 %2)

  ret <vscale x 32 x half> %a
}

declare <vscale x 32 x half> @llvm.riscv.vrgather.vv.mask.nxv32f16.i64(
  <vscale x 32 x half>,
  <vscale x 32 x half>,
  <vscale x 32 x i16>,
  <vscale x 32 x i1>,
  i64,
  i64)

define <vscale x 32 x half> @intrinsic_vrgather_mask_vv_nxv32f16_nxv32f16_nxv32i16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv32f16_nxv32f16_nxv32i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vl8re16.v v24, (a0)
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
; CHECK-NEXT:    vrgather.vv v8, v16, v24, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x half> @llvm.riscv.vrgather.vv.mask.nxv32f16.i64(
    <vscale x 32 x half> %0,
    <vscale x 32 x half> %1,
    <vscale x 32 x i16> %2,
    <vscale x 32 x i1> %3,
    i64 %4, i64 1)

  ret <vscale x 32 x half> %a
}
declare <vscale x 1 x float> @llvm.riscv.vrgather.vv.nxv1f32.i64(
  <vscale x 1 x float>,
  <vscale x 1 x float>,
  <vscale x 1 x i32>,
  i64)

define <vscale x 1 x float> @intrinsic_vrgather_vv_nxv1f32_nxv1f32_nxv1i32(<vscale x 1 x float> %0, <vscale x 1 x i32> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vv_nxv1f32_nxv1f32_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vrgather.vv v10, v8, v9
; CHECK-NEXT:    vmv1r.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vrgather.vv.nxv1f32.i64(
    <vscale x 1 x float> undef,
    <vscale x 1 x float> %0,
    <vscale x 1 x i32> %1,
    i64 %2)

  ret <vscale x 1 x float> %a
}

declare <vscale x 1 x float> @llvm.riscv.vrgather.vv.mask.nxv1f32.i64(
  <vscale x 1 x float>,
  <vscale x 1 x float>,
  <vscale x 1 x i32>,
  <vscale x 1 x i1>,
  i64,
  i64)

define <vscale x 1 x float> @intrinsic_vrgather_mask_vv_nxv1f32_nxv1f32_nxv1i32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv1f32_nxv1f32_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT:    vrgather.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vrgather.vv.mask.nxv1f32.i64(
    <vscale x 1 x float> %0,
    <vscale x 1 x float> %1,
    <vscale x 1 x i32> %2,
    <vscale x 1 x i1> %3,
    i64 %4, i64 1)

  ret <vscale x 1 x float> %a
}

declare <vscale x 2 x float> @llvm.riscv.vrgather.vv.nxv2f32.i64(
  <vscale x 2 x float>,
  <vscale x 2 x float>,
  <vscale x 2 x i32>,
  i64)

define <vscale x 2 x float> @intrinsic_vrgather_vv_nxv2f32_nxv2f32_nxv2i32(<vscale x 2 x float> %0, <vscale x 2 x i32> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vv_nxv2f32_nxv2f32_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vrgather.vv v10, v8, v9
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vrgather.vv.nxv2f32.i64(
    <vscale x 2 x float> undef,
    <vscale x 2 x float> %0,
    <vscale x 2 x i32> %1,
    i64 %2)

  ret <vscale x 2 x float> %a
}

declare <vscale x 2 x float> @llvm.riscv.vrgather.vv.mask.nxv2f32.i64(
  <vscale x 2 x float>,
  <vscale x 2 x float>,
  <vscale x 2 x i32>,
  <vscale x 2 x i1>,
  i64,
  i64)

define <vscale x 2 x float> @intrinsic_vrgather_mask_vv_nxv2f32_nxv2f32_nxv2i32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv2f32_nxv2f32_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT:    vrgather.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vrgather.vv.mask.nxv2f32.i64(
    <vscale x 2 x float> %0,
    <vscale x 2 x float> %1,
    <vscale x 2 x i32> %2,
    <vscale x 2 x i1> %3,
    i64 %4, i64 1)

  ret <vscale x 2 x float> %a
}

declare <vscale x 4 x float> @llvm.riscv.vrgather.vv.nxv4f32.i64(
  <vscale x 4 x float>,
  <vscale x 4 x float>,
  <vscale x 4 x i32>,
  i64)

define <vscale x 4 x float> @intrinsic_vrgather_vv_nxv4f32_nxv4f32_nxv4i32(<vscale x 4 x float> %0, <vscale x 4 x i32> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vv_nxv4f32_nxv4f32_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vrgather.vv v12, v8, v10
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x float> @llvm.riscv.vrgather.vv.nxv4f32.i64(
    <vscale x 4 x float> undef,
    <vscale x 4 x float> %0,
    <vscale x 4 x i32> %1,
    i64 %2)

  ret <vscale x 4 x float> %a
}

declare <vscale x 4 x float> @llvm.riscv.vrgather.vv.mask.nxv4f32.i64(
  <vscale x 4 x float>,
  <vscale x 4 x float>,
  <vscale x 4 x i32>,
  <vscale x 4 x i1>,
  i64,
  i64)

define <vscale x 4 x float> @intrinsic_vrgather_mask_vv_nxv4f32_nxv4f32_nxv4i32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv4f32_nxv4f32_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT:    vrgather.vv v8, v10, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x float> @llvm.riscv.vrgather.vv.mask.nxv4f32.i64(
    <vscale x 4 x float> %0,
    <vscale x 4 x float> %1,
    <vscale x 4 x i32> %2,
    <vscale x 4 x i1> %3,
    i64 %4, i64 1)

  ret <vscale x 4 x float> %a
}

declare <vscale x 8 x float> @llvm.riscv.vrgather.vv.nxv8f32.i64(
  <vscale x 8 x float>,
  <vscale x 8 x float>,
  <vscale x 8 x i32>,
  i64)

define <vscale x 8 x float> @intrinsic_vrgather_vv_nxv8f32_nxv8f32_nxv8i32(<vscale x 8 x float> %0, <vscale x 8 x i32> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vv_nxv8f32_nxv8f32_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vrgather.vv v16, v8, v12
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x float> @llvm.riscv.vrgather.vv.nxv8f32.i64(
    <vscale x 8 x float> undef,
    <vscale x 8 x float> %0,
    <vscale x 8 x i32> %1,
    i64 %2)

  ret <vscale x 8 x float> %a
}

declare <vscale x 8 x float> @llvm.riscv.vrgather.vv.mask.nxv8f32.i64(
  <vscale x 8 x float>,
  <vscale x 8 x float>,
  <vscale x 8 x i32>,
  <vscale x 8 x i1>,
  i64,
  i64)

define <vscale x 8 x float> @intrinsic_vrgather_mask_vv_nxv8f32_nxv8f32_nxv8i32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv8f32_nxv8f32_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT:    vrgather.vv v8, v12, v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x float> @llvm.riscv.vrgather.vv.mask.nxv8f32.i64(
    <vscale x 8 x float> %0,
    <vscale x 8 x float> %1,
    <vscale x 8 x i32> %2,
    <vscale x 8 x i1> %3,
    i64 %4, i64 1)

  ret <vscale x 8 x float> %a
}

declare <vscale x 16 x float> @llvm.riscv.vrgather.vv.nxv16f32.i64(
  <vscale x 16 x float>,
  <vscale x 16 x float>,
  <vscale x 16 x i32>,
  i64)

define <vscale x 16 x float> @intrinsic_vrgather_vv_nxv16f32_nxv16f32_nxv16i32(<vscale x 16 x float> %0, <vscale x 16 x i32> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vv_nxv16f32_nxv16f32_nxv16i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT:    vrgather.vv v24, v8, v16
; CHECK-NEXT:    vmv.v.v v8, v24
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x float> @llvm.riscv.vrgather.vv.nxv16f32.i64(
    <vscale x 16 x float> undef,
    <vscale x 16 x float> %0,
    <vscale x 16 x i32> %1,
    i64 %2)

  ret <vscale x 16 x float> %a
}

declare <vscale x 16 x float> @llvm.riscv.vrgather.vv.mask.nxv16f32.i64(
  <vscale x 16 x float>,
  <vscale x 16 x float>,
  <vscale x 16 x i32>,
  <vscale x 16 x i1>,
  i64,
  i64)

define <vscale x 16 x float> @intrinsic_vrgather_mask_vv_nxv16f32_nxv16f32_nxv16i32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv16f32_nxv16f32_nxv16i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vl8re32.v v24, (a0)
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
; CHECK-NEXT:    vrgather.vv v8, v16, v24, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x float> @llvm.riscv.vrgather.vv.mask.nxv16f32.i64(
    <vscale x 16 x float> %0,
    <vscale x 16 x float> %1,
    <vscale x 16 x i32> %2,
    <vscale x 16 x i1> %3,
    i64 %4, i64 1)

  ret <vscale x 16 x float> %a
}
declare <vscale x 1 x double> @llvm.riscv.vrgather.vv.nxv1f64.i64(
  <vscale x 1 x double>,
  <vscale x 1 x double>,
  <vscale x 1 x i64>,
  i64)

define <vscale x 1 x double> @intrinsic_vrgather_vv_nxv1f64_nxv1f64_nxv1i64(<vscale x 1 x double> %0, <vscale x 1 x i64> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vv_nxv1f64_nxv1f64_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vrgather.vv v10, v8, v9
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vrgather.vv.nxv1f64.i64(
    <vscale x 1 x double> undef,
    <vscale x 1 x double> %0,
    <vscale x 1 x i64> %1,
    i64 %2)

  ret <vscale x 1 x double> %a
}

declare <vscale x 1 x double> @llvm.riscv.vrgather.vv.mask.nxv1f64.i64(
  <vscale x 1 x double>,
  <vscale x 1 x double>,
  <vscale x 1 x i64>,
  <vscale x 1 x i1>,
  i64,
  i64)

define <vscale x 1 x double> @intrinsic_vrgather_mask_vv_nxv1f64_nxv1f64_nxv1i64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv1f64_nxv1f64_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT:    vrgather.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vrgather.vv.mask.nxv1f64.i64(
    <vscale x 1 x double> %0,
    <vscale x 1 x double> %1,
    <vscale x 1 x i64> %2,
    <vscale x 1 x i1> %3,
    i64 %4, i64 1)

  ret <vscale x 1 x double> %a
}

declare <vscale x 2 x double> @llvm.riscv.vrgather.vv.nxv2f64.i64(
  <vscale x 2 x double>,
  <vscale x 2 x double>,
  <vscale x 2 x i64>,
  i64)

define <vscale x 2 x double> @intrinsic_vrgather_vv_nxv2f64_nxv2f64_nxv2i64(<vscale x 2 x double> %0, <vscale x 2 x i64> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vv_nxv2f64_nxv2f64_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT:    vrgather.vv v12, v8, v10
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x double> @llvm.riscv.vrgather.vv.nxv2f64.i64(
    <vscale x 2 x double> undef,
    <vscale x 2 x double> %0,
    <vscale x 2 x i64> %1,
    i64 %2)

  ret <vscale x 2 x double> %a
}

declare <vscale x 2 x double> @llvm.riscv.vrgather.vv.mask.nxv2f64.i64(
  <vscale x 2 x double>,
  <vscale x 2 x double>,
  <vscale x 2 x i64>,
  <vscale x 2 x i1>,
  i64,
  i64)

define <vscale x 2 x double> @intrinsic_vrgather_mask_vv_nxv2f64_nxv2f64_nxv2i64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv2f64_nxv2f64_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
; CHECK-NEXT:    vrgather.vv v8, v10, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x double> @llvm.riscv.vrgather.vv.mask.nxv2f64.i64(
    <vscale x 2 x double> %0,
    <vscale x 2 x double> %1,
    <vscale x 2 x i64> %2,
    <vscale x 2 x i1> %3,
    i64 %4, i64 1)

  ret <vscale x 2 x double> %a
}

declare <vscale x 4 x double> @llvm.riscv.vrgather.vv.nxv4f64.i64(
  <vscale x 4 x double>,
  <vscale x 4 x double>,
  <vscale x 4 x i64>,
  i64)

define <vscale x 4 x double> @intrinsic_vrgather_vv_nxv4f64_nxv4f64_nxv4i64(<vscale x 4 x double> %0, <vscale x 4 x i64> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vv_nxv4f64_nxv4f64_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT:    vrgather.vv v16, v8, v12
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x double> @llvm.riscv.vrgather.vv.nxv4f64.i64(
    <vscale x 4 x double> undef,
    <vscale x 4 x double> %0,
    <vscale x 4 x i64> %1,
    i64 %2)

  ret <vscale x 4 x double> %a
}

declare <vscale x 4 x double> @llvm.riscv.vrgather.vv.mask.nxv4f64.i64(
  <vscale x 4 x double>,
  <vscale x 4 x double>,
  <vscale x 4 x i64>,
  <vscale x 4 x i1>,
  i64,
  i64)

define <vscale x 4 x double> @intrinsic_vrgather_mask_vv_nxv4f64_nxv4f64_nxv4i64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv4f64_nxv4f64_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
; CHECK-NEXT:    vrgather.vv v8, v12, v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x double> @llvm.riscv.vrgather.vv.mask.nxv4f64.i64(
    <vscale x 4 x double> %0,
    <vscale x 4 x double> %1,
    <vscale x 4 x i64> %2,
    <vscale x 4 x i1> %3,
    i64 %4, i64 1)

  ret <vscale x 4 x double> %a
}

declare <vscale x 8 x double> @llvm.riscv.vrgather.vv.nxv8f64.i64(
  <vscale x 8 x double>,
  <vscale x 8 x double>,
  <vscale x 8 x i64>,
  i64)

define <vscale x 8 x double> @intrinsic_vrgather_vv_nxv8f64_nxv8f64_nxv8i64(<vscale x 8 x double> %0, <vscale x 8 x i64> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vv_nxv8f64_nxv8f64_nxv8i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vrgather.vv v24, v8, v16
; CHECK-NEXT:    vmv.v.v v8, v24
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x double> @llvm.riscv.vrgather.vv.nxv8f64.i64(
    <vscale x 8 x double> undef,
    <vscale x 8 x double> %0,
    <vscale x 8 x i64> %1,
    i64 %2)

  ret <vscale x 8 x double> %a
}

declare <vscale x 8 x double> @llvm.riscv.vrgather.vv.mask.nxv8f64.i64(
  <vscale x 8 x double>,
  <vscale x 8 x double>,
  <vscale x 8 x i64>,
  <vscale x 8 x i1>,
  i64,
  i64)

define <vscale x 8 x double> @intrinsic_vrgather_mask_vv_nxv8f64_nxv8f64_nxv8i64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv8f64_nxv8f64_nxv8i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vl8re64.v v24, (a0)
; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
; CHECK-NEXT:    vrgather.vv v8, v16, v24, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x double> @llvm.riscv.vrgather.vv.mask.nxv8f64.i64(
    <vscale x 8 x double> %0,
    <vscale x 8 x double> %1,
    <vscale x 8 x i64> %2,
    <vscale x 8 x i1> %3,
    i64 %4, i64 1)

  ret <vscale x 8 x double> %a
}
1788 declare <vscale x 1 x i8> @llvm.riscv.vrgather.vx.nxv1i8.i64(
1794 define <vscale x 1 x i8> @intrinsic_vrgather_vx_nxv1i8_nxv1i8_i64(<vscale x 1 x i8> %0, i64 %1, i64 %2) nounwind {
1795 ; CHECK-LABEL: intrinsic_vrgather_vx_nxv1i8_nxv1i8_i64:
1796 ; CHECK: # %bb.0: # %entry
1797 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
1798 ; CHECK-NEXT: vrgather.vx v9, v8, a0
1799 ; CHECK-NEXT: vmv1r.v v8, v9
1802 %a = call <vscale x 1 x i8> @llvm.riscv.vrgather.vx.nxv1i8.i64(
1803 <vscale x 1 x i8> undef,
1804 <vscale x 1 x i8> %0,
1808 ret <vscale x 1 x i8> %a
1811 declare <vscale x 1 x i8> @llvm.riscv.vrgather.vx.mask.nxv1i8.i64(
1819 define <vscale x 1 x i8> @intrinsic_vrgather_mask_vx_nxv1i8_nxv1i8_i64(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
1820 ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv1i8_nxv1i8_i64:
1821 ; CHECK: # %bb.0: # %entry
1822 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
1823 ; CHECK-NEXT: vrgather.vx v8, v9, a0, v0.t
1826 %a = call <vscale x 1 x i8> @llvm.riscv.vrgather.vx.mask.nxv1i8.i64(
1827 <vscale x 1 x i8> %0,
1828 <vscale x 1 x i8> %1,
1830 <vscale x 1 x i1> %3,
1833 ret <vscale x 1 x i8> %a
1836 declare <vscale x 2 x i8> @llvm.riscv.vrgather.vx.nxv2i8.i64(
1842 define <vscale x 2 x i8> @intrinsic_vrgather_vx_nxv2i8_nxv2i8_i64(<vscale x 2 x i8> %0, i64 %1, i64 %2) nounwind {
1843 ; CHECK-LABEL: intrinsic_vrgather_vx_nxv2i8_nxv2i8_i64:
1844 ; CHECK: # %bb.0: # %entry
1845 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
1846 ; CHECK-NEXT: vrgather.vx v9, v8, a0
1847 ; CHECK-NEXT: vmv1r.v v8, v9
1850 %a = call <vscale x 2 x i8> @llvm.riscv.vrgather.vx.nxv2i8.i64(
1851 <vscale x 2 x i8> undef,
1852 <vscale x 2 x i8> %0,
1856 ret <vscale x 2 x i8> %a
1859 declare <vscale x 2 x i8> @llvm.riscv.vrgather.vx.mask.nxv2i8.i64(
1867 define <vscale x 2 x i8> @intrinsic_vrgather_mask_vx_nxv2i8_nxv2i8_i64(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
1868 ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv2i8_nxv2i8_i64:
1869 ; CHECK: # %bb.0: # %entry
1870 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
1871 ; CHECK-NEXT: vrgather.vx v8, v9, a0, v0.t
1874 %a = call <vscale x 2 x i8> @llvm.riscv.vrgather.vx.mask.nxv2i8.i64(
1875 <vscale x 2 x i8> %0,
1876 <vscale x 2 x i8> %1,
1878 <vscale x 2 x i1> %3,
1881 ret <vscale x 2 x i8> %a
1884 declare <vscale x 4 x i8> @llvm.riscv.vrgather.vx.nxv4i8.i64(
1890 define <vscale x 4 x i8> @intrinsic_vrgather_vx_nxv4i8_nxv4i8_i64(<vscale x 4 x i8> %0, i64 %1, i64 %2) nounwind {
1891 ; CHECK-LABEL: intrinsic_vrgather_vx_nxv4i8_nxv4i8_i64:
1892 ; CHECK: # %bb.0: # %entry
1893 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
1894 ; CHECK-NEXT: vrgather.vx v9, v8, a0
1895 ; CHECK-NEXT: vmv1r.v v8, v9
1898 %a = call <vscale x 4 x i8> @llvm.riscv.vrgather.vx.nxv4i8.i64(
1899 <vscale x 4 x i8> undef,
1900 <vscale x 4 x i8> %0,
1904 ret <vscale x 4 x i8> %a
1907 declare <vscale x 4 x i8> @llvm.riscv.vrgather.vx.mask.nxv4i8.i64(
1915 define <vscale x 4 x i8> @intrinsic_vrgather_mask_vx_nxv4i8_nxv4i8_i64(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
1916 ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv4i8_nxv4i8_i64:
1917 ; CHECK: # %bb.0: # %entry
1918 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
1919 ; CHECK-NEXT: vrgather.vx v8, v9, a0, v0.t
1922 %a = call <vscale x 4 x i8> @llvm.riscv.vrgather.vx.mask.nxv4i8.i64(
1923 <vscale x 4 x i8> %0,
1924 <vscale x 4 x i8> %1,
1926 <vscale x 4 x i1> %3,
1929 ret <vscale x 4 x i8> %a
1932 declare <vscale x 8 x i8> @llvm.riscv.vrgather.vx.nxv8i8.i64(
1938 define <vscale x 8 x i8> @intrinsic_vrgather_vx_nxv8i8_nxv8i8_i64(<vscale x 8 x i8> %0, i64 %1, i64 %2) nounwind {
1939 ; CHECK-LABEL: intrinsic_vrgather_vx_nxv8i8_nxv8i8_i64:
1940 ; CHECK: # %bb.0: # %entry
1941 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
1942 ; CHECK-NEXT: vrgather.vx v9, v8, a0
1943 ; CHECK-NEXT: vmv.v.v v8, v9
1946 %a = call <vscale x 8 x i8> @llvm.riscv.vrgather.vx.nxv8i8.i64(
1947 <vscale x 8 x i8> undef,
1948 <vscale x 8 x i8> %0,
1952 ret <vscale x 8 x i8> %a
1955 declare <vscale x 8 x i8> @llvm.riscv.vrgather.vx.mask.nxv8i8.i64(
1963 define <vscale x 8 x i8> @intrinsic_vrgather_mask_vx_nxv8i8_nxv8i8_i64(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
1964 ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv8i8_nxv8i8_i64:
1965 ; CHECK: # %bb.0: # %entry
1966 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
1967 ; CHECK-NEXT: vrgather.vx v8, v9, a0, v0.t
1970 %a = call <vscale x 8 x i8> @llvm.riscv.vrgather.vx.mask.nxv8i8.i64(
1971 <vscale x 8 x i8> %0,
1972 <vscale x 8 x i8> %1,
1974 <vscale x 8 x i1> %3,
1977 ret <vscale x 8 x i8> %a
1980 declare <vscale x 16 x i8> @llvm.riscv.vrgather.vx.nxv16i8.i64(
1986 define <vscale x 16 x i8> @intrinsic_vrgather_vx_nxv16i8_nxv16i8_i64(<vscale x 16 x i8> %0, i64 %1, i64 %2) nounwind {
1987 ; CHECK-LABEL: intrinsic_vrgather_vx_nxv16i8_nxv16i8_i64:
1988 ; CHECK: # %bb.0: # %entry
1989 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
1990 ; CHECK-NEXT: vrgather.vx v10, v8, a0
1991 ; CHECK-NEXT: vmv.v.v v8, v10
1994 %a = call <vscale x 16 x i8> @llvm.riscv.vrgather.vx.nxv16i8.i64(
1995 <vscale x 16 x i8> undef,
1996 <vscale x 16 x i8> %0,
2000 ret <vscale x 16 x i8> %a
2003 declare <vscale x 16 x i8> @llvm.riscv.vrgather.vx.mask.nxv16i8.i64(
2011 define <vscale x 16 x i8> @intrinsic_vrgather_mask_vx_nxv16i8_nxv16i8_i64(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i64 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
2012 ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv16i8_nxv16i8_i64:
2013 ; CHECK: # %bb.0: # %entry
2014 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
2015 ; CHECK-NEXT: vrgather.vx v8, v10, a0, v0.t
2018 %a = call <vscale x 16 x i8> @llvm.riscv.vrgather.vx.mask.nxv16i8.i64(
2019 <vscale x 16 x i8> %0,
2020 <vscale x 16 x i8> %1,
2022 <vscale x 16 x i1> %3,
2025 ret <vscale x 16 x i8> %a
2028 declare <vscale x 32 x i8> @llvm.riscv.vrgather.vx.nxv32i8.i64(
2034 define <vscale x 32 x i8> @intrinsic_vrgather_vx_nxv32i8_nxv32i8_i64(<vscale x 32 x i8> %0, i64 %1, i64 %2) nounwind {
2035 ; CHECK-LABEL: intrinsic_vrgather_vx_nxv32i8_nxv32i8_i64:
2036 ; CHECK: # %bb.0: # %entry
2037 ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
2038 ; CHECK-NEXT: vrgather.vx v12, v8, a0
2039 ; CHECK-NEXT: vmv.v.v v8, v12
2040 ; CHECK-NEXT: ret
2041 entry:
2042 %a = call <vscale x 32 x i8> @llvm.riscv.vrgather.vx.nxv32i8.i64(
2043 <vscale x 32 x i8> undef,
2044 <vscale x 32 x i8> %0,
2045 i64 %1,
2046 i64 %2)
2048 ret <vscale x 32 x i8> %a
2049 }
2051 declare <vscale x 32 x i8> @llvm.riscv.vrgather.vx.mask.nxv32i8.i64(
2052 <vscale x 32 x i8>,
2053 <vscale x 32 x i8>,
2054 i64,
2055 <vscale x 32 x i1>,
2056 i64,
2057 i64)
2059 define <vscale x 32 x i8> @intrinsic_vrgather_mask_vx_nxv32i8_nxv32i8_i64(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i64 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
2060 ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv32i8_nxv32i8_i64:
2061 ; CHECK: # %bb.0: # %entry
2062 ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
2063 ; CHECK-NEXT: vrgather.vx v8, v12, a0, v0.t
2064 ; CHECK-NEXT: ret
2065 entry:
2066 %a = call <vscale x 32 x i8> @llvm.riscv.vrgather.vx.mask.nxv32i8.i64(
2067 <vscale x 32 x i8> %0,
2068 <vscale x 32 x i8> %1,
2069 i64 %2,
2070 <vscale x 32 x i1> %3,
2071 i64 %4, i64 1)
2073 ret <vscale x 32 x i8> %a
2074 }
2076 declare <vscale x 64 x i8> @llvm.riscv.vrgather.vx.nxv64i8.i64(
2077 <vscale x 64 x i8>,
2078 <vscale x 64 x i8>,
2079 i64,
2080 i64)
2082 define <vscale x 64 x i8> @intrinsic_vrgather_vx_nxv64i8_nxv64i8_i64(<vscale x 64 x i8> %0, i64 %1, i64 %2) nounwind {
2083 ; CHECK-LABEL: intrinsic_vrgather_vx_nxv64i8_nxv64i8_i64:
2084 ; CHECK: # %bb.0: # %entry
2085 ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
2086 ; CHECK-NEXT: vrgather.vx v16, v8, a0
2087 ; CHECK-NEXT: vmv.v.v v8, v16
2088 ; CHECK-NEXT: ret
2089 entry:
2090 %a = call <vscale x 64 x i8> @llvm.riscv.vrgather.vx.nxv64i8.i64(
2091 <vscale x 64 x i8> undef,
2092 <vscale x 64 x i8> %0,
2093 i64 %1,
2094 i64 %2)
2096 ret <vscale x 64 x i8> %a
2097 }
2099 declare <vscale x 64 x i8> @llvm.riscv.vrgather.vx.mask.nxv64i8.i64(
2100 <vscale x 64 x i8>,
2101 <vscale x 64 x i8>,
2102 i64,
2103 <vscale x 64 x i1>,
2104 i64,
2105 i64)
2107 define <vscale x 64 x i8> @intrinsic_vrgather_mask_vx_nxv64i8_nxv64i8_i64(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i64 %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
2108 ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv64i8_nxv64i8_i64:
2109 ; CHECK: # %bb.0: # %entry
2110 ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu
2111 ; CHECK-NEXT: vrgather.vx v8, v16, a0, v0.t
2112 ; CHECK-NEXT: ret
2113 entry:
2114 %a = call <vscale x 64 x i8> @llvm.riscv.vrgather.vx.mask.nxv64i8.i64(
2115 <vscale x 64 x i8> %0,
2116 <vscale x 64 x i8> %1,
2117 i64 %2,
2118 <vscale x 64 x i1> %3,
2119 i64 %4, i64 1)
2121 ret <vscale x 64 x i8> %a
2122 }
2124 declare <vscale x 1 x i16> @llvm.riscv.vrgather.vx.nxv1i16.i64(
2125 <vscale x 1 x i16>,
2126 <vscale x 1 x i16>,
2127 i64,
2128 i64)
2130 define <vscale x 1 x i16> @intrinsic_vrgather_vx_nxv1i16_nxv1i16_i64(<vscale x 1 x i16> %0, i64 %1, i64 %2) nounwind {
2131 ; CHECK-LABEL: intrinsic_vrgather_vx_nxv1i16_nxv1i16_i64:
2132 ; CHECK: # %bb.0: # %entry
2133 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
2134 ; CHECK-NEXT: vrgather.vx v9, v8, a0
2135 ; CHECK-NEXT: vmv1r.v v8, v9
2136 ; CHECK-NEXT: ret
2137 entry:
2138 %a = call <vscale x 1 x i16> @llvm.riscv.vrgather.vx.nxv1i16.i64(
2139 <vscale x 1 x i16> undef,
2140 <vscale x 1 x i16> %0,
2141 i64 %1,
2142 i64 %2)
2144 ret <vscale x 1 x i16> %a
2145 }
2147 declare <vscale x 1 x i16> @llvm.riscv.vrgather.vx.mask.nxv1i16.i64(
2148 <vscale x 1 x i16>,
2149 <vscale x 1 x i16>,
2150 i64,
2151 <vscale x 1 x i1>,
2152 i64,
2153 i64)
2155 define <vscale x 1 x i16> @intrinsic_vrgather_mask_vx_nxv1i16_nxv1i16_i64(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
2156 ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv1i16_nxv1i16_i64:
2157 ; CHECK: # %bb.0: # %entry
2158 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
2159 ; CHECK-NEXT: vrgather.vx v8, v9, a0, v0.t
2160 ; CHECK-NEXT: ret
2161 entry:
2162 %a = call <vscale x 1 x i16> @llvm.riscv.vrgather.vx.mask.nxv1i16.i64(
2163 <vscale x 1 x i16> %0,
2164 <vscale x 1 x i16> %1,
2165 i64 %2,
2166 <vscale x 1 x i1> %3,
2167 i64 %4, i64 1)
2169 ret <vscale x 1 x i16> %a
2170 }
2172 declare <vscale x 2 x i16> @llvm.riscv.vrgather.vx.nxv2i16.i64(
2173 <vscale x 2 x i16>,
2174 <vscale x 2 x i16>,
2175 i64,
2176 i64)
2178 define <vscale x 2 x i16> @intrinsic_vrgather_vx_nxv2i16_nxv2i16_i64(<vscale x 2 x i16> %0, i64 %1, i64 %2) nounwind {
2179 ; CHECK-LABEL: intrinsic_vrgather_vx_nxv2i16_nxv2i16_i64:
2180 ; CHECK: # %bb.0: # %entry
2181 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
2182 ; CHECK-NEXT: vrgather.vx v9, v8, a0
2183 ; CHECK-NEXT: vmv1r.v v8, v9
2184 ; CHECK-NEXT: ret
2185 entry:
2186 %a = call <vscale x 2 x i16> @llvm.riscv.vrgather.vx.nxv2i16.i64(
2187 <vscale x 2 x i16> undef,
2188 <vscale x 2 x i16> %0,
2189 i64 %1,
2190 i64 %2)
2192 ret <vscale x 2 x i16> %a
2193 }
2195 declare <vscale x 2 x i16> @llvm.riscv.vrgather.vx.mask.nxv2i16.i64(
2196 <vscale x 2 x i16>,
2197 <vscale x 2 x i16>,
2198 i64,
2199 <vscale x 2 x i1>,
2200 i64,
2201 i64)
2203 define <vscale x 2 x i16> @intrinsic_vrgather_mask_vx_nxv2i16_nxv2i16_i64(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
2204 ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv2i16_nxv2i16_i64:
2205 ; CHECK: # %bb.0: # %entry
2206 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
2207 ; CHECK-NEXT: vrgather.vx v8, v9, a0, v0.t
2208 ; CHECK-NEXT: ret
2209 entry:
2210 %a = call <vscale x 2 x i16> @llvm.riscv.vrgather.vx.mask.nxv2i16.i64(
2211 <vscale x 2 x i16> %0,
2212 <vscale x 2 x i16> %1,
2213 i64 %2,
2214 <vscale x 2 x i1> %3,
2215 i64 %4, i64 1)
2217 ret <vscale x 2 x i16> %a
2218 }
2220 declare <vscale x 4 x i16> @llvm.riscv.vrgather.vx.nxv4i16.i64(
2221 <vscale x 4 x i16>,
2222 <vscale x 4 x i16>,
2223 i64,
2224 i64)
2226 define <vscale x 4 x i16> @intrinsic_vrgather_vx_nxv4i16_nxv4i16_i64(<vscale x 4 x i16> %0, i64 %1, i64 %2) nounwind {
2227 ; CHECK-LABEL: intrinsic_vrgather_vx_nxv4i16_nxv4i16_i64:
2228 ; CHECK: # %bb.0: # %entry
2229 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
2230 ; CHECK-NEXT: vrgather.vx v9, v8, a0
2231 ; CHECK-NEXT: vmv.v.v v8, v9
2232 ; CHECK-NEXT: ret
2233 entry:
2234 %a = call <vscale x 4 x i16> @llvm.riscv.vrgather.vx.nxv4i16.i64(
2235 <vscale x 4 x i16> undef,
2236 <vscale x 4 x i16> %0,
2237 i64 %1,
2238 i64 %2)
2240 ret <vscale x 4 x i16> %a
2241 }
2243 declare <vscale x 4 x i16> @llvm.riscv.vrgather.vx.mask.nxv4i16.i64(
2244 <vscale x 4 x i16>,
2245 <vscale x 4 x i16>,
2246 i64,
2247 <vscale x 4 x i1>,
2248 i64,
2249 i64)
2251 define <vscale x 4 x i16> @intrinsic_vrgather_mask_vx_nxv4i16_nxv4i16_i64(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
2252 ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv4i16_nxv4i16_i64:
2253 ; CHECK: # %bb.0: # %entry
2254 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
2255 ; CHECK-NEXT: vrgather.vx v8, v9, a0, v0.t
2256 ; CHECK-NEXT: ret
2257 entry:
2258 %a = call <vscale x 4 x i16> @llvm.riscv.vrgather.vx.mask.nxv4i16.i64(
2259 <vscale x 4 x i16> %0,
2260 <vscale x 4 x i16> %1,
2261 i64 %2,
2262 <vscale x 4 x i1> %3,
2263 i64 %4, i64 1)
2265 ret <vscale x 4 x i16> %a
2266 }
2268 declare <vscale x 8 x i16> @llvm.riscv.vrgather.vx.nxv8i16.i64(
2269 <vscale x 8 x i16>,
2270 <vscale x 8 x i16>,
2271 i64,
2272 i64)
2274 define <vscale x 8 x i16> @intrinsic_vrgather_vx_nxv8i16_nxv8i16_i64(<vscale x 8 x i16> %0, i64 %1, i64 %2) nounwind {
2275 ; CHECK-LABEL: intrinsic_vrgather_vx_nxv8i16_nxv8i16_i64:
2276 ; CHECK: # %bb.0: # %entry
2277 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
2278 ; CHECK-NEXT: vrgather.vx v10, v8, a0
2279 ; CHECK-NEXT: vmv.v.v v8, v10
2280 ; CHECK-NEXT: ret
2281 entry:
2282 %a = call <vscale x 8 x i16> @llvm.riscv.vrgather.vx.nxv8i16.i64(
2283 <vscale x 8 x i16> undef,
2284 <vscale x 8 x i16> %0,
2285 i64 %1,
2286 i64 %2)
2288 ret <vscale x 8 x i16> %a
2289 }
2291 declare <vscale x 8 x i16> @llvm.riscv.vrgather.vx.mask.nxv8i16.i64(
2292 <vscale x 8 x i16>,
2293 <vscale x 8 x i16>,
2294 i64,
2295 <vscale x 8 x i1>,
2296 i64,
2297 i64)
2299 define <vscale x 8 x i16> @intrinsic_vrgather_mask_vx_nxv8i16_nxv8i16_i64(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
2300 ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv8i16_nxv8i16_i64:
2301 ; CHECK: # %bb.0: # %entry
2302 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
2303 ; CHECK-NEXT: vrgather.vx v8, v10, a0, v0.t
2304 ; CHECK-NEXT: ret
2305 entry:
2306 %a = call <vscale x 8 x i16> @llvm.riscv.vrgather.vx.mask.nxv8i16.i64(
2307 <vscale x 8 x i16> %0,
2308 <vscale x 8 x i16> %1,
2309 i64 %2,
2310 <vscale x 8 x i1> %3,
2311 i64 %4, i64 1)
2313 ret <vscale x 8 x i16> %a
2314 }
2316 declare <vscale x 16 x i16> @llvm.riscv.vrgather.vx.nxv16i16.i64(
2317 <vscale x 16 x i16>,
2318 <vscale x 16 x i16>,
2319 i64,
2320 i64)
2322 define <vscale x 16 x i16> @intrinsic_vrgather_vx_nxv16i16_nxv16i16_i64(<vscale x 16 x i16> %0, i64 %1, i64 %2) nounwind {
2323 ; CHECK-LABEL: intrinsic_vrgather_vx_nxv16i16_nxv16i16_i64:
2324 ; CHECK: # %bb.0: # %entry
2325 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
2326 ; CHECK-NEXT: vrgather.vx v12, v8, a0
2327 ; CHECK-NEXT: vmv.v.v v8, v12
2328 ; CHECK-NEXT: ret
2329 entry:
2330 %a = call <vscale x 16 x i16> @llvm.riscv.vrgather.vx.nxv16i16.i64(
2331 <vscale x 16 x i16> undef,
2332 <vscale x 16 x i16> %0,
2333 i64 %1,
2334 i64 %2)
2336 ret <vscale x 16 x i16> %a
2337 }
2339 declare <vscale x 16 x i16> @llvm.riscv.vrgather.vx.mask.nxv16i16.i64(
2340 <vscale x 16 x i16>,
2341 <vscale x 16 x i16>,
2342 i64,
2343 <vscale x 16 x i1>,
2344 i64,
2345 i64)
2347 define <vscale x 16 x i16> @intrinsic_vrgather_mask_vx_nxv16i16_nxv16i16_i64(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i64 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
2348 ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv16i16_nxv16i16_i64:
2349 ; CHECK: # %bb.0: # %entry
2350 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
2351 ; CHECK-NEXT: vrgather.vx v8, v12, a0, v0.t
2352 ; CHECK-NEXT: ret
2353 entry:
2354 %a = call <vscale x 16 x i16> @llvm.riscv.vrgather.vx.mask.nxv16i16.i64(
2355 <vscale x 16 x i16> %0,
2356 <vscale x 16 x i16> %1,
2357 i64 %2,
2358 <vscale x 16 x i1> %3,
2359 i64 %4, i64 1)
2361 ret <vscale x 16 x i16> %a
2362 }
2364 declare <vscale x 32 x i16> @llvm.riscv.vrgather.vx.nxv32i16.i64(
2365 <vscale x 32 x i16>,
2366 <vscale x 32 x i16>,
2367 i64,
2368 i64)
2370 define <vscale x 32 x i16> @intrinsic_vrgather_vx_nxv32i16_nxv32i16_i64(<vscale x 32 x i16> %0, i64 %1, i64 %2) nounwind {
2371 ; CHECK-LABEL: intrinsic_vrgather_vx_nxv32i16_nxv32i16_i64:
2372 ; CHECK: # %bb.0: # %entry
2373 ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
2374 ; CHECK-NEXT: vrgather.vx v16, v8, a0
2375 ; CHECK-NEXT: vmv.v.v v8, v16
2376 ; CHECK-NEXT: ret
2377 entry:
2378 %a = call <vscale x 32 x i16> @llvm.riscv.vrgather.vx.nxv32i16.i64(
2379 <vscale x 32 x i16> undef,
2380 <vscale x 32 x i16> %0,
2381 i64 %1,
2382 i64 %2)
2384 ret <vscale x 32 x i16> %a
2385 }
2387 declare <vscale x 32 x i16> @llvm.riscv.vrgather.vx.mask.nxv32i16.i64(
2388 <vscale x 32 x i16>,
2389 <vscale x 32 x i16>,
2390 i64,
2391 <vscale x 32 x i1>,
2392 i64,
2393 i64)
2395 define <vscale x 32 x i16> @intrinsic_vrgather_mask_vx_nxv32i16_nxv32i16_i64(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i64 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
2396 ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv32i16_nxv32i16_i64:
2397 ; CHECK: # %bb.0: # %entry
2398 ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu
2399 ; CHECK-NEXT: vrgather.vx v8, v16, a0, v0.t
2400 ; CHECK-NEXT: ret
2401 entry:
2402 %a = call <vscale x 32 x i16> @llvm.riscv.vrgather.vx.mask.nxv32i16.i64(
2403 <vscale x 32 x i16> %0,
2404 <vscale x 32 x i16> %1,
2405 i64 %2,
2406 <vscale x 32 x i1> %3,
2407 i64 %4, i64 1)
2409 ret <vscale x 32 x i16> %a
2410 }
2412 declare <vscale x 1 x i32> @llvm.riscv.vrgather.vx.nxv1i32.i64(
2413 <vscale x 1 x i32>,
2414 <vscale x 1 x i32>,
2415 i64,
2416 i64)
2418 define <vscale x 1 x i32> @intrinsic_vrgather_vx_nxv1i32_nxv1i32_i64(<vscale x 1 x i32> %0, i64 %1, i64 %2) nounwind {
2419 ; CHECK-LABEL: intrinsic_vrgather_vx_nxv1i32_nxv1i32_i64:
2420 ; CHECK: # %bb.0: # %entry
2421 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
2422 ; CHECK-NEXT: vrgather.vx v9, v8, a0
2423 ; CHECK-NEXT: vmv1r.v v8, v9
2424 ; CHECK-NEXT: ret
2425 entry:
2426 %a = call <vscale x 1 x i32> @llvm.riscv.vrgather.vx.nxv1i32.i64(
2427 <vscale x 1 x i32> undef,
2428 <vscale x 1 x i32> %0,
2429 i64 %1,
2430 i64 %2)
2432 ret <vscale x 1 x i32> %a
2433 }
2435 declare <vscale x 1 x i32> @llvm.riscv.vrgather.vx.mask.nxv1i32.i64(
2436 <vscale x 1 x i32>,
2437 <vscale x 1 x i32>,
2438 i64,
2439 <vscale x 1 x i1>,
2440 i64,
2441 i64)
2443 define <vscale x 1 x i32> @intrinsic_vrgather_mask_vx_nxv1i32_nxv1i32_i64(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
2444 ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv1i32_nxv1i32_i64:
2445 ; CHECK: # %bb.0: # %entry
2446 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
2447 ; CHECK-NEXT: vrgather.vx v8, v9, a0, v0.t
2448 ; CHECK-NEXT: ret
2449 entry:
2450 %a = call <vscale x 1 x i32> @llvm.riscv.vrgather.vx.mask.nxv1i32.i64(
2451 <vscale x 1 x i32> %0,
2452 <vscale x 1 x i32> %1,
2453 i64 %2,
2454 <vscale x 1 x i1> %3,
2455 i64 %4, i64 1)
2457 ret <vscale x 1 x i32> %a
2458 }
2460 declare <vscale x 2 x i32> @llvm.riscv.vrgather.vx.nxv2i32.i64(
2461 <vscale x 2 x i32>,
2462 <vscale x 2 x i32>,
2463 i64,
2464 i64)
2466 define <vscale x 2 x i32> @intrinsic_vrgather_vx_nxv2i32_nxv2i32_i64(<vscale x 2 x i32> %0, i64 %1, i64 %2) nounwind {
2467 ; CHECK-LABEL: intrinsic_vrgather_vx_nxv2i32_nxv2i32_i64:
2468 ; CHECK: # %bb.0: # %entry
2469 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
2470 ; CHECK-NEXT: vrgather.vx v9, v8, a0
2471 ; CHECK-NEXT: vmv.v.v v8, v9
2472 ; CHECK-NEXT: ret
2473 entry:
2474 %a = call <vscale x 2 x i32> @llvm.riscv.vrgather.vx.nxv2i32.i64(
2475 <vscale x 2 x i32> undef,
2476 <vscale x 2 x i32> %0,
2477 i64 %1,
2478 i64 %2)
2480 ret <vscale x 2 x i32> %a
2481 }
2483 declare <vscale x 2 x i32> @llvm.riscv.vrgather.vx.mask.nxv2i32.i64(
2484 <vscale x 2 x i32>,
2485 <vscale x 2 x i32>,
2486 i64,
2487 <vscale x 2 x i1>,
2488 i64,
2489 i64)
2491 define <vscale x 2 x i32> @intrinsic_vrgather_mask_vx_nxv2i32_nxv2i32_i64(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
2492 ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv2i32_nxv2i32_i64:
2493 ; CHECK: # %bb.0: # %entry
2494 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
2495 ; CHECK-NEXT: vrgather.vx v8, v9, a0, v0.t
2496 ; CHECK-NEXT: ret
2497 entry:
2498 %a = call <vscale x 2 x i32> @llvm.riscv.vrgather.vx.mask.nxv2i32.i64(
2499 <vscale x 2 x i32> %0,
2500 <vscale x 2 x i32> %1,
2501 i64 %2,
2502 <vscale x 2 x i1> %3,
2503 i64 %4, i64 1)
2505 ret <vscale x 2 x i32> %a
2506 }
2508 declare <vscale x 4 x i32> @llvm.riscv.vrgather.vx.nxv4i32.i64(
2509 <vscale x 4 x i32>,
2510 <vscale x 4 x i32>,
2511 i64,
2512 i64)
2514 define <vscale x 4 x i32> @intrinsic_vrgather_vx_nxv4i32_nxv4i32_i64(<vscale x 4 x i32> %0, i64 %1, i64 %2) nounwind {
2515 ; CHECK-LABEL: intrinsic_vrgather_vx_nxv4i32_nxv4i32_i64:
2516 ; CHECK: # %bb.0: # %entry
2517 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
2518 ; CHECK-NEXT: vrgather.vx v10, v8, a0
2519 ; CHECK-NEXT: vmv.v.v v8, v10
2520 ; CHECK-NEXT: ret
2521 entry:
2522 %a = call <vscale x 4 x i32> @llvm.riscv.vrgather.vx.nxv4i32.i64(
2523 <vscale x 4 x i32> undef,
2524 <vscale x 4 x i32> %0,
2525 i64 %1,
2526 i64 %2)
2528 ret <vscale x 4 x i32> %a
2529 }
2531 declare <vscale x 4 x i32> @llvm.riscv.vrgather.vx.mask.nxv4i32.i64(
2532 <vscale x 4 x i32>,
2533 <vscale x 4 x i32>,
2534 i64,
2535 <vscale x 4 x i1>,
2536 i64,
2537 i64)
2539 define <vscale x 4 x i32> @intrinsic_vrgather_mask_vx_nxv4i32_nxv4i32_i64(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
2540 ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv4i32_nxv4i32_i64:
2541 ; CHECK: # %bb.0: # %entry
2542 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
2543 ; CHECK-NEXT: vrgather.vx v8, v10, a0, v0.t
2544 ; CHECK-NEXT: ret
2545 entry:
2546 %a = call <vscale x 4 x i32> @llvm.riscv.vrgather.vx.mask.nxv4i32.i64(
2547 <vscale x 4 x i32> %0,
2548 <vscale x 4 x i32> %1,
2549 i64 %2,
2550 <vscale x 4 x i1> %3,
2551 i64 %4, i64 1)
2553 ret <vscale x 4 x i32> %a
2554 }
2556 declare <vscale x 8 x i32> @llvm.riscv.vrgather.vx.nxv8i32.i64(
2557 <vscale x 8 x i32>,
2558 <vscale x 8 x i32>,
2559 i64,
2560 i64)
2562 define <vscale x 8 x i32> @intrinsic_vrgather_vx_nxv8i32_nxv8i32_i64(<vscale x 8 x i32> %0, i64 %1, i64 %2) nounwind {
2563 ; CHECK-LABEL: intrinsic_vrgather_vx_nxv8i32_nxv8i32_i64:
2564 ; CHECK: # %bb.0: # %entry
2565 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
2566 ; CHECK-NEXT: vrgather.vx v12, v8, a0
2567 ; CHECK-NEXT: vmv.v.v v8, v12
2568 ; CHECK-NEXT: ret
2569 entry:
2570 %a = call <vscale x 8 x i32> @llvm.riscv.vrgather.vx.nxv8i32.i64(
2571 <vscale x 8 x i32> undef,
2572 <vscale x 8 x i32> %0,
2573 i64 %1,
2574 i64 %2)
2576 ret <vscale x 8 x i32> %a
2577 }
2579 declare <vscale x 8 x i32> @llvm.riscv.vrgather.vx.mask.nxv8i32.i64(
2580 <vscale x 8 x i32>,
2581 <vscale x 8 x i32>,
2582 i64,
2583 <vscale x 8 x i1>,
2584 i64,
2585 i64)
2587 define <vscale x 8 x i32> @intrinsic_vrgather_mask_vx_nxv8i32_nxv8i32_i64(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
2588 ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv8i32_nxv8i32_i64:
2589 ; CHECK: # %bb.0: # %entry
2590 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
2591 ; CHECK-NEXT: vrgather.vx v8, v12, a0, v0.t
2592 ; CHECK-NEXT: ret
2593 entry:
2594 %a = call <vscale x 8 x i32> @llvm.riscv.vrgather.vx.mask.nxv8i32.i64(
2595 <vscale x 8 x i32> %0,
2596 <vscale x 8 x i32> %1,
2597 i64 %2,
2598 <vscale x 8 x i1> %3,
2599 i64 %4, i64 1)
2601 ret <vscale x 8 x i32> %a
2602 }
2604 declare <vscale x 16 x i32> @llvm.riscv.vrgather.vx.nxv16i32.i64(
2605 <vscale x 16 x i32>,
2606 <vscale x 16 x i32>,
2607 i64,
2608 i64)
2610 define <vscale x 16 x i32> @intrinsic_vrgather_vx_nxv16i32_nxv16i32_i64(<vscale x 16 x i32> %0, i64 %1, i64 %2) nounwind {
2611 ; CHECK-LABEL: intrinsic_vrgather_vx_nxv16i32_nxv16i32_i64:
2612 ; CHECK: # %bb.0: # %entry
2613 ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
2614 ; CHECK-NEXT: vrgather.vx v16, v8, a0
2615 ; CHECK-NEXT: vmv.v.v v8, v16
2616 ; CHECK-NEXT: ret
2617 entry:
2618 %a = call <vscale x 16 x i32> @llvm.riscv.vrgather.vx.nxv16i32.i64(
2619 <vscale x 16 x i32> undef,
2620 <vscale x 16 x i32> %0,
2621 i64 %1,
2622 i64 %2)
2624 ret <vscale x 16 x i32> %a
2625 }
2627 declare <vscale x 16 x i32> @llvm.riscv.vrgather.vx.mask.nxv16i32.i64(
2628 <vscale x 16 x i32>,
2629 <vscale x 16 x i32>,
2630 i64,
2631 <vscale x 16 x i1>,
2632 i64,
2633 i64)
2635 define <vscale x 16 x i32> @intrinsic_vrgather_mask_vx_nxv16i32_nxv16i32_i64(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i64 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
2636 ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv16i32_nxv16i32_i64:
2637 ; CHECK: # %bb.0: # %entry
2638 ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
2639 ; CHECK-NEXT: vrgather.vx v8, v16, a0, v0.t
2640 ; CHECK-NEXT: ret
2641 entry:
2642 %a = call <vscale x 16 x i32> @llvm.riscv.vrgather.vx.mask.nxv16i32.i64(
2643 <vscale x 16 x i32> %0,
2644 <vscale x 16 x i32> %1,
2645 i64 %2,
2646 <vscale x 16 x i1> %3,
2647 i64 %4, i64 1)
2649 ret <vscale x 16 x i32> %a
2650 }
2652 declare <vscale x 1 x i64> @llvm.riscv.vrgather.vx.nxv1i64.i64(
2653 <vscale x 1 x i64>,
2654 <vscale x 1 x i64>,
2655 i64,
2656 i64)
2658 define <vscale x 1 x i64> @intrinsic_vrgather_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, i64 %2) nounwind {
2659 ; CHECK-LABEL: intrinsic_vrgather_vx_nxv1i64_nxv1i64_i64:
2660 ; CHECK: # %bb.0: # %entry
2661 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
2662 ; CHECK-NEXT: vrgather.vx v9, v8, a0
2663 ; CHECK-NEXT: vmv.v.v v8, v9
2664 ; CHECK-NEXT: ret
2665 entry:
2666 %a = call <vscale x 1 x i64> @llvm.riscv.vrgather.vx.nxv1i64.i64(
2667 <vscale x 1 x i64> undef,
2668 <vscale x 1 x i64> %0,
2669 i64 %1,
2670 i64 %2)
2672 ret <vscale x 1 x i64> %a
2673 }
2675 declare <vscale x 1 x i64> @llvm.riscv.vrgather.vx.mask.nxv1i64.i64(
2676 <vscale x 1 x i64>,
2677 <vscale x 1 x i64>,
2678 i64,
2679 <vscale x 1 x i1>,
2680 i64,
2681 i64)
2683 define <vscale x 1 x i64> @intrinsic_vrgather_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
2684 ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv1i64_nxv1i64_i64:
2685 ; CHECK: # %bb.0: # %entry
2686 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
2687 ; CHECK-NEXT: vrgather.vx v8, v9, a0, v0.t
2688 ; CHECK-NEXT: ret
2689 entry:
2690 %a = call <vscale x 1 x i64> @llvm.riscv.vrgather.vx.mask.nxv1i64.i64(
2691 <vscale x 1 x i64> %0,
2692 <vscale x 1 x i64> %1,
2693 i64 %2,
2694 <vscale x 1 x i1> %3,
2695 i64 %4, i64 1)
2697 ret <vscale x 1 x i64> %a
2698 }
2700 declare <vscale x 2 x i64> @llvm.riscv.vrgather.vx.nxv2i64.i64(
2701 <vscale x 2 x i64>,
2702 <vscale x 2 x i64>,
2703 i64,
2704 i64)
2706 define <vscale x 2 x i64> @intrinsic_vrgather_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, i64 %2) nounwind {
2707 ; CHECK-LABEL: intrinsic_vrgather_vx_nxv2i64_nxv2i64_i64:
2708 ; CHECK: # %bb.0: # %entry
2709 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
2710 ; CHECK-NEXT: vrgather.vx v10, v8, a0
2711 ; CHECK-NEXT: vmv.v.v v8, v10
2712 ; CHECK-NEXT: ret
2713 entry:
2714 %a = call <vscale x 2 x i64> @llvm.riscv.vrgather.vx.nxv2i64.i64(
2715 <vscale x 2 x i64> undef,
2716 <vscale x 2 x i64> %0,
2717 i64 %1,
2718 i64 %2)
2720 ret <vscale x 2 x i64> %a
2721 }
2723 declare <vscale x 2 x i64> @llvm.riscv.vrgather.vx.mask.nxv2i64.i64(
2724 <vscale x 2 x i64>,
2725 <vscale x 2 x i64>,
2726 i64,
2727 <vscale x 2 x i1>,
2728 i64,
2729 i64)
2731 define <vscale x 2 x i64> @intrinsic_vrgather_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
2732 ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv2i64_nxv2i64_i64:
2733 ; CHECK: # %bb.0: # %entry
2734 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
2735 ; CHECK-NEXT: vrgather.vx v8, v10, a0, v0.t
2736 ; CHECK-NEXT: ret
2737 entry:
2738 %a = call <vscale x 2 x i64> @llvm.riscv.vrgather.vx.mask.nxv2i64.i64(
2739 <vscale x 2 x i64> %0,
2740 <vscale x 2 x i64> %1,
2741 i64 %2,
2742 <vscale x 2 x i1> %3,
2743 i64 %4, i64 1)
2745 ret <vscale x 2 x i64> %a
2746 }
2748 declare <vscale x 4 x i64> @llvm.riscv.vrgather.vx.nxv4i64.i64(
2749 <vscale x 4 x i64>,
2750 <vscale x 4 x i64>,
2751 i64,
2752 i64)
2754 define <vscale x 4 x i64> @intrinsic_vrgather_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, i64 %2) nounwind {
2755 ; CHECK-LABEL: intrinsic_vrgather_vx_nxv4i64_nxv4i64_i64:
2756 ; CHECK: # %bb.0: # %entry
2757 ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
2758 ; CHECK-NEXT: vrgather.vx v12, v8, a0
2759 ; CHECK-NEXT: vmv.v.v v8, v12
2760 ; CHECK-NEXT: ret
2761 entry:
2762 %a = call <vscale x 4 x i64> @llvm.riscv.vrgather.vx.nxv4i64.i64(
2763 <vscale x 4 x i64> undef,
2764 <vscale x 4 x i64> %0,
2765 i64 %1,
2766 i64 %2)
2768 ret <vscale x 4 x i64> %a
2769 }
2771 declare <vscale x 4 x i64> @llvm.riscv.vrgather.vx.mask.nxv4i64.i64(
2772 <vscale x 4 x i64>,
2773 <vscale x 4 x i64>,
2774 i64,
2775 <vscale x 4 x i1>,
2776 i64,
2777 i64)
2779 define <vscale x 4 x i64> @intrinsic_vrgather_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
2780 ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv4i64_nxv4i64_i64:
2781 ; CHECK: # %bb.0: # %entry
2782 ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
2783 ; CHECK-NEXT: vrgather.vx v8, v12, a0, v0.t
2784 ; CHECK-NEXT: ret
2785 entry:
2786 %a = call <vscale x 4 x i64> @llvm.riscv.vrgather.vx.mask.nxv4i64.i64(
2787 <vscale x 4 x i64> %0,
2788 <vscale x 4 x i64> %1,
2789 i64 %2,
2790 <vscale x 4 x i1> %3,
2791 i64 %4, i64 1)
2793 ret <vscale x 4 x i64> %a
2794 }
2796 declare <vscale x 8 x i64> @llvm.riscv.vrgather.vx.nxv8i64.i64(
2797 <vscale x 8 x i64>,
2798 <vscale x 8 x i64>,
2799 i64,
2800 i64)
2802 define <vscale x 8 x i64> @intrinsic_vrgather_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, i64 %2) nounwind {
2803 ; CHECK-LABEL: intrinsic_vrgather_vx_nxv8i64_nxv8i64_i64:
2804 ; CHECK: # %bb.0: # %entry
2805 ; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
2806 ; CHECK-NEXT: vrgather.vx v16, v8, a0
2807 ; CHECK-NEXT: vmv.v.v v8, v16
2808 ; CHECK-NEXT: ret
2809 entry:
2810 %a = call <vscale x 8 x i64> @llvm.riscv.vrgather.vx.nxv8i64.i64(
2811 <vscale x 8 x i64> undef,
2812 <vscale x 8 x i64> %0,
2813 i64 %1,
2814 i64 %2)
2816 ret <vscale x 8 x i64> %a
2817 }
2819 declare <vscale x 8 x i64> @llvm.riscv.vrgather.vx.mask.nxv8i64.i64(
2820 <vscale x 8 x i64>,
2821 <vscale x 8 x i64>,
2822 i64,
2823 <vscale x 8 x i1>,
2824 i64,
2825 i64)
2827 define <vscale x 8 x i64> @intrinsic_vrgather_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
2828 ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv8i64_nxv8i64_i64:
2829 ; CHECK: # %bb.0: # %entry
2830 ; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu
2831 ; CHECK-NEXT: vrgather.vx v8, v16, a0, v0.t
2832 ; CHECK-NEXT: ret
2833 entry:
2834 %a = call <vscale x 8 x i64> @llvm.riscv.vrgather.vx.mask.nxv8i64.i64(
2835 <vscale x 8 x i64> %0,
2836 <vscale x 8 x i64> %1,
2837 i64 %2,
2838 <vscale x 8 x i1> %3,
2839 i64 %4, i64 1)
2841 ret <vscale x 8 x i64> %a
2842 }
2844 declare <vscale x 1 x half> @llvm.riscv.vrgather.vx.nxv1f16.i64(
2845 <vscale x 1 x half>,
2846 <vscale x 1 x half>,
2847 i64,
2848 i64)
2850 define <vscale x 1 x half> @intrinsic_vrgather_vx_nxv1f16_nxv1f16_i64(<vscale x 1 x half> %0, i64 %1, i64 %2) nounwind {
2851 ; CHECK-LABEL: intrinsic_vrgather_vx_nxv1f16_nxv1f16_i64:
2852 ; CHECK: # %bb.0: # %entry
2853 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
2854 ; CHECK-NEXT: vrgather.vx v9, v8, a0
2855 ; CHECK-NEXT: vmv1r.v v8, v9
2856 ; CHECK-NEXT: ret
2857 entry:
2858 %a = call <vscale x 1 x half> @llvm.riscv.vrgather.vx.nxv1f16.i64(
2859 <vscale x 1 x half> undef,
2860 <vscale x 1 x half> %0,
2861 i64 %1,
2862 i64 %2)
2864 ret <vscale x 1 x half> %a
2865 }
2867 declare <vscale x 1 x half> @llvm.riscv.vrgather.vx.mask.nxv1f16.i64(
2868 <vscale x 1 x half>,
2869 <vscale x 1 x half>,
2870 i64,
2871 <vscale x 1 x i1>,
2872 i64,
2873 i64)
2875 define <vscale x 1 x half> @intrinsic_vrgather_mask_vx_nxv1f16_nxv1f16_i64(<vscale x 1 x half> %0, <vscale x 1 x half> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
2876 ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv1f16_nxv1f16_i64:
2877 ; CHECK: # %bb.0: # %entry
2878 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
2879 ; CHECK-NEXT: vrgather.vx v8, v9, a0, v0.t
2880 ; CHECK-NEXT: ret
2881 entry:
2882 %a = call <vscale x 1 x half> @llvm.riscv.vrgather.vx.mask.nxv1f16.i64(
2883 <vscale x 1 x half> %0,
2884 <vscale x 1 x half> %1,
2885 i64 %2,
2886 <vscale x 1 x i1> %3,
2887 i64 %4, i64 1)
2889 ret <vscale x 1 x half> %a
2890 }
2892 declare <vscale x 2 x half> @llvm.riscv.vrgather.vx.nxv2f16.i64(
2893 <vscale x 2 x half>,
2894 <vscale x 2 x half>,
2895 i64,
2896 i64)
2898 define <vscale x 2 x half> @intrinsic_vrgather_vx_nxv2f16_nxv2f16_i64(<vscale x 2 x half> %0, i64 %1, i64 %2) nounwind {
2899 ; CHECK-LABEL: intrinsic_vrgather_vx_nxv2f16_nxv2f16_i64:
2900 ; CHECK: # %bb.0: # %entry
2901 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
2902 ; CHECK-NEXT: vrgather.vx v9, v8, a0
2903 ; CHECK-NEXT: vmv1r.v v8, v9
2904 ; CHECK-NEXT: ret
2905 entry:
2906 %a = call <vscale x 2 x half> @llvm.riscv.vrgather.vx.nxv2f16.i64(
2907 <vscale x 2 x half> undef,
2908 <vscale x 2 x half> %0,
2909 i64 %1,
2910 i64 %2)
2912 ret <vscale x 2 x half> %a
2913 }
2915 declare <vscale x 2 x half> @llvm.riscv.vrgather.vx.mask.nxv2f16.i64(
2916 <vscale x 2 x half>,
2917 <vscale x 2 x half>,
2918 i64,
2919 <vscale x 2 x i1>,
2920 i64,
2921 i64)
2923 define <vscale x 2 x half> @intrinsic_vrgather_mask_vx_nxv2f16_nxv2f16_i64(<vscale x 2 x half> %0, <vscale x 2 x half> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
2924 ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv2f16_nxv2f16_i64:
2925 ; CHECK: # %bb.0: # %entry
2926 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
2927 ; CHECK-NEXT: vrgather.vx v8, v9, a0, v0.t
2928 ; CHECK-NEXT: ret
2929 entry:
2930 %a = call <vscale x 2 x half> @llvm.riscv.vrgather.vx.mask.nxv2f16.i64(
2931 <vscale x 2 x half> %0,
2932 <vscale x 2 x half> %1,
2933 i64 %2,
2934 <vscale x 2 x i1> %3,
2935 i64 %4, i64 1)
2937 ret <vscale x 2 x half> %a
2938 }
2940 declare <vscale x 4 x half> @llvm.riscv.vrgather.vx.nxv4f16.i64(
2941 <vscale x 4 x half>,
2942 <vscale x 4 x half>,
2943 i64,
2944 i64)
2946 define <vscale x 4 x half> @intrinsic_vrgather_vx_nxv4f16_nxv4f16_i64(<vscale x 4 x half> %0, i64 %1, i64 %2) nounwind {
2947 ; CHECK-LABEL: intrinsic_vrgather_vx_nxv4f16_nxv4f16_i64:
2948 ; CHECK: # %bb.0: # %entry
2949 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
2950 ; CHECK-NEXT: vrgather.vx v9, v8, a0
2951 ; CHECK-NEXT: vmv.v.v v8, v9
2952 ; CHECK-NEXT: ret
2953 entry:
2954 %a = call <vscale x 4 x half> @llvm.riscv.vrgather.vx.nxv4f16.i64(
2955 <vscale x 4 x half> undef,
2956 <vscale x 4 x half> %0,
2957 i64 %1,
2958 i64 %2)
2960 ret <vscale x 4 x half> %a
2961 }
2963 declare <vscale x 4 x half> @llvm.riscv.vrgather.vx.mask.nxv4f16.i64(
2964 <vscale x 4 x half>,
2965 <vscale x 4 x half>,
2966 i64,
2967 <vscale x 4 x i1>,
2968 i64,
2969 i64)
2971 define <vscale x 4 x half> @intrinsic_vrgather_mask_vx_nxv4f16_nxv4f16_i64(<vscale x 4 x half> %0, <vscale x 4 x half> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
2972 ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv4f16_nxv4f16_i64:
2973 ; CHECK: # %bb.0: # %entry
2974 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
2975 ; CHECK-NEXT: vrgather.vx v8, v9, a0, v0.t
2976 ; CHECK-NEXT: ret
2977 entry:
2978 %a = call <vscale x 4 x half> @llvm.riscv.vrgather.vx.mask.nxv4f16.i64(
2979 <vscale x 4 x half> %0,
2980 <vscale x 4 x half> %1,
2981 i64 %2,
2982 <vscale x 4 x i1> %3,
2983 i64 %4, i64 1)
2985 ret <vscale x 4 x half> %a
2986 }
2988 declare <vscale x 8 x half> @llvm.riscv.vrgather.vx.nxv8f16.i64(
2989 <vscale x 8 x half>,
2990 <vscale x 8 x half>,
2991 i64,
2992 i64)
2994 define <vscale x 8 x half> @intrinsic_vrgather_vx_nxv8f16_nxv8f16_i64(<vscale x 8 x half> %0, i64 %1, i64 %2) nounwind {
2995 ; CHECK-LABEL: intrinsic_vrgather_vx_nxv8f16_nxv8f16_i64:
2996 ; CHECK: # %bb.0: # %entry
2997 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
2998 ; CHECK-NEXT: vrgather.vx v10, v8, a0
2999 ; CHECK-NEXT: vmv.v.v v8, v10
3000 ; CHECK-NEXT: ret
3001 entry:
3002 %a = call <vscale x 8 x half> @llvm.riscv.vrgather.vx.nxv8f16.i64(
3003 <vscale x 8 x half> undef,
3004 <vscale x 8 x half> %0,
3005 i64 %1,
3006 i64 %2)
3008 ret <vscale x 8 x half> %a
3009 }
3011 declare <vscale x 8 x half> @llvm.riscv.vrgather.vx.mask.nxv8f16.i64(
3012 <vscale x 8 x half>,
3013 <vscale x 8 x half>,
3014 i64,
3015 <vscale x 8 x i1>,
3016 i64,
3017 i64)
3019 define <vscale x 8 x half> @intrinsic_vrgather_mask_vx_nxv8f16_nxv8f16_i64(<vscale x 8 x half> %0, <vscale x 8 x half> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
3020 ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv8f16_nxv8f16_i64:
3021 ; CHECK: # %bb.0: # %entry
3022 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
3023 ; CHECK-NEXT: vrgather.vx v8, v10, a0, v0.t
3024 ; CHECK-NEXT: ret
3025 entry:
3026 %a = call <vscale x 8 x half> @llvm.riscv.vrgather.vx.mask.nxv8f16.i64(
3027 <vscale x 8 x half> %0,
3028 <vscale x 8 x half> %1,
3029 i64 %2,
3030 <vscale x 8 x i1> %3,
3031 i64 %4, i64 1)
3033 ret <vscale x 8 x half> %a
3034 }
3036 declare <vscale x 16 x half> @llvm.riscv.vrgather.vx.nxv16f16.i64(
3037 <vscale x 16 x half>,
3038 <vscale x 16 x half>,
3039 i64,
3040 i64)
3042 define <vscale x 16 x half> @intrinsic_vrgather_vx_nxv16f16_nxv16f16_i64(<vscale x 16 x half> %0, i64 %1, i64 %2) nounwind {
3043 ; CHECK-LABEL: intrinsic_vrgather_vx_nxv16f16_nxv16f16_i64:
3044 ; CHECK: # %bb.0: # %entry
3045 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
3046 ; CHECK-NEXT: vrgather.vx v12, v8, a0
3047 ; CHECK-NEXT: vmv.v.v v8, v12
3048 ; CHECK-NEXT: ret
3049 entry:
3050 %a = call <vscale x 16 x half> @llvm.riscv.vrgather.vx.nxv16f16.i64(
3051 <vscale x 16 x half> undef,
3052 <vscale x 16 x half> %0,
3053 i64 %1,
3054 i64 %2)
3056 ret <vscale x 16 x half> %a
3057 }
3059 declare <vscale x 16 x half> @llvm.riscv.vrgather.vx.mask.nxv16f16.i64(
3060 <vscale x 16 x half>,
3061 <vscale x 16 x half>,
3062 i64,
3063 <vscale x 16 x i1>,
3064 i64,
3065 i64)
3067 define <vscale x 16 x half> @intrinsic_vrgather_mask_vx_nxv16f16_nxv16f16_i64(<vscale x 16 x half> %0, <vscale x 16 x half> %1, i64 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
3068 ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv16f16_nxv16f16_i64:
3069 ; CHECK: # %bb.0: # %entry
3070 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
3071 ; CHECK-NEXT: vrgather.vx v8, v12, a0, v0.t
3072 ; CHECK-NEXT: ret
3073 entry:
3074 %a = call <vscale x 16 x half> @llvm.riscv.vrgather.vx.mask.nxv16f16.i64(
3075 <vscale x 16 x half> %0,
3076 <vscale x 16 x half> %1,
3077 i64 %2,
3078 <vscale x 16 x i1> %3,
3079 i64 %4, i64 1)
3081 ret <vscale x 16 x half> %a
3082 }
3084 declare <vscale x 32 x half> @llvm.riscv.vrgather.vx.nxv32f16.i64(
3085 <vscale x 32 x half>,
3086 <vscale x 32 x half>,
3087 i64,
3088 i64)
3090 define <vscale x 32 x half> @intrinsic_vrgather_vx_nxv32f16_nxv32f16_i64(<vscale x 32 x half> %0, i64 %1, i64 %2) nounwind {
3091 ; CHECK-LABEL: intrinsic_vrgather_vx_nxv32f16_nxv32f16_i64:
3092 ; CHECK: # %bb.0: # %entry
3093 ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
3094 ; CHECK-NEXT: vrgather.vx v16, v8, a0
3095 ; CHECK-NEXT: vmv.v.v v8, v16
3096 ; CHECK-NEXT: ret
3097 entry:
3098 %a = call <vscale x 32 x half> @llvm.riscv.vrgather.vx.nxv32f16.i64(
3099 <vscale x 32 x half> undef,
3100 <vscale x 32 x half> %0,
3101 i64 %1,
3102 i64 %2)
3104 ret <vscale x 32 x half> %a
3105 }
3107 declare <vscale x 32 x half> @llvm.riscv.vrgather.vx.mask.nxv32f16.i64(
3108 <vscale x 32 x half>,
3109 <vscale x 32 x half>,
3110 i64,
3111 <vscale x 32 x i1>,
3112 i64,
3113 i64)
3115 define <vscale x 32 x half> @intrinsic_vrgather_mask_vx_nxv32f16_nxv32f16_i64(<vscale x 32 x half> %0, <vscale x 32 x half> %1, i64 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
3116 ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv32f16_nxv32f16_i64:
3117 ; CHECK: # %bb.0: # %entry
3118 ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu
3119 ; CHECK-NEXT: vrgather.vx v8, v16, a0, v0.t
3120 ; CHECK-NEXT: ret
3121 entry:
3122 %a = call <vscale x 32 x half> @llvm.riscv.vrgather.vx.mask.nxv32f16.i64(
3123 <vscale x 32 x half> %0,
3124 <vscale x 32 x half> %1,
3125 i64 %2,
3126 <vscale x 32 x i1> %3,
3127 i64 %4, i64 1)
3129 ret <vscale x 32 x half> %a
3130 }
3132 declare <vscale x 1 x float> @llvm.riscv.vrgather.vx.nxv1f32.i64(
3133 <vscale x 1 x float>,
3134 <vscale x 1 x float>,
3135 i64,
3136 i64)
3138 define <vscale x 1 x float> @intrinsic_vrgather_vx_nxv1f32_nxv1f32_i64(<vscale x 1 x float> %0, i64 %1, i64 %2) nounwind {
3139 ; CHECK-LABEL: intrinsic_vrgather_vx_nxv1f32_nxv1f32_i64:
3140 ; CHECK: # %bb.0: # %entry
3141 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
3142 ; CHECK-NEXT: vrgather.vx v9, v8, a0
3143 ; CHECK-NEXT: vmv1r.v v8, v9
3144 ; CHECK-NEXT: ret
3145 entry:
3146 %a = call <vscale x 1 x float> @llvm.riscv.vrgather.vx.nxv1f32.i64(
3147 <vscale x 1 x float> undef,
3148 <vscale x 1 x float> %0,
3149 i64 %1,
3150 i64 %2)
3152 ret <vscale x 1 x float> %a
3153 }
3155 declare <vscale x 1 x float> @llvm.riscv.vrgather.vx.mask.nxv1f32.i64(
3156 <vscale x 1 x float>,
3157 <vscale x 1 x float>,
3158 i64,
3159 <vscale x 1 x i1>,
3160 i64,
3161 i64)
3163 define <vscale x 1 x float> @intrinsic_vrgather_mask_vx_nxv1f32_nxv1f32_i64(<vscale x 1 x float> %0, <vscale x 1 x float> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
3164 ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv1f32_nxv1f32_i64:
3165 ; CHECK: # %bb.0: # %entry
3166 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
3167 ; CHECK-NEXT: vrgather.vx v8, v9, a0, v0.t
3168 ; CHECK-NEXT: ret
3169 entry:
3170 %a = call <vscale x 1 x float> @llvm.riscv.vrgather.vx.mask.nxv1f32.i64(
3171 <vscale x 1 x float> %0,
3172 <vscale x 1 x float> %1,
3173 i64 %2,
3174 <vscale x 1 x i1> %3,
3175 i64 %4, i64 1)
3177 ret <vscale x 1 x float> %a
3178 }
3180 declare <vscale x 2 x float> @llvm.riscv.vrgather.vx.nxv2f32.i64(
3181 <vscale x 2 x float>,
3182 <vscale x 2 x float>,
3183 i64,
3184 i64)
3186 define <vscale x 2 x float> @intrinsic_vrgather_vx_nxv2f32_nxv2f32_i64(<vscale x 2 x float> %0, i64 %1, i64 %2) nounwind {
3187 ; CHECK-LABEL: intrinsic_vrgather_vx_nxv2f32_nxv2f32_i64:
3188 ; CHECK: # %bb.0: # %entry
3189 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
3190 ; CHECK-NEXT: vrgather.vx v9, v8, a0
3191 ; CHECK-NEXT: vmv.v.v v8, v9
3192 ; CHECK-NEXT: ret
3193 entry:
3194 %a = call <vscale x 2 x float> @llvm.riscv.vrgather.vx.nxv2f32.i64(
3195 <vscale x 2 x float> undef,
3196 <vscale x 2 x float> %0,
3197 i64 %1,
3198 i64 %2)
3200 ret <vscale x 2 x float> %a
3201 }
3203 declare <vscale x 2 x float> @llvm.riscv.vrgather.vx.mask.nxv2f32.i64(
3204 <vscale x 2 x float>,
3205 <vscale x 2 x float>,
3206 i64,
3207 <vscale x 2 x i1>,
3208 i64,
3209 i64)
3211 define <vscale x 2 x float> @intrinsic_vrgather_mask_vx_nxv2f32_nxv2f32_i64(<vscale x 2 x float> %0, <vscale x 2 x float> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
3212 ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv2f32_nxv2f32_i64:
3213 ; CHECK: # %bb.0: # %entry
3214 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
3215 ; CHECK-NEXT: vrgather.vx v8, v9, a0, v0.t
3216 ; CHECK-NEXT: ret
3217 entry:
3218 %a = call <vscale x 2 x float> @llvm.riscv.vrgather.vx.mask.nxv2f32.i64(
3219 <vscale x 2 x float> %0,
3220 <vscale x 2 x float> %1,
3221 i64 %2,
3222 <vscale x 2 x i1> %3,
3223 i64 %4, i64 1)
3225 ret <vscale x 2 x float> %a
3226 }
3228 declare <vscale x 4 x float> @llvm.riscv.vrgather.vx.nxv4f32.i64(
3229 <vscale x 4 x float>,
3230 <vscale x 4 x float>,
3231 i64,
3232 i64)
3234 define <vscale x 4 x float> @intrinsic_vrgather_vx_nxv4f32_nxv4f32_i64(<vscale x 4 x float> %0, i64 %1, i64 %2) nounwind {
3235 ; CHECK-LABEL: intrinsic_vrgather_vx_nxv4f32_nxv4f32_i64:
3236 ; CHECK: # %bb.0: # %entry
3237 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
3238 ; CHECK-NEXT: vrgather.vx v10, v8, a0
3239 ; CHECK-NEXT: vmv.v.v v8, v10
3240 ; CHECK-NEXT: ret
3241 entry:
3242 %a = call <vscale x 4 x float> @llvm.riscv.vrgather.vx.nxv4f32.i64(
3243 <vscale x 4 x float> undef,
3244 <vscale x 4 x float> %0,
3245 i64 %1,
3246 i64 %2)
3248 ret <vscale x 4 x float> %a
3249 }
3251 declare <vscale x 4 x float> @llvm.riscv.vrgather.vx.mask.nxv4f32.i64(
3252 <vscale x 4 x float>,
3253 <vscale x 4 x float>,
3254 i64,
3255 <vscale x 4 x i1>,
3256 i64,
3257 i64)
3259 define <vscale x 4 x float> @intrinsic_vrgather_mask_vx_nxv4f32_nxv4f32_i64(<vscale x 4 x float> %0, <vscale x 4 x float> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
3260 ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv4f32_nxv4f32_i64:
3261 ; CHECK: # %bb.0: # %entry
3262 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
3263 ; CHECK-NEXT: vrgather.vx v8, v10, a0, v0.t
3264 ; CHECK-NEXT: ret
3265 entry:
3266 %a = call <vscale x 4 x float> @llvm.riscv.vrgather.vx.mask.nxv4f32.i64(
3267 <vscale x 4 x float> %0,
3268 <vscale x 4 x float> %1,
3269 i64 %2,
3270 <vscale x 4 x i1> %3,
3271 i64 %4, i64 1)
3273 ret <vscale x 4 x float> %a
3274 }
3276 declare <vscale x 8 x float> @llvm.riscv.vrgather.vx.nxv8f32.i64(
3277 <vscale x 8 x float>,
3278 <vscale x 8 x float>,
3279 i64,
3280 i64)
3282 define <vscale x 8 x float> @intrinsic_vrgather_vx_nxv8f32_nxv8f32_i64(<vscale x 8 x float> %0, i64 %1, i64 %2) nounwind {
3283 ; CHECK-LABEL: intrinsic_vrgather_vx_nxv8f32_nxv8f32_i64:
3284 ; CHECK: # %bb.0: # %entry
3285 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
3286 ; CHECK-NEXT: vrgather.vx v12, v8, a0
3287 ; CHECK-NEXT: vmv.v.v v8, v12
3288 ; CHECK-NEXT: ret
3289 entry:
3290 %a = call <vscale x 8 x float> @llvm.riscv.vrgather.vx.nxv8f32.i64(
3291 <vscale x 8 x float> undef,
3292 <vscale x 8 x float> %0,
3293 i64 %1,
3294 i64 %2)
3296 ret <vscale x 8 x float> %a
3297 }
3299 declare <vscale x 8 x float> @llvm.riscv.vrgather.vx.mask.nxv8f32.i64(
3300 <vscale x 8 x float>,
3301 <vscale x 8 x float>,
3302 i64,
3303 <vscale x 8 x i1>,
3304 i64,
3305 i64)
3307 define <vscale x 8 x float> @intrinsic_vrgather_mask_vx_nxv8f32_nxv8f32_i64(<vscale x 8 x float> %0, <vscale x 8 x float> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
3308 ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv8f32_nxv8f32_i64:
3309 ; CHECK: # %bb.0: # %entry
3310 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
3311 ; CHECK-NEXT: vrgather.vx v8, v12, a0, v0.t
3312 ; CHECK-NEXT: ret
3313 entry:
3314 %a = call <vscale x 8 x float> @llvm.riscv.vrgather.vx.mask.nxv8f32.i64(
3315 <vscale x 8 x float> %0,
3316 <vscale x 8 x float> %1,
3317 i64 %2,
3318 <vscale x 8 x i1> %3,
3319 i64 %4, i64 1)
3321 ret <vscale x 8 x float> %a
3322 }
3324 declare <vscale x 16 x float> @llvm.riscv.vrgather.vx.nxv16f32.i64(
3325 <vscale x 16 x float>,
3326 <vscale x 16 x float>,
3327 i64,
3328 i64)
3330 define <vscale x 16 x float> @intrinsic_vrgather_vx_nxv16f32_nxv16f32_i64(<vscale x 16 x float> %0, i64 %1, i64 %2) nounwind {
3331 ; CHECK-LABEL: intrinsic_vrgather_vx_nxv16f32_nxv16f32_i64:
3332 ; CHECK: # %bb.0: # %entry
3333 ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
3334 ; CHECK-NEXT: vrgather.vx v16, v8, a0
3335 ; CHECK-NEXT: vmv.v.v v8, v16
3336 ; CHECK-NEXT: ret
3337 entry:
3338 %a = call <vscale x 16 x float> @llvm.riscv.vrgather.vx.nxv16f32.i64(
3339 <vscale x 16 x float> undef,
3340 <vscale x 16 x float> %0,
3341 i64 %1,
3342 i64 %2)
3344 ret <vscale x 16 x float> %a
3345 }
3347 declare <vscale x 16 x float> @llvm.riscv.vrgather.vx.mask.nxv16f32.i64(
3348 <vscale x 16 x float>,
3349 <vscale x 16 x float>,
3350 i64,
3351 <vscale x 16 x i1>,
3352 i64,
3353 i64)
3355 define <vscale x 16 x float> @intrinsic_vrgather_mask_vx_nxv16f32_nxv16f32_i64(<vscale x 16 x float> %0, <vscale x 16 x float> %1, i64 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
3356 ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv16f32_nxv16f32_i64:
3357 ; CHECK: # %bb.0: # %entry
3358 ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
3359 ; CHECK-NEXT: vrgather.vx v8, v16, a0, v0.t
3360 ; CHECK-NEXT: ret
3361 entry:
3362 %a = call <vscale x 16 x float> @llvm.riscv.vrgather.vx.mask.nxv16f32.i64(
3363 <vscale x 16 x float> %0,
3364 <vscale x 16 x float> %1,
3365 i64 %2,
3366 <vscale x 16 x i1> %3,
3367 i64 %4, i64 1)
3369 ret <vscale x 16 x float> %a
3370 }
3372 declare <vscale x 1 x double> @llvm.riscv.vrgather.vx.nxv1f64.i64(
3373 <vscale x 1 x double>,
3374 <vscale x 1 x double>,
3375 i64,
3376 i64)
3378 define <vscale x 1 x double> @intrinsic_vrgather_vx_nxv1f64_nxv1f64_i64(<vscale x 1 x double> %0, i64 %1, i64 %2) nounwind {
3379 ; CHECK-LABEL: intrinsic_vrgather_vx_nxv1f64_nxv1f64_i64:
3380 ; CHECK: # %bb.0: # %entry
3381 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
3382 ; CHECK-NEXT: vrgather.vx v9, v8, a0
3383 ; CHECK-NEXT: vmv.v.v v8, v9
3384 ; CHECK-NEXT: ret
3385 entry:
3386 %a = call <vscale x 1 x double> @llvm.riscv.vrgather.vx.nxv1f64.i64(
3387 <vscale x 1 x double> undef,
3388 <vscale x 1 x double> %0,
3389 i64 %1,
3390 i64 %2)
3392 ret <vscale x 1 x double> %a
3393 }
3395 declare <vscale x 1 x double> @llvm.riscv.vrgather.vx.mask.nxv1f64.i64(
3396 <vscale x 1 x double>,
3397 <vscale x 1 x double>,
3398 i64,
3399 <vscale x 1 x i1>,
3400 i64,
3401 i64)
3403 define <vscale x 1 x double> @intrinsic_vrgather_mask_vx_nxv1f64_nxv1f64_i64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
3404 ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv1f64_nxv1f64_i64:
3405 ; CHECK: # %bb.0: # %entry
3406 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
3407 ; CHECK-NEXT: vrgather.vx v8, v9, a0, v0.t
3408 ; CHECK-NEXT: ret
3409 entry:
3410 %a = call <vscale x 1 x double> @llvm.riscv.vrgather.vx.mask.nxv1f64.i64(
3411 <vscale x 1 x double> %0,
3412 <vscale x 1 x double> %1,
3413 i64 %2,
3414 <vscale x 1 x i1> %3,
3415 i64 %4, i64 1)
3417 ret <vscale x 1 x double> %a
3418 }
3420 declare <vscale x 2 x double> @llvm.riscv.vrgather.vx.nxv2f64.i64(
3421 <vscale x 2 x double>,
3422 <vscale x 2 x double>,
3423 i64,
3424 i64)
3426 define <vscale x 2 x double> @intrinsic_vrgather_vx_nxv2f64_nxv2f64_i64(<vscale x 2 x double> %0, i64 %1, i64 %2) nounwind {
3427 ; CHECK-LABEL: intrinsic_vrgather_vx_nxv2f64_nxv2f64_i64:
3428 ; CHECK: # %bb.0: # %entry
3429 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
3430 ; CHECK-NEXT: vrgather.vx v10, v8, a0
3431 ; CHECK-NEXT: vmv.v.v v8, v10
3432 ; CHECK-NEXT: ret
3433 entry:
3434 %a = call <vscale x 2 x double> @llvm.riscv.vrgather.vx.nxv2f64.i64(
3435 <vscale x 2 x double> undef,
3436 <vscale x 2 x double> %0,
3437 i64 %1,
3438 i64 %2)
3440 ret <vscale x 2 x double> %a
3441 }
3443 declare <vscale x 2 x double> @llvm.riscv.vrgather.vx.mask.nxv2f64.i64(
3444 <vscale x 2 x double>,
3445 <vscale x 2 x double>,
3446 i64,
3447 <vscale x 2 x i1>,
3448 i64,
3449 i64)
3451 define <vscale x 2 x double> @intrinsic_vrgather_mask_vx_nxv2f64_nxv2f64_i64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
3452 ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv2f64_nxv2f64_i64:
3453 ; CHECK: # %bb.0: # %entry
3454 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
3455 ; CHECK-NEXT: vrgather.vx v8, v10, a0, v0.t
3456 ; CHECK-NEXT: ret
3457 entry:
3458 %a = call <vscale x 2 x double> @llvm.riscv.vrgather.vx.mask.nxv2f64.i64(
3459 <vscale x 2 x double> %0,
3460 <vscale x 2 x double> %1,
3461 i64 %2,
3462 <vscale x 2 x i1> %3,
3463 i64 %4, i64 1)
3465 ret <vscale x 2 x double> %a
3466 }
3468 declare <vscale x 4 x double> @llvm.riscv.vrgather.vx.nxv4f64.i64(
3469 <vscale x 4 x double>,
3470 <vscale x 4 x double>,
3471 i64,
3472 i64)
3474 define <vscale x 4 x double> @intrinsic_vrgather_vx_nxv4f64_nxv4f64_i64(<vscale x 4 x double> %0, i64 %1, i64 %2) nounwind {
3475 ; CHECK-LABEL: intrinsic_vrgather_vx_nxv4f64_nxv4f64_i64:
3476 ; CHECK: # %bb.0: # %entry
3477 ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
3478 ; CHECK-NEXT: vrgather.vx v12, v8, a0
3479 ; CHECK-NEXT: vmv.v.v v8, v12
3480 ; CHECK-NEXT: ret
3481 entry:
3482 %a = call <vscale x 4 x double> @llvm.riscv.vrgather.vx.nxv4f64.i64(
3483 <vscale x 4 x double> undef,
3484 <vscale x 4 x double> %0,
3485 i64 %1,
3486 i64 %2)
3488 ret <vscale x 4 x double> %a
3489 }
3491 declare <vscale x 4 x double> @llvm.riscv.vrgather.vx.mask.nxv4f64.i64(
3492 <vscale x 4 x double>,
3493 <vscale x 4 x double>,
3494 i64,
3495 <vscale x 4 x i1>,
3496 i64,
3497 i64)
3499 define <vscale x 4 x double> @intrinsic_vrgather_mask_vx_nxv4f64_nxv4f64_i64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
3500 ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv4f64_nxv4f64_i64:
3501 ; CHECK: # %bb.0: # %entry
3502 ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
3503 ; CHECK-NEXT: vrgather.vx v8, v12, a0, v0.t
3504 ; CHECK-NEXT: ret
3505 entry:
3506 %a = call <vscale x 4 x double> @llvm.riscv.vrgather.vx.mask.nxv4f64.i64(
3507 <vscale x 4 x double> %0,
3508 <vscale x 4 x double> %1,
3509 i64 %2,
3510 <vscale x 4 x i1> %3,
3511 i64 %4, i64 1)
3513 ret <vscale x 4 x double> %a
3514 }
3516 declare <vscale x 8 x double> @llvm.riscv.vrgather.vx.nxv8f64.i64(
3517 <vscale x 8 x double>,
3518 <vscale x 8 x double>,
3519 i64,
3520 i64)
3522 define <vscale x 8 x double> @intrinsic_vrgather_vx_nxv8f64_nxv8f64_i64(<vscale x 8 x double> %0, i64 %1, i64 %2) nounwind {
3523 ; CHECK-LABEL: intrinsic_vrgather_vx_nxv8f64_nxv8f64_i64:
3524 ; CHECK: # %bb.0: # %entry
3525 ; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
3526 ; CHECK-NEXT: vrgather.vx v16, v8, a0
3527 ; CHECK-NEXT: vmv.v.v v8, v16
3528 ; CHECK-NEXT: ret
3529 entry:
3530 %a = call <vscale x 8 x double> @llvm.riscv.vrgather.vx.nxv8f64.i64(
3531 <vscale x 8 x double> undef,
3532 <vscale x 8 x double> %0,
3533 i64 %1,
3534 i64 %2)
3536 ret <vscale x 8 x double> %a
3537 }
3539 declare <vscale x 8 x double> @llvm.riscv.vrgather.vx.mask.nxv8f64.i64(
3540 <vscale x 8 x double>,
3541 <vscale x 8 x double>,
3542 i64,
3543 <vscale x 8 x i1>,
3544 i64,
3545 i64)
3547 define <vscale x 8 x double> @intrinsic_vrgather_mask_vx_nxv8f64_nxv8f64_i64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
3548 ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv8f64_nxv8f64_i64:
3549 ; CHECK: # %bb.0: # %entry
3550 ; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu
3551 ; CHECK-NEXT: vrgather.vx v8, v16, a0, v0.t
3552 ; CHECK-NEXT: ret
3553 entry:
3554 %a = call <vscale x 8 x double> @llvm.riscv.vrgather.vx.mask.nxv8f64.i64(
3555 <vscale x 8 x double> %0,
3556 <vscale x 8 x double> %1,
3557 i64 %2,
3558 <vscale x 8 x i1> %3,
3559 i64 %4, i64 1)
3561 ret <vscale x 8 x double> %a
3562 }
3564 define <vscale x 1 x i8> @intrinsic_vrgather_vi_nxv1i8_nxv1i8_i64(<vscale x 1 x i8> %0, i64 %1) nounwind {
3565 ; CHECK-LABEL: intrinsic_vrgather_vi_nxv1i8_nxv1i8_i64:
3566 ; CHECK: # %bb.0: # %entry
3567 ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
3568 ; CHECK-NEXT: vrgather.vi v9, v8, 9
3569 ; CHECK-NEXT: vmv1r.v v8, v9
3570 ; CHECK-NEXT: ret
3571 entry:
3572 %a = call <vscale x 1 x i8> @llvm.riscv.vrgather.vx.nxv1i8.i64(
3573 <vscale x 1 x i8> undef,
3574 <vscale x 1 x i8> %0,
3575 i64 9,
3576 i64 %1)
3578 ret <vscale x 1 x i8> %a
3579 }
3581 define <vscale x 1 x i8> @intrinsic_vrgather_mask_vi_nxv1i8_nxv1i8_i64(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
3582 ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv1i8_nxv1i8_i64:
3583 ; CHECK: # %bb.0: # %entry
3584 ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
3585 ; CHECK-NEXT: vrgather.vi v8, v9, 9, v0.t
3586 ; CHECK-NEXT: ret
3587 entry:
3588 %a = call <vscale x 1 x i8> @llvm.riscv.vrgather.vx.mask.nxv1i8.i64(
3589 <vscale x 1 x i8> %0,
3590 <vscale x 1 x i8> %1,
3591 i64 9,
3592 <vscale x 1 x i1> %2,
3593 i64 %3, i64 1)
3595 ret <vscale x 1 x i8> %a
3596 }
3598 define <vscale x 2 x i8> @intrinsic_vrgather_vi_nxv2i8_nxv2i8_i64(<vscale x 2 x i8> %0, i64 %1) nounwind {
3599 ; CHECK-LABEL: intrinsic_vrgather_vi_nxv2i8_nxv2i8_i64:
3600 ; CHECK: # %bb.0: # %entry
3601 ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
3602 ; CHECK-NEXT: vrgather.vi v9, v8, 9
3603 ; CHECK-NEXT: vmv1r.v v8, v9
3604 ; CHECK-NEXT: ret
3605 entry:
3606 %a = call <vscale x 2 x i8> @llvm.riscv.vrgather.vx.nxv2i8.i64(
3607 <vscale x 2 x i8> undef,
3608 <vscale x 2 x i8> %0,
3609 i64 9,
3610 i64 %1)
3612 ret <vscale x 2 x i8> %a
3613 }
3615 define <vscale x 2 x i8> @intrinsic_vrgather_mask_vi_nxv2i8_nxv2i8_i64(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
3616 ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv2i8_nxv2i8_i64:
3617 ; CHECK: # %bb.0: # %entry
3618 ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
3619 ; CHECK-NEXT: vrgather.vi v8, v9, 9, v0.t
3620 ; CHECK-NEXT: ret
3621 entry:
3622 %a = call <vscale x 2 x i8> @llvm.riscv.vrgather.vx.mask.nxv2i8.i64(
3623 <vscale x 2 x i8> %0,
3624 <vscale x 2 x i8> %1,
3625 i64 9,
3626 <vscale x 2 x i1> %2,
3627 i64 %3, i64 1)
3629 ret <vscale x 2 x i8> %a
3630 }
3632 define <vscale x 4 x i8> @intrinsic_vrgather_vi_nxv4i8_nxv4i8_i64(<vscale x 4 x i8> %0, i64 %1) nounwind {
3633 ; CHECK-LABEL: intrinsic_vrgather_vi_nxv4i8_nxv4i8_i64:
3634 ; CHECK: # %bb.0: # %entry
3635 ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
3636 ; CHECK-NEXT: vrgather.vi v9, v8, 9
3637 ; CHECK-NEXT: vmv1r.v v8, v9
3638 ; CHECK-NEXT: ret
3639 entry:
3640 %a = call <vscale x 4 x i8> @llvm.riscv.vrgather.vx.nxv4i8.i64(
3641 <vscale x 4 x i8> undef,
3642 <vscale x 4 x i8> %0,
3643 i64 9,
3644 i64 %1)
3646 ret <vscale x 4 x i8> %a
3647 }
3649 define <vscale x 4 x i8> @intrinsic_vrgather_mask_vi_nxv4i8_nxv4i8_i64(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
3650 ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv4i8_nxv4i8_i64:
3651 ; CHECK: # %bb.0: # %entry
3652 ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
3653 ; CHECK-NEXT: vrgather.vi v8, v9, 9, v0.t
3654 ; CHECK-NEXT: ret
3655 entry:
3656 %a = call <vscale x 4 x i8> @llvm.riscv.vrgather.vx.mask.nxv4i8.i64(
3657 <vscale x 4 x i8> %0,
3658 <vscale x 4 x i8> %1,
3659 i64 9,
3660 <vscale x 4 x i1> %2,
3661 i64 %3, i64 1)
3663 ret <vscale x 4 x i8> %a
3664 }
3666 define <vscale x 8 x i8> @intrinsic_vrgather_vi_nxv8i8_nxv8i8_i64(<vscale x 8 x i8> %0, i64 %1) nounwind {
3667 ; CHECK-LABEL: intrinsic_vrgather_vi_nxv8i8_nxv8i8_i64:
3668 ; CHECK: # %bb.0: # %entry
3669 ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
3670 ; CHECK-NEXT: vrgather.vi v9, v8, 9
3671 ; CHECK-NEXT: vmv.v.v v8, v9
3672 ; CHECK-NEXT: ret
3673 entry:
3674 %a = call <vscale x 8 x i8> @llvm.riscv.vrgather.vx.nxv8i8.i64(
3675 <vscale x 8 x i8> undef,
3676 <vscale x 8 x i8> %0,
3677 i64 9,
3678 i64 %1)
3680 ret <vscale x 8 x i8> %a
3681 }
3683 define <vscale x 8 x i8> @intrinsic_vrgather_mask_vi_nxv8i8_nxv8i8_i64(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
3684 ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv8i8_nxv8i8_i64:
3685 ; CHECK: # %bb.0: # %entry
3686 ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
3687 ; CHECK-NEXT: vrgather.vi v8, v9, 9, v0.t
3688 ; CHECK-NEXT: ret
3689 entry:
3690 %a = call <vscale x 8 x i8> @llvm.riscv.vrgather.vx.mask.nxv8i8.i64(
3691 <vscale x 8 x i8> %0,
3692 <vscale x 8 x i8> %1,
3693 i64 9,
3694 <vscale x 8 x i1> %2,
3695 i64 %3, i64 1)
3697 ret <vscale x 8 x i8> %a
3698 }
3700 define <vscale x 16 x i8> @intrinsic_vrgather_vi_nxv16i8_nxv16i8_i64(<vscale x 16 x i8> %0, i64 %1) nounwind {
3701 ; CHECK-LABEL: intrinsic_vrgather_vi_nxv16i8_nxv16i8_i64:
3702 ; CHECK: # %bb.0: # %entry
3703 ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
3704 ; CHECK-NEXT: vrgather.vi v10, v8, 9
3705 ; CHECK-NEXT: vmv.v.v v8, v10
3706 ; CHECK-NEXT: ret
3707 entry:
3708 %a = call <vscale x 16 x i8> @llvm.riscv.vrgather.vx.nxv16i8.i64(
3709 <vscale x 16 x i8> undef,
3710 <vscale x 16 x i8> %0,
3711 i64 9,
3712 i64 %1)
3714 ret <vscale x 16 x i8> %a
3715 }
3717 define <vscale x 16 x i8> @intrinsic_vrgather_mask_vi_nxv16i8_nxv16i8_i64(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
3718 ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv16i8_nxv16i8_i64:
3719 ; CHECK: # %bb.0: # %entry
3720 ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
3721 ; CHECK-NEXT: vrgather.vi v8, v10, 9, v0.t
3722 ; CHECK-NEXT: ret
3723 entry:
3724 %a = call <vscale x 16 x i8> @llvm.riscv.vrgather.vx.mask.nxv16i8.i64(
3725 <vscale x 16 x i8> %0,
3726 <vscale x 16 x i8> %1,
3727 i64 9,
3728 <vscale x 16 x i1> %2,
3729 i64 %3, i64 1)
3731 ret <vscale x 16 x i8> %a
3732 }
3734 define <vscale x 32 x i8> @intrinsic_vrgather_vi_nxv32i8_nxv32i8_i64(<vscale x 32 x i8> %0, i64 %1) nounwind {
3735 ; CHECK-LABEL: intrinsic_vrgather_vi_nxv32i8_nxv32i8_i64:
3736 ; CHECK: # %bb.0: # %entry
3737 ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
3738 ; CHECK-NEXT: vrgather.vi v12, v8, 9
3739 ; CHECK-NEXT: vmv.v.v v8, v12
3740 ; CHECK-NEXT: ret
3741 entry:
3742 %a = call <vscale x 32 x i8> @llvm.riscv.vrgather.vx.nxv32i8.i64(
3743 <vscale x 32 x i8> undef,
3744 <vscale x 32 x i8> %0,
3745 i64 9,
3746 i64 %1)
3748 ret <vscale x 32 x i8> %a
3749 }
3751 define <vscale x 32 x i8> @intrinsic_vrgather_mask_vi_nxv32i8_nxv32i8_i64(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
3752 ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv32i8_nxv32i8_i64:
3753 ; CHECK: # %bb.0: # %entry
3754 ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
3755 ; CHECK-NEXT: vrgather.vi v8, v12, 9, v0.t
3756 ; CHECK-NEXT: ret
3757 entry:
3758 %a = call <vscale x 32 x i8> @llvm.riscv.vrgather.vx.mask.nxv32i8.i64(
3759 <vscale x 32 x i8> %0,
3760 <vscale x 32 x i8> %1,
3761 i64 9,
3762 <vscale x 32 x i1> %2,
3763 i64 %3, i64 1)
3765 ret <vscale x 32 x i8> %a
3766 }
3768 define <vscale x 64 x i8> @intrinsic_vrgather_vi_nxv64i8_nxv64i8_i64(<vscale x 64 x i8> %0, i64 %1) nounwind {
3769 ; CHECK-LABEL: intrinsic_vrgather_vi_nxv64i8_nxv64i8_i64:
3770 ; CHECK: # %bb.0: # %entry
3771 ; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
3772 ; CHECK-NEXT: vrgather.vi v16, v8, 9
3773 ; CHECK-NEXT: vmv.v.v v8, v16
3774 ; CHECK-NEXT: ret
3775 entry:
3776 %a = call <vscale x 64 x i8> @llvm.riscv.vrgather.vx.nxv64i8.i64(
3777 <vscale x 64 x i8> undef,
3778 <vscale x 64 x i8> %0,
3779 i64 9,
3780 i64 %1)
3782 ret <vscale x 64 x i8> %a
3783 }
3785 define <vscale x 64 x i8> @intrinsic_vrgather_mask_vi_nxv64i8_nxv64i8_i64(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i1> %2, i64 %3) nounwind {
3786 ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv64i8_nxv64i8_i64:
3787 ; CHECK: # %bb.0: # %entry
3788 ; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu
3789 ; CHECK-NEXT: vrgather.vi v8, v16, 9, v0.t
3790 ; CHECK-NEXT: ret
3791 entry:
3792 %a = call <vscale x 64 x i8> @llvm.riscv.vrgather.vx.mask.nxv64i8.i64(
3793 <vscale x 64 x i8> %0,
3794 <vscale x 64 x i8> %1,
3795 i64 9,
3796 <vscale x 64 x i1> %2,
3797 i64 %3, i64 1)
3799 ret <vscale x 64 x i8> %a
3800 }
3802 define <vscale x 1 x i16> @intrinsic_vrgather_vi_nxv1i16_nxv1i16_i64(<vscale x 1 x i16> %0, i64 %1) nounwind {
3803 ; CHECK-LABEL: intrinsic_vrgather_vi_nxv1i16_nxv1i16_i64:
3804 ; CHECK: # %bb.0: # %entry
3805 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
3806 ; CHECK-NEXT: vrgather.vi v9, v8, 9
3807 ; CHECK-NEXT: vmv1r.v v8, v9
3808 ; CHECK-NEXT: ret
3809 entry:
3810 %a = call <vscale x 1 x i16> @llvm.riscv.vrgather.vx.nxv1i16.i64(
3811 <vscale x 1 x i16> undef,
3812 <vscale x 1 x i16> %0,
3813 i64 9,
3814 i64 %1)
3816 ret <vscale x 1 x i16> %a
3817 }
3819 define <vscale x 1 x i16> @intrinsic_vrgather_mask_vi_nxv1i16_nxv1i16_i64(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
3820 ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv1i16_nxv1i16_i64:
3821 ; CHECK: # %bb.0: # %entry
3822 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
3823 ; CHECK-NEXT: vrgather.vi v8, v9, 9, v0.t
3824 ; CHECK-NEXT: ret
3825 entry:
3826 %a = call <vscale x 1 x i16> @llvm.riscv.vrgather.vx.mask.nxv1i16.i64(
3827 <vscale x 1 x i16> %0,
3828 <vscale x 1 x i16> %1,
3829 i64 9,
3830 <vscale x 1 x i1> %2,
3831 i64 %3, i64 1)
3833 ret <vscale x 1 x i16> %a
3834 }
3836 define <vscale x 2 x i16> @intrinsic_vrgather_vi_nxv2i16_nxv2i16_i64(<vscale x 2 x i16> %0, i64 %1) nounwind {
3837 ; CHECK-LABEL: intrinsic_vrgather_vi_nxv2i16_nxv2i16_i64:
3838 ; CHECK: # %bb.0: # %entry
3839 ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
3840 ; CHECK-NEXT: vrgather.vi v9, v8, 9
3841 ; CHECK-NEXT: vmv1r.v v8, v9
3842 ; CHECK-NEXT: ret
3843 entry:
3844 %a = call <vscale x 2 x i16> @llvm.riscv.vrgather.vx.nxv2i16.i64(
3845 <vscale x 2 x i16> undef,
3846 <vscale x 2 x i16> %0,
3847 i64 9,
3848 i64 %1)
3850 ret <vscale x 2 x i16> %a
3851 }
3853 define <vscale x 2 x i16> @intrinsic_vrgather_mask_vi_nxv2i16_nxv2i16_i64(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
3854 ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv2i16_nxv2i16_i64:
3855 ; CHECK: # %bb.0: # %entry
3856 ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
3857 ; CHECK-NEXT: vrgather.vi v8, v9, 9, v0.t
3858 ; CHECK-NEXT: ret
3859 entry:
3860 %a = call <vscale x 2 x i16> @llvm.riscv.vrgather.vx.mask.nxv2i16.i64(
3861 <vscale x 2 x i16> %0,
3862 <vscale x 2 x i16> %1,
3863 i64 9,
3864 <vscale x 2 x i1> %2,
3865 i64 %3, i64 1)
3867 ret <vscale x 2 x i16> %a
3868 }
3870 define <vscale x 4 x i16> @intrinsic_vrgather_vi_nxv4i16_nxv4i16_i64(<vscale x 4 x i16> %0, i64 %1) nounwind {
3871 ; CHECK-LABEL: intrinsic_vrgather_vi_nxv4i16_nxv4i16_i64:
3872 ; CHECK: # %bb.0: # %entry
3873 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
3874 ; CHECK-NEXT: vrgather.vi v9, v8, 9
3875 ; CHECK-NEXT: vmv.v.v v8, v9
3876 ; CHECK-NEXT: ret
3877 entry:
3878 %a = call <vscale x 4 x i16> @llvm.riscv.vrgather.vx.nxv4i16.i64(
3879 <vscale x 4 x i16> undef,
3880 <vscale x 4 x i16> %0,
3881 i64 9,
3882 i64 %1)
3884 ret <vscale x 4 x i16> %a
3885 }
3887 define <vscale x 4 x i16> @intrinsic_vrgather_mask_vi_nxv4i16_nxv4i16_i64(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
3888 ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv4i16_nxv4i16_i64:
3889 ; CHECK: # %bb.0: # %entry
3890 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
3891 ; CHECK-NEXT: vrgather.vi v8, v9, 9, v0.t
3892 ; CHECK-NEXT: ret
3893 entry:
3894 %a = call <vscale x 4 x i16> @llvm.riscv.vrgather.vx.mask.nxv4i16.i64(
3895 <vscale x 4 x i16> %0,
3896 <vscale x 4 x i16> %1,
3897 i64 9,
3898 <vscale x 4 x i1> %2,
3899 i64 %3, i64 1)
3901 ret <vscale x 4 x i16> %a
3902 }
3904 define <vscale x 8 x i16> @intrinsic_vrgather_vi_nxv8i16_nxv8i16_i64(<vscale x 8 x i16> %0, i64 %1) nounwind {
3905 ; CHECK-LABEL: intrinsic_vrgather_vi_nxv8i16_nxv8i16_i64:
3906 ; CHECK: # %bb.0: # %entry
3907 ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
3908 ; CHECK-NEXT: vrgather.vi v10, v8, 9
3909 ; CHECK-NEXT: vmv.v.v v8, v10
3910 ; CHECK-NEXT: ret
3911 entry:
3912 %a = call <vscale x 8 x i16> @llvm.riscv.vrgather.vx.nxv8i16.i64(
3913 <vscale x 8 x i16> undef,
3914 <vscale x 8 x i16> %0,
3915 i64 9,
3916 i64 %1)
3918 ret <vscale x 8 x i16> %a
3919 }
3921 define <vscale x 8 x i16> @intrinsic_vrgather_mask_vi_nxv8i16_nxv8i16_i64(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
3922 ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv8i16_nxv8i16_i64:
3923 ; CHECK: # %bb.0: # %entry
3924 ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
3925 ; CHECK-NEXT: vrgather.vi v8, v10, 9, v0.t
3926 ; CHECK-NEXT: ret
3927 entry:
3928 %a = call <vscale x 8 x i16> @llvm.riscv.vrgather.vx.mask.nxv8i16.i64(
3929 <vscale x 8 x i16> %0,
3930 <vscale x 8 x i16> %1,
3931 i64 9,
3932 <vscale x 8 x i1> %2,
3933 i64 %3, i64 1)
3935 ret <vscale x 8 x i16> %a
3936 }
3938 define <vscale x 16 x i16> @intrinsic_vrgather_vi_nxv16i16_nxv16i16_i64(<vscale x 16 x i16> %0, i64 %1) nounwind {
3939 ; CHECK-LABEL: intrinsic_vrgather_vi_nxv16i16_nxv16i16_i64:
3940 ; CHECK: # %bb.0: # %entry
3941 ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
3942 ; CHECK-NEXT: vrgather.vi v12, v8, 9
3943 ; CHECK-NEXT: vmv.v.v v8, v12
3944 ; CHECK-NEXT: ret
3945 entry:
3946 %a = call <vscale x 16 x i16> @llvm.riscv.vrgather.vx.nxv16i16.i64(
3947 <vscale x 16 x i16> undef,
3948 <vscale x 16 x i16> %0,
3949 i64 9,
3950 i64 %1)
3952 ret <vscale x 16 x i16> %a
3953 }
3955 define <vscale x 16 x i16> @intrinsic_vrgather_mask_vi_nxv16i16_nxv16i16_i64(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
3956 ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv16i16_nxv16i16_i64:
3957 ; CHECK: # %bb.0: # %entry
3958 ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
3959 ; CHECK-NEXT: vrgather.vi v8, v12, 9, v0.t
3960 ; CHECK-NEXT: ret
3961 entry:
3962 %a = call <vscale x 16 x i16> @llvm.riscv.vrgather.vx.mask.nxv16i16.i64(
3963 <vscale x 16 x i16> %0,
3964 <vscale x 16 x i16> %1,
3965 i64 9,
3966 <vscale x 16 x i1> %2,
3967 i64 %3, i64 1)
3969 ret <vscale x 16 x i16> %a
3970 }
3972 define <vscale x 32 x i16> @intrinsic_vrgather_vi_nxv32i16_nxv32i16_i64(<vscale x 32 x i16> %0, i64 %1) nounwind {
3973 ; CHECK-LABEL: intrinsic_vrgather_vi_nxv32i16_nxv32i16_i64:
3974 ; CHECK: # %bb.0: # %entry
3975 ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
3976 ; CHECK-NEXT: vrgather.vi v16, v8, 9
3977 ; CHECK-NEXT: vmv.v.v v8, v16
3978 ; CHECK-NEXT: ret
3979 entry:
3980 %a = call <vscale x 32 x i16> @llvm.riscv.vrgather.vx.nxv32i16.i64(
3981 <vscale x 32 x i16> undef,
3982 <vscale x 32 x i16> %0,
3983 i64 9,
3984 i64 %1)
3986 ret <vscale x 32 x i16> %a
3987 }
3989 define <vscale x 32 x i16> @intrinsic_vrgather_mask_vi_nxv32i16_nxv32i16_i64(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
3990 ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv32i16_nxv32i16_i64:
3991 ; CHECK: # %bb.0: # %entry
3992 ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu
3993 ; CHECK-NEXT: vrgather.vi v8, v16, 9, v0.t
3994 ; CHECK-NEXT: ret
3995 entry:
3996 %a = call <vscale x 32 x i16> @llvm.riscv.vrgather.vx.mask.nxv32i16.i64(
3997 <vscale x 32 x i16> %0,
3998 <vscale x 32 x i16> %1,
3999 i64 9,
4000 <vscale x 32 x i1> %2,
4001 i64 %3, i64 1)
4003 ret <vscale x 32 x i16> %a
4004 }
4006 define <vscale x 1 x i32> @intrinsic_vrgather_vi_nxv1i32_nxv1i32_i64(<vscale x 1 x i32> %0, i64 %1) nounwind {
4007 ; CHECK-LABEL: intrinsic_vrgather_vi_nxv1i32_nxv1i32_i64:
4008 ; CHECK: # %bb.0: # %entry
4009 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
4010 ; CHECK-NEXT: vrgather.vi v9, v8, 9
4011 ; CHECK-NEXT: vmv1r.v v8, v9
4012 ; CHECK-NEXT: ret
4013 entry:
4014 %a = call <vscale x 1 x i32> @llvm.riscv.vrgather.vx.nxv1i32.i64(
4015 <vscale x 1 x i32> undef,
4016 <vscale x 1 x i32> %0,
4017 i64 9,
4018 i64 %1)
4020 ret <vscale x 1 x i32> %a
4021 }
4023 define <vscale x 1 x i32> @intrinsic_vrgather_mask_vi_nxv1i32_nxv1i32_i64(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
4024 ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv1i32_nxv1i32_i64:
4025 ; CHECK: # %bb.0: # %entry
4026 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
4027 ; CHECK-NEXT: vrgather.vi v8, v9, 9, v0.t
4028 ; CHECK-NEXT: ret
4029 entry:
4030 %a = call <vscale x 1 x i32> @llvm.riscv.vrgather.vx.mask.nxv1i32.i64(
4031 <vscale x 1 x i32> %0,
4032 <vscale x 1 x i32> %1,
4033 i64 9,
4034 <vscale x 1 x i1> %2,
4035 i64 %3, i64 1)
4037 ret <vscale x 1 x i32> %a
4038 }
4040 define <vscale x 2 x i32> @intrinsic_vrgather_vi_nxv2i32_nxv2i32_i64(<vscale x 2 x i32> %0, i64 %1) nounwind {
4041 ; CHECK-LABEL: intrinsic_vrgather_vi_nxv2i32_nxv2i32_i64:
4042 ; CHECK: # %bb.0: # %entry
4043 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
4044 ; CHECK-NEXT: vrgather.vi v9, v8, 9
4045 ; CHECK-NEXT: vmv.v.v v8, v9
4046 ; CHECK-NEXT: ret
4047 entry:
4048 %a = call <vscale x 2 x i32> @llvm.riscv.vrgather.vx.nxv2i32.i64(
4049 <vscale x 2 x i32> undef,
4050 <vscale x 2 x i32> %0,
4051 i64 9,
4052 i64 %1)
4054 ret <vscale x 2 x i32> %a
4055 }
4057 define <vscale x 2 x i32> @intrinsic_vrgather_mask_vi_nxv2i32_nxv2i32_i64(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
4058 ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv2i32_nxv2i32_i64:
4059 ; CHECK: # %bb.0: # %entry
4060 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
4061 ; CHECK-NEXT: vrgather.vi v8, v9, 9, v0.t
4064 %a = call <vscale x 2 x i32> @llvm.riscv.vrgather.vx.mask.nxv2i32.i64(
4065 <vscale x 2 x i32> %0,
4066 <vscale x 2 x i32> %1,
4068 <vscale x 2 x i1> %2,
4071 ret <vscale x 2 x i32> %a
define <vscale x 4 x i32> @intrinsic_vrgather_vi_nxv4i32_nxv4i32_i64(<vscale x 4 x i32> %0, i64 %1) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vi_nxv4i32_nxv4i32_i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vrgather.vi v10, v8, 9
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vrgather.vx.nxv4i32.i64(
    <vscale x 4 x i32> undef,
    <vscale x 4 x i32> %0,
    i64 9,
    i64 %1)

  ret <vscale x 4 x i32> %a
}

define <vscale x 4 x i32> @intrinsic_vrgather_mask_vi_nxv4i32_nxv4i32_i64(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv4i32_nxv4i32_i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT:    vrgather.vi v8, v10, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vrgather.vx.mask.nxv4i32.i64(
    <vscale x 4 x i32> %0,
    <vscale x 4 x i32> %1,
    i64 9,
    <vscale x 4 x i1> %2,
    i64 %3, i64 1)

  ret <vscale x 4 x i32> %a
}
define <vscale x 8 x i32> @intrinsic_vrgather_vi_nxv8i32_nxv8i32_i64(<vscale x 8 x i32> %0, i64 %1) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vi_nxv8i32_nxv8i32_i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vrgather.vi v12, v8, 9
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vrgather.vx.nxv8i32.i64(
    <vscale x 8 x i32> undef,
    <vscale x 8 x i32> %0,
    i64 9,
    i64 %1)

  ret <vscale x 8 x i32> %a
}

define <vscale x 8 x i32> @intrinsic_vrgather_mask_vi_nxv8i32_nxv8i32_i64(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv8i32_nxv8i32_i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT:    vrgather.vi v8, v12, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vrgather.vx.mask.nxv8i32.i64(
    <vscale x 8 x i32> %0,
    <vscale x 8 x i32> %1,
    i64 9,
    <vscale x 8 x i1> %2,
    i64 %3, i64 1)

  ret <vscale x 8 x i32> %a
}
define <vscale x 16 x i32> @intrinsic_vrgather_vi_nxv16i32_nxv16i32_i64(<vscale x 16 x i32> %0, i64 %1) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vi_nxv16i32_nxv16i32_i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT:    vrgather.vi v16, v8, 9
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vrgather.vx.nxv16i32.i64(
    <vscale x 16 x i32> undef,
    <vscale x 16 x i32> %0,
    i64 9,
    i64 %1)

  ret <vscale x 16 x i32> %a
}

define <vscale x 16 x i32> @intrinsic_vrgather_mask_vi_nxv16i32_nxv16i32_i64(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv16i32_nxv16i32_i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
; CHECK-NEXT:    vrgather.vi v8, v16, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vrgather.vx.mask.nxv16i32.i64(
    <vscale x 16 x i32> %0,
    <vscale x 16 x i32> %1,
    i64 9,
    <vscale x 16 x i1> %2,
    i64 %3, i64 1)

  ret <vscale x 16 x i32> %a
}
define <vscale x 1 x i64> @intrinsic_vrgather_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vi_nxv1i64_nxv1i64_i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vrgather.vi v9, v8, 9
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vrgather.vx.nxv1i64.i64(
    <vscale x 1 x i64> undef,
    <vscale x 1 x i64> %0,
    i64 9,
    i64 %1)

  ret <vscale x 1 x i64> %a
}

define <vscale x 1 x i64> @intrinsic_vrgather_mask_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv1i64_nxv1i64_i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT:    vrgather.vi v8, v9, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vrgather.vx.mask.nxv1i64.i64(
    <vscale x 1 x i64> %0,
    <vscale x 1 x i64> %1,
    i64 9,
    <vscale x 1 x i1> %2,
    i64 %3, i64 1)

  ret <vscale x 1 x i64> %a
}
define <vscale x 2 x i64> @intrinsic_vrgather_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vi_nxv2i64_nxv2i64_i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT:    vrgather.vi v10, v8, 9
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vrgather.vx.nxv2i64.i64(
    <vscale x 2 x i64> undef,
    <vscale x 2 x i64> %0,
    i64 9,
    i64 %1)

  ret <vscale x 2 x i64> %a
}

define <vscale x 2 x i64> @intrinsic_vrgather_mask_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv2i64_nxv2i64_i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
; CHECK-NEXT:    vrgather.vi v8, v10, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vrgather.vx.mask.nxv2i64.i64(
    <vscale x 2 x i64> %0,
    <vscale x 2 x i64> %1,
    i64 9,
    <vscale x 2 x i1> %2,
    i64 %3, i64 1)

  ret <vscale x 2 x i64> %a
}
define <vscale x 4 x i64> @intrinsic_vrgather_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vi_nxv4i64_nxv4i64_i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT:    vrgather.vi v12, v8, 9
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vrgather.vx.nxv4i64.i64(
    <vscale x 4 x i64> undef,
    <vscale x 4 x i64> %0,
    i64 9,
    i64 %1)

  ret <vscale x 4 x i64> %a
}

define <vscale x 4 x i64> @intrinsic_vrgather_mask_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv4i64_nxv4i64_i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
; CHECK-NEXT:    vrgather.vi v8, v12, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vrgather.vx.mask.nxv4i64.i64(
    <vscale x 4 x i64> %0,
    <vscale x 4 x i64> %1,
    i64 9,
    <vscale x 4 x i1> %2,
    i64 %3, i64 1)

  ret <vscale x 4 x i64> %a
}
define <vscale x 8 x i64> @intrinsic_vrgather_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vi_nxv8i64_nxv8i64_i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vrgather.vi v16, v8, 9
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vrgather.vx.nxv8i64.i64(
    <vscale x 8 x i64> undef,
    <vscale x 8 x i64> %0,
    i64 9,
    i64 %1)

  ret <vscale x 8 x i64> %a
}

define <vscale x 8 x i64> @intrinsic_vrgather_mask_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv8i64_nxv8i64_i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
; CHECK-NEXT:    vrgather.vi v8, v16, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vrgather.vx.mask.nxv8i64.i64(
    <vscale x 8 x i64> %0,
    <vscale x 8 x i64> %1,
    i64 9,
    <vscale x 8 x i1> %2,
    i64 %3, i64 1)

  ret <vscale x 8 x i64> %a
}
define <vscale x 1 x half> @intrinsic_vrgather_vi_nxv1f16_nxv1f16_i64(<vscale x 1 x half> %0, i64 %1) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vi_nxv1f16_nxv1f16_i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vrgather.vi v9, v8, 9
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vrgather.vx.nxv1f16.i64(
    <vscale x 1 x half> undef,
    <vscale x 1 x half> %0,
    i64 9,
    i64 %1)

  ret <vscale x 1 x half> %a
}

define <vscale x 1 x half> @intrinsic_vrgather_mask_vi_nxv1f16_nxv1f16_i64(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv1f16_nxv1f16_i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT:    vrgather.vi v8, v9, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vrgather.vx.mask.nxv1f16.i64(
    <vscale x 1 x half> %0,
    <vscale x 1 x half> %1,
    i64 9,
    <vscale x 1 x i1> %2,
    i64 %3, i64 1)

  ret <vscale x 1 x half> %a
}
define <vscale x 2 x half> @intrinsic_vrgather_vi_nxv2f16_nxv2f16_i64(<vscale x 2 x half> %0, i64 %1) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vi_nxv2f16_nxv2f16_i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vrgather.vi v9, v8, 9
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x half> @llvm.riscv.vrgather.vx.nxv2f16.i64(
    <vscale x 2 x half> undef,
    <vscale x 2 x half> %0,
    i64 9,
    i64 %1)

  ret <vscale x 2 x half> %a
}

define <vscale x 2 x half> @intrinsic_vrgather_mask_vi_nxv2f16_nxv2f16_i64(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv2f16_nxv2f16_i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT:    vrgather.vi v8, v9, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x half> @llvm.riscv.vrgather.vx.mask.nxv2f16.i64(
    <vscale x 2 x half> %0,
    <vscale x 2 x half> %1,
    i64 9,
    <vscale x 2 x i1> %2,
    i64 %3, i64 1)

  ret <vscale x 2 x half> %a
}
define <vscale x 4 x half> @intrinsic_vrgather_vi_nxv4f16_nxv4f16_i64(<vscale x 4 x half> %0, i64 %1) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vi_nxv4f16_nxv4f16_i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vrgather.vi v9, v8, 9
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x half> @llvm.riscv.vrgather.vx.nxv4f16.i64(
    <vscale x 4 x half> undef,
    <vscale x 4 x half> %0,
    i64 9,
    i64 %1)

  ret <vscale x 4 x half> %a
}

define <vscale x 4 x half> @intrinsic_vrgather_mask_vi_nxv4f16_nxv4f16_i64(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv4f16_nxv4f16_i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT:    vrgather.vi v8, v9, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x half> @llvm.riscv.vrgather.vx.mask.nxv4f16.i64(
    <vscale x 4 x half> %0,
    <vscale x 4 x half> %1,
    i64 9,
    <vscale x 4 x i1> %2,
    i64 %3, i64 1)

  ret <vscale x 4 x half> %a
}
define <vscale x 8 x half> @intrinsic_vrgather_vi_nxv8f16_nxv8f16_i64(<vscale x 8 x half> %0, i64 %1) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vi_nxv8f16_nxv8f16_i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    vrgather.vi v10, v8, 9
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x half> @llvm.riscv.vrgather.vx.nxv8f16.i64(
    <vscale x 8 x half> undef,
    <vscale x 8 x half> %0,
    i64 9,
    i64 %1)

  ret <vscale x 8 x half> %a
}

define <vscale x 8 x half> @intrinsic_vrgather_mask_vi_nxv8f16_nxv8f16_i64(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv8f16_nxv8f16_i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT:    vrgather.vi v8, v10, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x half> @llvm.riscv.vrgather.vx.mask.nxv8f16.i64(
    <vscale x 8 x half> %0,
    <vscale x 8 x half> %1,
    i64 9,
    <vscale x 8 x i1> %2,
    i64 %3, i64 1)

  ret <vscale x 8 x half> %a
}
define <vscale x 16 x half> @intrinsic_vrgather_vi_nxv16f16_nxv16f16_i64(<vscale x 16 x half> %0, i64 %1) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vi_nxv16f16_nxv16f16_i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT:    vrgather.vi v12, v8, 9
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x half> @llvm.riscv.vrgather.vx.nxv16f16.i64(
    <vscale x 16 x half> undef,
    <vscale x 16 x half> %0,
    i64 9,
    i64 %1)

  ret <vscale x 16 x half> %a
}

define <vscale x 16 x half> @intrinsic_vrgather_mask_vi_nxv16f16_nxv16f16_i64(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv16f16_nxv16f16_i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT:    vrgather.vi v8, v12, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x half> @llvm.riscv.vrgather.vx.mask.nxv16f16.i64(
    <vscale x 16 x half> %0,
    <vscale x 16 x half> %1,
    i64 9,
    <vscale x 16 x i1> %2,
    i64 %3, i64 1)

  ret <vscale x 16 x half> %a
}
define <vscale x 32 x half> @intrinsic_vrgather_vi_nxv32f16_nxv32f16_i64(<vscale x 32 x half> %0, i64 %1) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vi_nxv32f16_nxv32f16_i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT:    vrgather.vi v16, v8, 9
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x half> @llvm.riscv.vrgather.vx.nxv32f16.i64(
    <vscale x 32 x half> undef,
    <vscale x 32 x half> %0,
    i64 9,
    i64 %1)

  ret <vscale x 32 x half> %a
}

define <vscale x 32 x half> @intrinsic_vrgather_mask_vi_nxv32f16_nxv32f16_i64(<vscale x 32 x half> %0, <vscale x 32 x half> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv32f16_nxv32f16_i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
; CHECK-NEXT:    vrgather.vi v8, v16, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x half> @llvm.riscv.vrgather.vx.mask.nxv32f16.i64(
    <vscale x 32 x half> %0,
    <vscale x 32 x half> %1,
    i64 9,
    <vscale x 32 x i1> %2,
    i64 %3, i64 1)

  ret <vscale x 32 x half> %a
}
define <vscale x 1 x float> @intrinsic_vrgather_vi_nxv1f32_nxv1f32_i64(<vscale x 1 x float> %0, i64 %1) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vi_nxv1f32_nxv1f32_i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vrgather.vi v9, v8, 9
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vrgather.vx.nxv1f32.i64(
    <vscale x 1 x float> undef,
    <vscale x 1 x float> %0,
    i64 9,
    i64 %1)

  ret <vscale x 1 x float> %a
}

define <vscale x 1 x float> @intrinsic_vrgather_mask_vi_nxv1f32_nxv1f32_i64(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv1f32_nxv1f32_i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT:    vrgather.vi v8, v9, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vrgather.vx.mask.nxv1f32.i64(
    <vscale x 1 x float> %0,
    <vscale x 1 x float> %1,
    i64 9,
    <vscale x 1 x i1> %2,
    i64 %3, i64 1)

  ret <vscale x 1 x float> %a
}
define <vscale x 2 x float> @intrinsic_vrgather_vi_nxv2f32_nxv2f32_i64(<vscale x 2 x float> %0, i64 %1) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vi_nxv2f32_nxv2f32_i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vrgather.vi v9, v8, 9
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vrgather.vx.nxv2f32.i64(
    <vscale x 2 x float> undef,
    <vscale x 2 x float> %0,
    i64 9,
    i64 %1)

  ret <vscale x 2 x float> %a
}

define <vscale x 2 x float> @intrinsic_vrgather_mask_vi_nxv2f32_nxv2f32_i64(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv2f32_nxv2f32_i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT:    vrgather.vi v8, v9, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vrgather.vx.mask.nxv2f32.i64(
    <vscale x 2 x float> %0,
    <vscale x 2 x float> %1,
    i64 9,
    <vscale x 2 x i1> %2,
    i64 %3, i64 1)

  ret <vscale x 2 x float> %a
}
define <vscale x 4 x float> @intrinsic_vrgather_vi_nxv4f32_nxv4f32_i64(<vscale x 4 x float> %0, i64 %1) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vi_nxv4f32_nxv4f32_i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vrgather.vi v10, v8, 9
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x float> @llvm.riscv.vrgather.vx.nxv4f32.i64(
    <vscale x 4 x float> undef,
    <vscale x 4 x float> %0,
    i64 9,
    i64 %1)

  ret <vscale x 4 x float> %a
}

define <vscale x 4 x float> @intrinsic_vrgather_mask_vi_nxv4f32_nxv4f32_i64(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv4f32_nxv4f32_i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT:    vrgather.vi v8, v10, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x float> @llvm.riscv.vrgather.vx.mask.nxv4f32.i64(
    <vscale x 4 x float> %0,
    <vscale x 4 x float> %1,
    i64 9,
    <vscale x 4 x i1> %2,
    i64 %3, i64 1)

  ret <vscale x 4 x float> %a
}
define <vscale x 8 x float> @intrinsic_vrgather_vi_nxv8f32_nxv8f32_i64(<vscale x 8 x float> %0, i64 %1) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vi_nxv8f32_nxv8f32_i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vrgather.vi v12, v8, 9
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x float> @llvm.riscv.vrgather.vx.nxv8f32.i64(
    <vscale x 8 x float> undef,
    <vscale x 8 x float> %0,
    i64 9,
    i64 %1)

  ret <vscale x 8 x float> %a
}

define <vscale x 8 x float> @intrinsic_vrgather_mask_vi_nxv8f32_nxv8f32_i64(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv8f32_nxv8f32_i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT:    vrgather.vi v8, v12, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x float> @llvm.riscv.vrgather.vx.mask.nxv8f32.i64(
    <vscale x 8 x float> %0,
    <vscale x 8 x float> %1,
    i64 9,
    <vscale x 8 x i1> %2,
    i64 %3, i64 1)

  ret <vscale x 8 x float> %a
}
define <vscale x 16 x float> @intrinsic_vrgather_vi_nxv16f32_nxv16f32_i64(<vscale x 16 x float> %0, i64 %1) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vi_nxv16f32_nxv16f32_i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT:    vrgather.vi v16, v8, 9
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x float> @llvm.riscv.vrgather.vx.nxv16f32.i64(
    <vscale x 16 x float> undef,
    <vscale x 16 x float> %0,
    i64 9,
    i64 %1)

  ret <vscale x 16 x float> %a
}

define <vscale x 16 x float> @intrinsic_vrgather_mask_vi_nxv16f32_nxv16f32_i64(<vscale x 16 x float> %0, <vscale x 16 x float> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv16f32_nxv16f32_i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
; CHECK-NEXT:    vrgather.vi v8, v16, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x float> @llvm.riscv.vrgather.vx.mask.nxv16f32.i64(
    <vscale x 16 x float> %0,
    <vscale x 16 x float> %1,
    i64 9,
    <vscale x 16 x i1> %2,
    i64 %3, i64 1)

  ret <vscale x 16 x float> %a
}
define <vscale x 1 x double> @intrinsic_vrgather_vi_nxv1f64_nxv1f64_i64(<vscale x 1 x double> %0, i64 %1) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vi_nxv1f64_nxv1f64_i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vrgather.vi v9, v8, 9
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vrgather.vx.nxv1f64.i64(
    <vscale x 1 x double> undef,
    <vscale x 1 x double> %0,
    i64 9,
    i64 %1)

  ret <vscale x 1 x double> %a
}

define <vscale x 1 x double> @intrinsic_vrgather_mask_vi_nxv1f64_nxv1f64_i64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv1f64_nxv1f64_i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT:    vrgather.vi v8, v9, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vrgather.vx.mask.nxv1f64.i64(
    <vscale x 1 x double> %0,
    <vscale x 1 x double> %1,
    i64 9,
    <vscale x 1 x i1> %2,
    i64 %3, i64 1)

  ret <vscale x 1 x double> %a
}
define <vscale x 2 x double> @intrinsic_vrgather_vi_nxv2f64_nxv2f64_i64(<vscale x 2 x double> %0, i64 %1) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vi_nxv2f64_nxv2f64_i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT:    vrgather.vi v10, v8, 9
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x double> @llvm.riscv.vrgather.vx.nxv2f64.i64(
    <vscale x 2 x double> undef,
    <vscale x 2 x double> %0,
    i64 9,
    i64 %1)

  ret <vscale x 2 x double> %a
}

define <vscale x 2 x double> @intrinsic_vrgather_mask_vi_nxv2f64_nxv2f64_i64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv2f64_nxv2f64_i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
; CHECK-NEXT:    vrgather.vi v8, v10, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x double> @llvm.riscv.vrgather.vx.mask.nxv2f64.i64(
    <vscale x 2 x double> %0,
    <vscale x 2 x double> %1,
    i64 9,
    <vscale x 2 x i1> %2,
    i64 %3, i64 1)

  ret <vscale x 2 x double> %a
}
define <vscale x 4 x double> @intrinsic_vrgather_vi_nxv4f64_nxv4f64_i64(<vscale x 4 x double> %0, i64 %1) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vi_nxv4f64_nxv4f64_i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT:    vrgather.vi v12, v8, 9
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x double> @llvm.riscv.vrgather.vx.nxv4f64.i64(
    <vscale x 4 x double> undef,
    <vscale x 4 x double> %0,
    i64 9,
    i64 %1)

  ret <vscale x 4 x double> %a
}

define <vscale x 4 x double> @intrinsic_vrgather_mask_vi_nxv4f64_nxv4f64_i64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv4f64_nxv4f64_i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
; CHECK-NEXT:    vrgather.vi v8, v12, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x double> @llvm.riscv.vrgather.vx.mask.nxv4f64.i64(
    <vscale x 4 x double> %0,
    <vscale x 4 x double> %1,
    i64 9,
    <vscale x 4 x i1> %2,
    i64 %3, i64 1)

  ret <vscale x 4 x double> %a
}
define <vscale x 8 x double> @intrinsic_vrgather_vi_nxv8f64_nxv8f64_i64(<vscale x 8 x double> %0, i64 %1) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vi_nxv8f64_nxv8f64_i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vrgather.vi v16, v8, 9
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x double> @llvm.riscv.vrgather.vx.nxv8f64.i64(
    <vscale x 8 x double> undef,
    <vscale x 8 x double> %0,
    i64 9,
    i64 %1)

  ret <vscale x 8 x double> %a
}

define <vscale x 8 x double> @intrinsic_vrgather_mask_vi_nxv8f64_nxv8f64_i64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv8f64_nxv8f64_i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
; CHECK-NEXT:    vrgather.vi v8, v16, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x double> @llvm.riscv.vrgather.vx.mask.nxv8f64.i64(
    <vscale x 8 x double> %0,
    <vscale x 8 x double> %1,
    i64 9,
    <vscale x 8 x i1> %2,
    i64 %3, i64 1)

  ret <vscale x 8 x double> %a
}