; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+d,+zfhmin,+zvfh,+zvfbfmin \
; RUN:   -verify-machineinstrs | FileCheck %s
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+d,+zfhmin,+zvfh,+zvfbfmin \
; RUN:   -verify-machineinstrs | FileCheck %s
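
; These tests exercise the llvm.riscv.vrgather.vv and llvm.riscv.vrgather.vx
; intrinsics, unmasked and masked, across integer and floating-point element
; types at each LMUL. The iXLen placeholder is rewritten to i32 or i64 by the
; sed invocations in the RUN lines above.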

declare <vscale x 1 x i8> @llvm.riscv.vrgather.vv.nxv1i8.iXLen(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  iXLen)

define <vscale x 1 x i8> @intrinsic_vrgather_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vrgather.vv v10, v8, v9
; CHECK-NEXT:    vmv1r.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vrgather.vv.nxv1i8.iXLen(
    <vscale x 1 x i8> undef,
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    iXLen %2)
  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vrgather.vv.mask.nxv1i8.iXLen(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen)

define <vscale x 1 x i8> @intrinsic_vrgather_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT:    vrgather.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vrgather.vv.mask.nxv1i8.iXLen(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    <vscale x 1 x i8> %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)
  ret <vscale x 1 x i8> %a
}

declare <vscale x 2 x i8> @llvm.riscv.vrgather.vv.nxv2i8.iXLen(
  <vscale x 2 x i8>,
  <vscale x 2 x i8>,
  <vscale x 2 x i8>,
  iXLen)

define <vscale x 2 x i8> @intrinsic_vrgather_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vv_nxv2i8_nxv2i8_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT:    vrgather.vv v10, v8, v9
; CHECK-NEXT:    vmv1r.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vrgather.vv.nxv2i8.iXLen(
    <vscale x 2 x i8> undef,
    <vscale x 2 x i8> %0,
    <vscale x 2 x i8> %1,
    iXLen %2)
  ret <vscale x 2 x i8> %a
}

declare <vscale x 2 x i8> @llvm.riscv.vrgather.vv.mask.nxv2i8.iXLen(
  <vscale x 2 x i8>,
  <vscale x 2 x i8>,
  <vscale x 2 x i8>,
  <vscale x 2 x i1>,
  iXLen,
  iXLen)

define <vscale x 2 x i8> @intrinsic_vrgather_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv2i8_nxv2i8_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
; CHECK-NEXT:    vrgather.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vrgather.vv.mask.nxv2i8.iXLen(
    <vscale x 2 x i8> %0,
    <vscale x 2 x i8> %1,
    <vscale x 2 x i8> %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)
  ret <vscale x 2 x i8> %a
}

declare <vscale x 4 x i8> @llvm.riscv.vrgather.vv.nxv4i8.iXLen(
  <vscale x 4 x i8>,
  <vscale x 4 x i8>,
  <vscale x 4 x i8>,
  iXLen)

define <vscale x 4 x i8> @intrinsic_vrgather_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vv_nxv4i8_nxv4i8_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT:    vrgather.vv v10, v8, v9
; CHECK-NEXT:    vmv1r.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vrgather.vv.nxv4i8.iXLen(
    <vscale x 4 x i8> undef,
    <vscale x 4 x i8> %0,
    <vscale x 4 x i8> %1,
    iXLen %2)
  ret <vscale x 4 x i8> %a
}

declare <vscale x 4 x i8> @llvm.riscv.vrgather.vv.mask.nxv4i8.iXLen(
  <vscale x 4 x i8>,
  <vscale x 4 x i8>,
  <vscale x 4 x i8>,
  <vscale x 4 x i1>,
  iXLen,
  iXLen)

define <vscale x 4 x i8> @intrinsic_vrgather_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv4i8_nxv4i8_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
; CHECK-NEXT:    vrgather.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vrgather.vv.mask.nxv4i8.iXLen(
    <vscale x 4 x i8> %0,
    <vscale x 4 x i8> %1,
    <vscale x 4 x i8> %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)
  ret <vscale x 4 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vrgather.vv.nxv8i8.iXLen(
  <vscale x 8 x i8>,
  <vscale x 8 x i8>,
  <vscale x 8 x i8>,
  iXLen)

define <vscale x 8 x i8> @intrinsic_vrgather_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vv_nxv8i8_nxv8i8_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT:    vrgather.vv v10, v8, v9
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vrgather.vv.nxv8i8.iXLen(
    <vscale x 8 x i8> undef,
    <vscale x 8 x i8> %0,
    <vscale x 8 x i8> %1,
    iXLen %2)
  ret <vscale x 8 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vrgather.vv.mask.nxv8i8.iXLen(
  <vscale x 8 x i8>,
  <vscale x 8 x i8>,
  <vscale x 8 x i8>,
  <vscale x 8 x i1>,
  iXLen,
  iXLen)

define <vscale x 8 x i8> @intrinsic_vrgather_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv8i8_nxv8i8_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
; CHECK-NEXT:    vrgather.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vrgather.vv.mask.nxv8i8.iXLen(
    <vscale x 8 x i8> %0,
    <vscale x 8 x i8> %1,
    <vscale x 8 x i8> %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)
  ret <vscale x 8 x i8> %a
}

declare <vscale x 16 x i8> @llvm.riscv.vrgather.vv.nxv16i8.iXLen(
  <vscale x 16 x i8>,
  <vscale x 16 x i8>,
  <vscale x 16 x i8>,
  iXLen)

define <vscale x 16 x i8> @intrinsic_vrgather_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vv_nxv16i8_nxv16i8_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT:    vrgather.vv v12, v8, v10
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vrgather.vv.nxv16i8.iXLen(
    <vscale x 16 x i8> undef,
    <vscale x 16 x i8> %0,
    <vscale x 16 x i8> %1,
    iXLen %2)
  ret <vscale x 16 x i8> %a
}

declare <vscale x 16 x i8> @llvm.riscv.vrgather.vv.mask.nxv16i8.iXLen(
  <vscale x 16 x i8>,
  <vscale x 16 x i8>,
  <vscale x 16 x i8>,
  <vscale x 16 x i1>,
  iXLen,
  iXLen)

define <vscale x 16 x i8> @intrinsic_vrgather_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv16i8_nxv16i8_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
; CHECK-NEXT:    vrgather.vv v8, v10, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vrgather.vv.mask.nxv16i8.iXLen(
    <vscale x 16 x i8> %0,
    <vscale x 16 x i8> %1,
    <vscale x 16 x i8> %2,
    <vscale x 16 x i1> %3,
    iXLen %4, iXLen 1)
  ret <vscale x 16 x i8> %a
}

declare <vscale x 32 x i8> @llvm.riscv.vrgather.vv.nxv32i8.iXLen(
  <vscale x 32 x i8>,
  <vscale x 32 x i8>,
  <vscale x 32 x i8>,
  iXLen)

define <vscale x 32 x i8> @intrinsic_vrgather_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vv_nxv32i8_nxv32i8_nxv32i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT:    vrgather.vv v16, v8, v12
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vrgather.vv.nxv32i8.iXLen(
    <vscale x 32 x i8> undef,
    <vscale x 32 x i8> %0,
    <vscale x 32 x i8> %1,
    iXLen %2)
  ret <vscale x 32 x i8> %a
}

declare <vscale x 32 x i8> @llvm.riscv.vrgather.vv.mask.nxv32i8.iXLen(
  <vscale x 32 x i8>,
  <vscale x 32 x i8>,
  <vscale x 32 x i8>,
  <vscale x 32 x i1>,
  iXLen,
  iXLen)

define <vscale x 32 x i8> @intrinsic_vrgather_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv32i8_nxv32i8_nxv32i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
; CHECK-NEXT:    vrgather.vv v8, v12, v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vrgather.vv.mask.nxv32i8.iXLen(
    <vscale x 32 x i8> %0,
    <vscale x 32 x i8> %1,
    <vscale x 32 x i8> %2,
    <vscale x 32 x i1> %3,
    iXLen %4, iXLen 1)
  ret <vscale x 32 x i8> %a
}

declare <vscale x 64 x i8> @llvm.riscv.vrgather.vv.nxv64i8.iXLen(
  <vscale x 64 x i8>,
  <vscale x 64 x i8>,
  <vscale x 64 x i8>,
  iXLen)

define <vscale x 64 x i8> @intrinsic_vrgather_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vv_nxv64i8_nxv64i8_nxv64i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT:    vrgather.vv v24, v8, v16
; CHECK-NEXT:    vmv.v.v v8, v24
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 64 x i8> @llvm.riscv.vrgather.vv.nxv64i8.iXLen(
    <vscale x 64 x i8> undef,
    <vscale x 64 x i8> %0,
    <vscale x 64 x i8> %1,
    iXLen %2)
  ret <vscale x 64 x i8> %a
}

declare <vscale x 64 x i8> @llvm.riscv.vrgather.vv.mask.nxv64i8.iXLen(
  <vscale x 64 x i8>,
  <vscale x 64 x i8>,
  <vscale x 64 x i8>,
  <vscale x 64 x i1>,
  iXLen,
  iXLen)

define <vscale x 64 x i8> @intrinsic_vrgather_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv64i8_nxv64i8_nxv64i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vl8r.v v24, (a0)
; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
; CHECK-NEXT:    vrgather.vv v8, v16, v24, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 64 x i8> @llvm.riscv.vrgather.vv.mask.nxv64i8.iXLen(
    <vscale x 64 x i8> %0,
    <vscale x 64 x i8> %1,
    <vscale x 64 x i8> %2,
    <vscale x 64 x i1> %3,
    iXLen %4, iXLen 1)
  ret <vscale x 64 x i8> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vrgather.vv.nxv1i16.iXLen(
  <vscale x 1 x i16>,
  <vscale x 1 x i16>,
  <vscale x 1 x i16>,
  iXLen)

define <vscale x 1 x i16> @intrinsic_vrgather_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vv_nxv1i16_nxv1i16_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vrgather.vv v10, v8, v9
; CHECK-NEXT:    vmv1r.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vrgather.vv.nxv1i16.iXLen(
    <vscale x 1 x i16> undef,
    <vscale x 1 x i16> %0,
    <vscale x 1 x i16> %1,
    iXLen %2)
  ret <vscale x 1 x i16> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vrgather.vv.mask.nxv1i16.iXLen(
  <vscale x 1 x i16>,
  <vscale x 1 x i16>,
  <vscale x 1 x i16>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen)

define <vscale x 1 x i16> @intrinsic_vrgather_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv1i16_nxv1i16_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT:    vrgather.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vrgather.vv.mask.nxv1i16.iXLen(
    <vscale x 1 x i16> %0,
    <vscale x 1 x i16> %1,
    <vscale x 1 x i16> %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)
  ret <vscale x 1 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vrgather.vv.nxv2i16.iXLen(
  <vscale x 2 x i16>,
  <vscale x 2 x i16>,
  <vscale x 2 x i16>,
  iXLen)

define <vscale x 2 x i16> @intrinsic_vrgather_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vv_nxv2i16_nxv2i16_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vrgather.vv v10, v8, v9
; CHECK-NEXT:    vmv1r.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vrgather.vv.nxv2i16.iXLen(
    <vscale x 2 x i16> undef,
    <vscale x 2 x i16> %0,
    <vscale x 2 x i16> %1,
    iXLen %2)
  ret <vscale x 2 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vrgather.vv.mask.nxv2i16.iXLen(
  <vscale x 2 x i16>,
  <vscale x 2 x i16>,
  <vscale x 2 x i16>,
  <vscale x 2 x i1>,
  iXLen,
  iXLen)

define <vscale x 2 x i16> @intrinsic_vrgather_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv2i16_nxv2i16_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT:    vrgather.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vrgather.vv.mask.nxv2i16.iXLen(
    <vscale x 2 x i16> %0,
    <vscale x 2 x i16> %1,
    <vscale x 2 x i16> %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)
  ret <vscale x 2 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vrgather.vv.nxv4i16.iXLen(
  <vscale x 4 x i16>,
  <vscale x 4 x i16>,
  <vscale x 4 x i16>,
  iXLen)

define <vscale x 4 x i16> @intrinsic_vrgather_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vv_nxv4i16_nxv4i16_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vrgather.vv v10, v8, v9
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vrgather.vv.nxv4i16.iXLen(
    <vscale x 4 x i16> undef,
    <vscale x 4 x i16> %0,
    <vscale x 4 x i16> %1,
    iXLen %2)
  ret <vscale x 4 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vrgather.vv.mask.nxv4i16.iXLen(
  <vscale x 4 x i16>,
  <vscale x 4 x i16>,
  <vscale x 4 x i16>,
  <vscale x 4 x i1>,
  iXLen,
  iXLen)

define <vscale x 4 x i16> @intrinsic_vrgather_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv4i16_nxv4i16_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT:    vrgather.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vrgather.vv.mask.nxv4i16.iXLen(
    <vscale x 4 x i16> %0,
    <vscale x 4 x i16> %1,
    <vscale x 4 x i16> %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)
  ret <vscale x 4 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vrgather.vv.nxv8i16.iXLen(
  <vscale x 8 x i16>,
  <vscale x 8 x i16>,
  <vscale x 8 x i16>,
  iXLen)

define <vscale x 8 x i16> @intrinsic_vrgather_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vv_nxv8i16_nxv8i16_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    vrgather.vv v12, v8, v10
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vrgather.vv.nxv8i16.iXLen(
    <vscale x 8 x i16> undef,
    <vscale x 8 x i16> %0,
    <vscale x 8 x i16> %1,
    iXLen %2)
  ret <vscale x 8 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vrgather.vv.mask.nxv8i16.iXLen(
  <vscale x 8 x i16>,
  <vscale x 8 x i16>,
  <vscale x 8 x i16>,
  <vscale x 8 x i1>,
  iXLen,
  iXLen)

define <vscale x 8 x i16> @intrinsic_vrgather_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv8i16_nxv8i16_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT:    vrgather.vv v8, v10, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vrgather.vv.mask.nxv8i16.iXLen(
    <vscale x 8 x i16> %0,
    <vscale x 8 x i16> %1,
    <vscale x 8 x i16> %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)
  ret <vscale x 8 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vrgather.vv.nxv16i16.iXLen(
  <vscale x 16 x i16>,
  <vscale x 16 x i16>,
  <vscale x 16 x i16>,
  iXLen)

define <vscale x 16 x i16> @intrinsic_vrgather_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vv_nxv16i16_nxv16i16_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT:    vrgather.vv v16, v8, v12
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vrgather.vv.nxv16i16.iXLen(
    <vscale x 16 x i16> undef,
    <vscale x 16 x i16> %0,
    <vscale x 16 x i16> %1,
    iXLen %2)
  ret <vscale x 16 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vrgather.vv.mask.nxv16i16.iXLen(
  <vscale x 16 x i16>,
  <vscale x 16 x i16>,
  <vscale x 16 x i16>,
  <vscale x 16 x i1>,
  iXLen,
  iXLen)

define <vscale x 16 x i16> @intrinsic_vrgather_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv16i16_nxv16i16_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT:    vrgather.vv v8, v12, v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vrgather.vv.mask.nxv16i16.iXLen(
    <vscale x 16 x i16> %0,
    <vscale x 16 x i16> %1,
    <vscale x 16 x i16> %2,
    <vscale x 16 x i1> %3,
    iXLen %4, iXLen 1)
  ret <vscale x 16 x i16> %a
}

declare <vscale x 32 x i16> @llvm.riscv.vrgather.vv.nxv32i16.iXLen(
  <vscale x 32 x i16>,
  <vscale x 32 x i16>,
  <vscale x 32 x i16>,
  iXLen)

define <vscale x 32 x i16> @intrinsic_vrgather_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vv_nxv32i16_nxv32i16_nxv32i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT:    vrgather.vv v24, v8, v16
; CHECK-NEXT:    vmv.v.v v8, v24
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vrgather.vv.nxv32i16.iXLen(
    <vscale x 32 x i16> undef,
    <vscale x 32 x i16> %0,
    <vscale x 32 x i16> %1,
    iXLen %2)
  ret <vscale x 32 x i16> %a
}

declare <vscale x 32 x i16> @llvm.riscv.vrgather.vv.mask.nxv32i16.iXLen(
  <vscale x 32 x i16>,
  <vscale x 32 x i16>,
  <vscale x 32 x i16>,
  <vscale x 32 x i1>,
  iXLen,
  iXLen)

define <vscale x 32 x i16> @intrinsic_vrgather_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv32i16_nxv32i16_nxv32i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vl8re16.v v24, (a0)
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
; CHECK-NEXT:    vrgather.vv v8, v16, v24, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vrgather.vv.mask.nxv32i16.iXLen(
    <vscale x 32 x i16> %0,
    <vscale x 32 x i16> %1,
    <vscale x 32 x i16> %2,
    <vscale x 32 x i1> %3,
    iXLen %4, iXLen 1)
  ret <vscale x 32 x i16> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vrgather.vv.nxv1i32.iXLen(
  <vscale x 1 x i32>,
  <vscale x 1 x i32>,
  <vscale x 1 x i32>,
  iXLen)

define <vscale x 1 x i32> @intrinsic_vrgather_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vv_nxv1i32_nxv1i32_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vrgather.vv v10, v8, v9
; CHECK-NEXT:    vmv1r.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vrgather.vv.nxv1i32.iXLen(
    <vscale x 1 x i32> undef,
    <vscale x 1 x i32> %0,
    <vscale x 1 x i32> %1,
    iXLen %2)
  ret <vscale x 1 x i32> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vrgather.vv.mask.nxv1i32.iXLen(
  <vscale x 1 x i32>,
  <vscale x 1 x i32>,
  <vscale x 1 x i32>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen)

define <vscale x 1 x i32> @intrinsic_vrgather_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv1i32_nxv1i32_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT:    vrgather.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vrgather.vv.mask.nxv1i32.iXLen(
    <vscale x 1 x i32> %0,
    <vscale x 1 x i32> %1,
    <vscale x 1 x i32> %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)
  ret <vscale x 1 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vrgather.vv.nxv2i32.iXLen(
  <vscale x 2 x i32>,
  <vscale x 2 x i32>,
  <vscale x 2 x i32>,
  iXLen)

define <vscale x 2 x i32> @intrinsic_vrgather_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vv_nxv2i32_nxv2i32_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vrgather.vv v10, v8, v9
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vrgather.vv.nxv2i32.iXLen(
    <vscale x 2 x i32> undef,
    <vscale x 2 x i32> %0,
    <vscale x 2 x i32> %1,
    iXLen %2)
  ret <vscale x 2 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vrgather.vv.mask.nxv2i32.iXLen(
  <vscale x 2 x i32>,
  <vscale x 2 x i32>,
  <vscale x 2 x i32>,
  <vscale x 2 x i1>,
  iXLen,
  iXLen)

define <vscale x 2 x i32> @intrinsic_vrgather_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv2i32_nxv2i32_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT:    vrgather.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vrgather.vv.mask.nxv2i32.iXLen(
    <vscale x 2 x i32> %0,
    <vscale x 2 x i32> %1,
    <vscale x 2 x i32> %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)
  ret <vscale x 2 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vrgather.vv.nxv4i32.iXLen(
  <vscale x 4 x i32>,
  <vscale x 4 x i32>,
  <vscale x 4 x i32>,
  iXLen)

define <vscale x 4 x i32> @intrinsic_vrgather_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vv_nxv4i32_nxv4i32_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vrgather.vv v12, v8, v10
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vrgather.vv.nxv4i32.iXLen(
    <vscale x 4 x i32> undef,
    <vscale x 4 x i32> %0,
    <vscale x 4 x i32> %1,
    iXLen %2)
  ret <vscale x 4 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vrgather.vv.mask.nxv4i32.iXLen(
  <vscale x 4 x i32>,
  <vscale x 4 x i32>,
  <vscale x 4 x i32>,
  <vscale x 4 x i1>,
  iXLen,
  iXLen)

define <vscale x 4 x i32> @intrinsic_vrgather_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv4i32_nxv4i32_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT:    vrgather.vv v8, v10, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vrgather.vv.mask.nxv4i32.iXLen(
    <vscale x 4 x i32> %0,
    <vscale x 4 x i32> %1,
    <vscale x 4 x i32> %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)
  ret <vscale x 4 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vrgather.vv.nxv8i32.iXLen(
  <vscale x 8 x i32>,
  <vscale x 8 x i32>,
  <vscale x 8 x i32>,
  iXLen)

define <vscale x 8 x i32> @intrinsic_vrgather_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vv_nxv8i32_nxv8i32_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vrgather.vv v16, v8, v12
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vrgather.vv.nxv8i32.iXLen(
    <vscale x 8 x i32> undef,
    <vscale x 8 x i32> %0,
    <vscale x 8 x i32> %1,
    iXLen %2)
  ret <vscale x 8 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vrgather.vv.mask.nxv8i32.iXLen(
  <vscale x 8 x i32>,
  <vscale x 8 x i32>,
  <vscale x 8 x i32>,
  <vscale x 8 x i1>,
  iXLen,
  iXLen)

define <vscale x 8 x i32> @intrinsic_vrgather_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv8i32_nxv8i32_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT:    vrgather.vv v8, v12, v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vrgather.vv.mask.nxv8i32.iXLen(
    <vscale x 8 x i32> %0,
    <vscale x 8 x i32> %1,
    <vscale x 8 x i32> %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)
  ret <vscale x 8 x i32> %a
}

declare <vscale x 16 x i32> @llvm.riscv.vrgather.vv.nxv16i32.iXLen(
  <vscale x 16 x i32>,
  <vscale x 16 x i32>,
  <vscale x 16 x i32>,
  iXLen)

define <vscale x 16 x i32> @intrinsic_vrgather_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vv_nxv16i32_nxv16i32_nxv16i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT:    vrgather.vv v24, v8, v16
; CHECK-NEXT:    vmv.v.v v8, v24
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vrgather.vv.nxv16i32.iXLen(
    <vscale x 16 x i32> undef,
    <vscale x 16 x i32> %0,
    <vscale x 16 x i32> %1,
    iXLen %2)
  ret <vscale x 16 x i32> %a
}

declare <vscale x 16 x i32> @llvm.riscv.vrgather.vv.mask.nxv16i32.iXLen(
  <vscale x 16 x i32>,
  <vscale x 16 x i32>,
  <vscale x 16 x i32>,
  <vscale x 16 x i1>,
  iXLen,
  iXLen)

define <vscale x 16 x i32> @intrinsic_vrgather_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv16i32_nxv16i32_nxv16i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vl8re32.v v24, (a0)
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
; CHECK-NEXT:    vrgather.vv v8, v16, v24, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vrgather.vv.mask.nxv16i32.iXLen(
    <vscale x 16 x i32> %0,
    <vscale x 16 x i32> %1,
    <vscale x 16 x i32> %2,
    <vscale x 16 x i1> %3,
    iXLen %4, iXLen 1)
  ret <vscale x 16 x i32> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vrgather.vv.nxv1i64.iXLen(
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  iXLen)

define <vscale x 1 x i64> @intrinsic_vrgather_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vv_nxv1i64_nxv1i64_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vrgather.vv v10, v8, v9
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vrgather.vv.nxv1i64.iXLen(
    <vscale x 1 x i64> undef,
    <vscale x 1 x i64> %0,
    <vscale x 1 x i64> %1,
    iXLen %2)
  ret <vscale x 1 x i64> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vrgather.vv.mask.nxv1i64.iXLen(
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen)

define <vscale x 1 x i64> @intrinsic_vrgather_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv1i64_nxv1i64_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT:    vrgather.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vrgather.vv.mask.nxv1i64.iXLen(
    <vscale x 1 x i64> %0,
    <vscale x 1 x i64> %1,
    <vscale x 1 x i64> %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)
  ret <vscale x 1 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.vrgather.vv.nxv2i64.iXLen(
  <vscale x 2 x i64>,
  <vscale x 2 x i64>,
  <vscale x 2 x i64>,
  iXLen)

define <vscale x 2 x i64> @intrinsic_vrgather_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vv_nxv2i64_nxv2i64_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT:    vrgather.vv v12, v8, v10
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vrgather.vv.nxv2i64.iXLen(
    <vscale x 2 x i64> undef,
    <vscale x 2 x i64> %0,
    <vscale x 2 x i64> %1,
    iXLen %2)
  ret <vscale x 2 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.vrgather.vv.mask.nxv2i64.iXLen(
  <vscale x 2 x i64>,
  <vscale x 2 x i64>,
  <vscale x 2 x i64>,
  <vscale x 2 x i1>,
  iXLen,
  iXLen)

define <vscale x 2 x i64> @intrinsic_vrgather_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv2i64_nxv2i64_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
; CHECK-NEXT:    vrgather.vv v8, v10, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vrgather.vv.mask.nxv2i64.iXLen(
    <vscale x 2 x i64> %0,
    <vscale x 2 x i64> %1,
    <vscale x 2 x i64> %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)
  ret <vscale x 2 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vrgather.vv.nxv4i64.iXLen(
  <vscale x 4 x i64>,
  <vscale x 4 x i64>,
  <vscale x 4 x i64>,
  iXLen)

define <vscale x 4 x i64> @intrinsic_vrgather_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vv_nxv4i64_nxv4i64_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT:    vrgather.vv v16, v8, v12
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vrgather.vv.nxv4i64.iXLen(
    <vscale x 4 x i64> undef,
    <vscale x 4 x i64> %0,
    <vscale x 4 x i64> %1,
    iXLen %2)
  ret <vscale x 4 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vrgather.vv.mask.nxv4i64.iXLen(
  <vscale x 4 x i64>,
  <vscale x 4 x i64>,
  <vscale x 4 x i64>,
  <vscale x 4 x i1>,
  iXLen,
  iXLen)

define <vscale x 4 x i64> @intrinsic_vrgather_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv4i64_nxv4i64_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
; CHECK-NEXT:    vrgather.vv v8, v12, v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vrgather.vv.mask.nxv4i64.iXLen(
    <vscale x 4 x i64> %0,
    <vscale x 4 x i64> %1,
    <vscale x 4 x i64> %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)
  ret <vscale x 4 x i64> %a
}

declare <vscale x 8 x i64> @llvm.riscv.vrgather.vv.nxv8i64.iXLen(
  <vscale x 8 x i64>,
  <vscale x 8 x i64>,
  <vscale x 8 x i64>,
  iXLen)

define <vscale x 8 x i64> @intrinsic_vrgather_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vv_nxv8i64_nxv8i64_nxv8i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vrgather.vv v24, v8, v16
; CHECK-NEXT:    vmv.v.v v8, v24
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vrgather.vv.nxv8i64.iXLen(
    <vscale x 8 x i64> undef,
    <vscale x 8 x i64> %0,
    <vscale x 8 x i64> %1,
    iXLen %2)
  ret <vscale x 8 x i64> %a
}

declare <vscale x 8 x i64> @llvm.riscv.vrgather.vv.mask.nxv8i64.iXLen(
  <vscale x 8 x i64>,
  <vscale x 8 x i64>,
  <vscale x 8 x i64>,
  <vscale x 8 x i1>,
  iXLen,
  iXLen)

define <vscale x 8 x i64> @intrinsic_vrgather_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv8i64_nxv8i64_nxv8i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vl8re64.v v24, (a0)
; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
; CHECK-NEXT:    vrgather.vv v8, v16, v24, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vrgather.vv.mask.nxv8i64.iXLen(
    <vscale x 8 x i64> %0,
    <vscale x 8 x i64> %1,
    <vscale x 8 x i64> %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)
  ret <vscale x 8 x i64> %a
}

declare <vscale x 1 x half> @llvm.riscv.vrgather.vv.nxv1f16.iXLen(
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  <vscale x 1 x i16>,
  iXLen)

define <vscale x 1 x half> @intrinsic_vrgather_vv_nxv1f16_nxv1f16_nxv1i16(<vscale x 1 x half> %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vv_nxv1f16_nxv1f16_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vrgather.vv v10, v8, v9
; CHECK-NEXT:    vmv1r.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vrgather.vv.nxv1f16.iXLen(
    <vscale x 1 x half> undef,
    <vscale x 1 x half> %0,
    <vscale x 1 x i16> %1,
    iXLen %2)
  ret <vscale x 1 x half> %a
}

declare <vscale x 1 x half> @llvm.riscv.vrgather.vv.mask.nxv1f16.iXLen(
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  <vscale x 1 x i16>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen)

define <vscale x 1 x half> @intrinsic_vrgather_mask_vv_nxv1f16_nxv1f16_nxv1i16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv1f16_nxv1f16_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT:    vrgather.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vrgather.vv.mask.nxv1f16.iXLen(
    <vscale x 1 x half> %0,
    <vscale x 1 x half> %1,
    <vscale x 1 x i16> %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)
  ret <vscale x 1 x half> %a
}

declare <vscale x 2 x half> @llvm.riscv.vrgather.vv.nxv2f16.iXLen(
  <vscale x 2 x half>,
  <vscale x 2 x half>,
  <vscale x 2 x i16>,
  iXLen)

define <vscale x 2 x half> @intrinsic_vrgather_vv_nxv2f16_nxv2f16_nxv2i16(<vscale x 2 x half> %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vv_nxv2f16_nxv2f16_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vrgather.vv v10, v8, v9
; CHECK-NEXT:    vmv1r.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x half> @llvm.riscv.vrgather.vv.nxv2f16.iXLen(
    <vscale x 2 x half> undef,
    <vscale x 2 x half> %0,
    <vscale x 2 x i16> %1,
    iXLen %2)
  ret <vscale x 2 x half> %a
}

declare <vscale x 2 x half> @llvm.riscv.vrgather.vv.mask.nxv2f16.iXLen(
  <vscale x 2 x half>,
  <vscale x 2 x half>,
  <vscale x 2 x i16>,
  <vscale x 2 x i1>,
  iXLen,
  iXLen)

define <vscale x 2 x half> @intrinsic_vrgather_mask_vv_nxv2f16_nxv2f16_nxv2i16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv2f16_nxv2f16_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT:    vrgather.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x half> @llvm.riscv.vrgather.vv.mask.nxv2f16.iXLen(
    <vscale x 2 x half> %0,
    <vscale x 2 x half> %1,
    <vscale x 2 x i16> %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)
  ret <vscale x 2 x half> %a
}

declare <vscale x 4 x half> @llvm.riscv.vrgather.vv.nxv4f16.iXLen(
  <vscale x 4 x half>,
  <vscale x 4 x half>,
  <vscale x 4 x i16>,
  iXLen)

define <vscale x 4 x half> @intrinsic_vrgather_vv_nxv4f16_nxv4f16_nxv4i16(<vscale x 4 x half> %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vv_nxv4f16_nxv4f16_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vrgather.vv v10, v8, v9
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x half> @llvm.riscv.vrgather.vv.nxv4f16.iXLen(
    <vscale x 4 x half> undef,
    <vscale x 4 x half> %0,
    <vscale x 4 x i16> %1,
    iXLen %2)
  ret <vscale x 4 x half> %a
}

declare <vscale x 4 x half> @llvm.riscv.vrgather.vv.mask.nxv4f16.iXLen(
  <vscale x 4 x half>,
  <vscale x 4 x half>,
  <vscale x 4 x i16>,
  <vscale x 4 x i1>,
  iXLen,
  iXLen)

define <vscale x 4 x half> @intrinsic_vrgather_mask_vv_nxv4f16_nxv4f16_nxv4i16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv4f16_nxv4f16_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT:    vrgather.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x half> @llvm.riscv.vrgather.vv.mask.nxv4f16.iXLen(
    <vscale x 4 x half> %0,
    <vscale x 4 x half> %1,
    <vscale x 4 x i16> %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)
  ret <vscale x 4 x half> %a
}

declare <vscale x 8 x half> @llvm.riscv.vrgather.vv.nxv8f16.iXLen(
  <vscale x 8 x half>,
  <vscale x 8 x half>,
  <vscale x 8 x i16>,
  iXLen)

define <vscale x 8 x half> @intrinsic_vrgather_vv_nxv8f16_nxv8f16_nxv8i16(<vscale x 8 x half> %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vv_nxv8f16_nxv8f16_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    vrgather.vv v12, v8, v10
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x half> @llvm.riscv.vrgather.vv.nxv8f16.iXLen(
    <vscale x 8 x half> undef,
    <vscale x 8 x half> %0,
    <vscale x 8 x i16> %1,
    iXLen %2)
  ret <vscale x 8 x half> %a
}

declare <vscale x 8 x half> @llvm.riscv.vrgather.vv.mask.nxv8f16.iXLen(
  <vscale x 8 x half>,
  <vscale x 8 x half>,
  <vscale x 8 x i16>,
  <vscale x 8 x i1>,
  iXLen,
  iXLen)

define <vscale x 8 x half> @intrinsic_vrgather_mask_vv_nxv8f16_nxv8f16_nxv8i16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv8f16_nxv8f16_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT:    vrgather.vv v8, v10, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x half> @llvm.riscv.vrgather.vv.mask.nxv8f16.iXLen(
    <vscale x 8 x half> %0,
    <vscale x 8 x half> %1,
    <vscale x 8 x i16> %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)
  ret <vscale x 8 x half> %a
}

declare <vscale x 16 x half> @llvm.riscv.vrgather.vv.nxv16f16.iXLen(
  <vscale x 16 x half>,
  <vscale x 16 x half>,
  <vscale x 16 x i16>,
  iXLen)

define <vscale x 16 x half> @intrinsic_vrgather_vv_nxv16f16_nxv16f16_nxv16i16(<vscale x 16 x half> %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vv_nxv16f16_nxv16f16_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT:    vrgather.vv v16, v8, v12
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x half> @llvm.riscv.vrgather.vv.nxv16f16.iXLen(
    <vscale x 16 x half> undef,
    <vscale x 16 x half> %0,
    <vscale x 16 x i16> %1,
    iXLen %2)
  ret <vscale x 16 x half> %a
}

declare <vscale x 16 x half> @llvm.riscv.vrgather.vv.mask.nxv16f16.iXLen(
  <vscale x 16 x half>,
  <vscale x 16 x half>,
  <vscale x 16 x i16>,
  <vscale x 16 x i1>,
  iXLen,
  iXLen)

define <vscale x 16 x half> @intrinsic_vrgather_mask_vv_nxv16f16_nxv16f16_nxv16i16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv16f16_nxv16f16_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT:    vrgather.vv v8, v12, v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x half> @llvm.riscv.vrgather.vv.mask.nxv16f16.iXLen(
    <vscale x 16 x half> %0,
    <vscale x 16 x half> %1,
    <vscale x 16 x i16> %2,
    <vscale x 16 x i1> %3,
    iXLen %4, iXLen 1)
  ret <vscale x 16 x half> %a
}

declare <vscale x 32 x half> @llvm.riscv.vrgather.vv.nxv32f16.iXLen(
  <vscale x 32 x half>,
  <vscale x 32 x half>,
  <vscale x 32 x i16>,
  iXLen)

define <vscale x 32 x half> @intrinsic_vrgather_vv_nxv32f16_nxv32f16_nxv32i16(<vscale x 32 x half> %0, <vscale x 32 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vv_nxv32f16_nxv32f16_nxv32i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT:    vrgather.vv v24, v8, v16
; CHECK-NEXT:    vmv.v.v v8, v24
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x half> @llvm.riscv.vrgather.vv.nxv32f16.iXLen(
    <vscale x 32 x half> undef,
    <vscale x 32 x half> %0,
    <vscale x 32 x i16> %1,
    iXLen %2)
  ret <vscale x 32 x half> %a
}

declare <vscale x 32 x half> @llvm.riscv.vrgather.vv.mask.nxv32f16.iXLen(
  <vscale x 32 x half>,
  <vscale x 32 x half>,
  <vscale x 32 x i16>,
  <vscale x 32 x i1>,
  iXLen,
  iXLen)

define <vscale x 32 x half> @intrinsic_vrgather_mask_vv_nxv32f16_nxv32f16_nxv32i16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv32f16_nxv32f16_nxv32i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vl8re16.v v24, (a0)
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
; CHECK-NEXT:    vrgather.vv v8, v16, v24, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x half> @llvm.riscv.vrgather.vv.mask.nxv32f16.iXLen(
    <vscale x 32 x half> %0,
    <vscale x 32 x half> %1,
    <vscale x 32 x i16> %2,
    <vscale x 32 x i1> %3,
    iXLen %4, iXLen 1)
  ret <vscale x 32 x half> %a
}

declare <vscale x 1 x float> @llvm.riscv.vrgather.vv.nxv1f32.iXLen(
  <vscale x 1 x float>,
  <vscale x 1 x float>,
  <vscale x 1 x i32>,
  iXLen)

define <vscale x 1 x float> @intrinsic_vrgather_vv_nxv1f32_nxv1f32_nxv1i32(<vscale x 1 x float> %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vv_nxv1f32_nxv1f32_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vrgather.vv v10, v8, v9
; CHECK-NEXT:    vmv1r.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vrgather.vv.nxv1f32.iXLen(
    <vscale x 1 x float> undef,
    <vscale x 1 x float> %0,
    <vscale x 1 x i32> %1,
    iXLen %2)
  ret <vscale x 1 x float> %a
}

declare <vscale x 1 x float> @llvm.riscv.vrgather.vv.mask.nxv1f32.iXLen(
  <vscale x 1 x float>,
  <vscale x 1 x float>,
  <vscale x 1 x i32>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen)

define <vscale x 1 x float> @intrinsic_vrgather_mask_vv_nxv1f32_nxv1f32_nxv1i32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv1f32_nxv1f32_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT:    vrgather.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vrgather.vv.mask.nxv1f32.iXLen(
    <vscale x 1 x float> %0,
    <vscale x 1 x float> %1,
    <vscale x 1 x i32> %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)
  ret <vscale x 1 x float> %a
}

declare <vscale x 2 x float> @llvm.riscv.vrgather.vv.nxv2f32.iXLen(
  <vscale x 2 x float>,
  <vscale x 2 x float>,
  <vscale x 2 x i32>,
  iXLen)

define <vscale x 2 x float> @intrinsic_vrgather_vv_nxv2f32_nxv2f32_nxv2i32(<vscale x 2 x float> %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vv_nxv2f32_nxv2f32_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vrgather.vv v10, v8, v9
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vrgather.vv.nxv2f32.iXLen(
    <vscale x 2 x float> undef,
    <vscale x 2 x float> %0,
    <vscale x 2 x i32> %1,
    iXLen %2)
  ret <vscale x 2 x float> %a
}

declare <vscale x 2 x float> @llvm.riscv.vrgather.vv.mask.nxv2f32.iXLen(
  <vscale x 2 x float>,
  <vscale x 2 x float>,
  <vscale x 2 x i32>,
  <vscale x 2 x i1>,
  iXLen,
  iXLen)

define <vscale x 2 x float> @intrinsic_vrgather_mask_vv_nxv2f32_nxv2f32_nxv2i32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv2f32_nxv2f32_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT:    vrgather.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vrgather.vv.mask.nxv2f32.iXLen(
    <vscale x 2 x float> %0,
    <vscale x 2 x float> %1,
    <vscale x 2 x i32> %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)
  ret <vscale x 2 x float> %a
}

declare <vscale x 4 x float> @llvm.riscv.vrgather.vv.nxv4f32.iXLen(
  <vscale x 4 x float>,
  <vscale x 4 x float>,
  <vscale x 4 x i32>,
  iXLen)

define <vscale x 4 x float> @intrinsic_vrgather_vv_nxv4f32_nxv4f32_nxv4i32(<vscale x 4 x float> %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vv_nxv4f32_nxv4f32_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vrgather.vv v12, v8, v10
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x float> @llvm.riscv.vrgather.vv.nxv4f32.iXLen(
    <vscale x 4 x float> undef,
    <vscale x 4 x float> %0,
    <vscale x 4 x i32> %1,
    iXLen %2)
  ret <vscale x 4 x float> %a
}

declare <vscale x 4 x float> @llvm.riscv.vrgather.vv.mask.nxv4f32.iXLen(
  <vscale x 4 x float>,
  <vscale x 4 x float>,
  <vscale x 4 x i32>,
  <vscale x 4 x i1>,
  iXLen,
  iXLen)

define <vscale x 4 x float> @intrinsic_vrgather_mask_vv_nxv4f32_nxv4f32_nxv4i32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv4f32_nxv4f32_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT:    vrgather.vv v8, v10, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x float> @llvm.riscv.vrgather.vv.mask.nxv4f32.iXLen(
    <vscale x 4 x float> %0,
    <vscale x 4 x float> %1,
    <vscale x 4 x i32> %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)
  ret <vscale x 4 x float> %a
}

declare <vscale x 8 x float> @llvm.riscv.vrgather.vv.nxv8f32.iXLen(
  <vscale x 8 x float>,
  <vscale x 8 x float>,
  <vscale x 8 x i32>,
  iXLen)

define <vscale x 8 x float> @intrinsic_vrgather_vv_nxv8f32_nxv8f32_nxv8i32(<vscale x 8 x float> %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vv_nxv8f32_nxv8f32_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vrgather.vv v16, v8, v12
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x float> @llvm.riscv.vrgather.vv.nxv8f32.iXLen(
    <vscale x 8 x float> undef,
    <vscale x 8 x float> %0,
    <vscale x 8 x i32> %1,
    iXLen %2)
  ret <vscale x 8 x float> %a
}

declare <vscale x 8 x float> @llvm.riscv.vrgather.vv.mask.nxv8f32.iXLen(
  <vscale x 8 x float>,
  <vscale x 8 x float>,
  <vscale x 8 x i32>,
  <vscale x 8 x i1>,
  iXLen,
  iXLen)

define <vscale x 8 x float> @intrinsic_vrgather_mask_vv_nxv8f32_nxv8f32_nxv8i32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv8f32_nxv8f32_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT:    vrgather.vv v8, v12, v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x float> @llvm.riscv.vrgather.vv.mask.nxv8f32.iXLen(
    <vscale x 8 x float> %0,
    <vscale x 8 x float> %1,
    <vscale x 8 x i32> %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)
  ret <vscale x 8 x float> %a
}

declare <vscale x 16 x float> @llvm.riscv.vrgather.vv.nxv16f32.iXLen(
  <vscale x 16 x float>,
  <vscale x 16 x float>,
  <vscale x 16 x i32>,
  iXLen)

define <vscale x 16 x float> @intrinsic_vrgather_vv_nxv16f32_nxv16f32_nxv16i32(<vscale x 16 x float> %0, <vscale x 16 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vv_nxv16f32_nxv16f32_nxv16i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT:    vrgather.vv v24, v8, v16
; CHECK-NEXT:    vmv.v.v v8, v24
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x float> @llvm.riscv.vrgather.vv.nxv16f32.iXLen(
    <vscale x 16 x float> undef,
    <vscale x 16 x float> %0,
    <vscale x 16 x i32> %1,
    iXLen %2)
  ret <vscale x 16 x float> %a
}

declare <vscale x 16 x float> @llvm.riscv.vrgather.vv.mask.nxv16f32.iXLen(
  <vscale x 16 x float>,
  <vscale x 16 x float>,
  <vscale x 16 x i32>,
  <vscale x 16 x i1>,
  iXLen,
  iXLen)

define <vscale x 16 x float> @intrinsic_vrgather_mask_vv_nxv16f32_nxv16f32_nxv16i32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv16f32_nxv16f32_nxv16i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vl8re32.v v24, (a0)
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
; CHECK-NEXT:    vrgather.vv v8, v16, v24, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x float> @llvm.riscv.vrgather.vv.mask.nxv16f32.iXLen(
    <vscale x 16 x float> %0,
    <vscale x 16 x float> %1,
    <vscale x 16 x i32> %2,
    <vscale x 16 x i1> %3,
    iXLen %4, iXLen 1)
  ret <vscale x 16 x float> %a
}

declare <vscale x 1 x double> @llvm.riscv.vrgather.vv.nxv1f64.iXLen(
  <vscale x 1 x double>,
  <vscale x 1 x double>,
  <vscale x 1 x i64>,
  iXLen)

define <vscale x 1 x double> @intrinsic_vrgather_vv_nxv1f64_nxv1f64_nxv1i64(<vscale x 1 x double> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vv_nxv1f64_nxv1f64_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vrgather.vv v10, v8, v9
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vrgather.vv.nxv1f64.iXLen(
    <vscale x 1 x double> undef,
    <vscale x 1 x double> %0,
    <vscale x 1 x i64> %1,
    iXLen %2)
  ret <vscale x 1 x double> %a
}

declare <vscale x 1 x double> @llvm.riscv.vrgather.vv.mask.nxv1f64.iXLen(
  <vscale x 1 x double>,
  <vscale x 1 x double>,
  <vscale x 1 x i64>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen)

define <vscale x 1 x double> @intrinsic_vrgather_mask_vv_nxv1f64_nxv1f64_nxv1i64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv1f64_nxv1f64_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT:    vrgather.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vrgather.vv.mask.nxv1f64.iXLen(
    <vscale x 1 x double> %0,
    <vscale x 1 x double> %1,
    <vscale x 1 x i64> %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)
  ret <vscale x 1 x double> %a
}

declare <vscale x 2 x double> @llvm.riscv.vrgather.vv.nxv2f64.iXLen(
  <vscale x 2 x double>,
  <vscale x 2 x double>,
  <vscale x 2 x i64>,
  iXLen)

define <vscale x 2 x double> @intrinsic_vrgather_vv_nxv2f64_nxv2f64_nxv2i64(<vscale x 2 x double> %0, <vscale x 2 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vv_nxv2f64_nxv2f64_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT:    vrgather.vv v12, v8, v10
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x double> @llvm.riscv.vrgather.vv.nxv2f64.iXLen(
    <vscale x 2 x double> undef,
    <vscale x 2 x double> %0,
    <vscale x 2 x i64> %1,
    iXLen %2)
  ret <vscale x 2 x double> %a
}

declare <vscale x 2 x double> @llvm.riscv.vrgather.vv.mask.nxv2f64.iXLen(
  <vscale x 2 x double>,
  <vscale x 2 x double>,
  <vscale x 2 x i64>,
  <vscale x 2 x i1>,
  iXLen,
  iXLen)

define <vscale x 2 x double> @intrinsic_vrgather_mask_vv_nxv2f64_nxv2f64_nxv2i64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv2f64_nxv2f64_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
; CHECK-NEXT:    vrgather.vv v8, v10, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x double> @llvm.riscv.vrgather.vv.mask.nxv2f64.iXLen(
    <vscale x 2 x double> %0,
    <vscale x 2 x double> %1,
    <vscale x 2 x i64> %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)
  ret <vscale x 2 x double> %a
}

declare <vscale x 4 x double> @llvm.riscv.vrgather.vv.nxv4f64.iXLen(
  <vscale x 4 x double>,
  <vscale x 4 x double>,
  <vscale x 4 x i64>,
  iXLen)

define <vscale x 4 x double> @intrinsic_vrgather_vv_nxv4f64_nxv4f64_nxv4i64(<vscale x 4 x double> %0, <vscale x 4 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vv_nxv4f64_nxv4f64_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT:    vrgather.vv v16, v8, v12
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x double> @llvm.riscv.vrgather.vv.nxv4f64.iXLen(
    <vscale x 4 x double> undef,
    <vscale x 4 x double> %0,
    <vscale x 4 x i64> %1,
    iXLen %2)
  ret <vscale x 4 x double> %a
}

declare <vscale x 4 x double> @llvm.riscv.vrgather.vv.mask.nxv4f64.iXLen(
  <vscale x 4 x double>,
  <vscale x 4 x double>,
  <vscale x 4 x i64>,
  <vscale x 4 x i1>,
  iXLen,
  iXLen)

define <vscale x 4 x double> @intrinsic_vrgather_mask_vv_nxv4f64_nxv4f64_nxv4i64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv4f64_nxv4f64_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
; CHECK-NEXT:    vrgather.vv v8, v12, v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x double> @llvm.riscv.vrgather.vv.mask.nxv4f64.iXLen(
    <vscale x 4 x double> %0,
    <vscale x 4 x double> %1,
    <vscale x 4 x i64> %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)
  ret <vscale x 4 x double> %a
}

declare <vscale x 8 x double> @llvm.riscv.vrgather.vv.nxv8f64.iXLen(
  <vscale x 8 x double>,
  <vscale x 8 x double>,
  <vscale x 8 x i64>,
  iXLen)

define <vscale x 8 x double> @intrinsic_vrgather_vv_nxv8f64_nxv8f64_nxv8i64(<vscale x 8 x double> %0, <vscale x 8 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vv_nxv8f64_nxv8f64_nxv8i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vrgather.vv v24, v8, v16
; CHECK-NEXT:    vmv.v.v v8, v24
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x double> @llvm.riscv.vrgather.vv.nxv8f64.iXLen(
    <vscale x 8 x double> undef,
    <vscale x 8 x double> %0,
    <vscale x 8 x i64> %1,
    iXLen %2)
  ret <vscale x 8 x double> %a
}

declare <vscale x 8 x double> @llvm.riscv.vrgather.vv.mask.nxv8f64.iXLen(
  <vscale x 8 x double>,
  <vscale x 8 x double>,
  <vscale x 8 x i64>,
  <vscale x 8 x i1>,
  iXLen,
  iXLen)

define <vscale x 8 x double> @intrinsic_vrgather_mask_vv_nxv8f64_nxv8f64_nxv8i64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv8f64_nxv8f64_nxv8i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vl8re64.v v24, (a0)
; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
; CHECK-NEXT:    vrgather.vv v8, v16, v24, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x double> @llvm.riscv.vrgather.vv.mask.nxv8f64.iXLen(
    <vscale x 8 x double> %0,
    <vscale x 8 x double> %1,
    <vscale x 8 x i64> %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)
  ret <vscale x 8 x double> %a
}
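
; The remaining tests switch from the vector-index form (vrgather.vv) to the
; scalar-index form (vrgather.vx), where the index comes from a GPR.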
1790 declare <vscale x 1 x i8> @llvm.riscv.vrgather.vx.nxv1i8.iXLen(
1796 define <vscale x 1 x i8> @intrinsic_vrgather_vx_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, iXLen %1, iXLen %2) nounwind {
1797 ; CHECK-LABEL: intrinsic_vrgather_vx_nxv1i8_nxv1i8:
1798 ; CHECK: # %bb.0: # %entry
1799 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
1800 ; CHECK-NEXT: vrgather.vx v9, v8, a0
1801 ; CHECK-NEXT: vmv1r.v v8, v9
1804 %a = call <vscale x 1 x i8> @llvm.riscv.vrgather.vx.nxv1i8.iXLen(
1805 <vscale x 1 x i8> undef,
1806 <vscale x 1 x i8> %0,
1810 ret <vscale x 1 x i8> %a
1813 declare <vscale x 1 x i8> @llvm.riscv.vrgather.vx.mask.nxv1i8.iXLen(
1821 define <vscale x 1 x i8> @intrinsic_vrgather_mask_vx_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
1822 ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv1i8_nxv1i8:
1823 ; CHECK: # %bb.0: # %entry
1824 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
1825 ; CHECK-NEXT: vrgather.vx v8, v9, a0, v0.t
1828 %a = call <vscale x 1 x i8> @llvm.riscv.vrgather.vx.mask.nxv1i8.iXLen(
1829 <vscale x 1 x i8> %0,
1830 <vscale x 1 x i8> %1,
1832 <vscale x 1 x i1> %3,
1835 ret <vscale x 1 x i8> %a
1838 declare <vscale x 2 x i8> @llvm.riscv.vrgather.vx.nxv2i8.iXLen(
1844 define <vscale x 2 x i8> @intrinsic_vrgather_vx_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, iXLen %1, iXLen %2) nounwind {
1845 ; CHECK-LABEL: intrinsic_vrgather_vx_nxv2i8_nxv2i8:
1846 ; CHECK: # %bb.0: # %entry
1847 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
1848 ; CHECK-NEXT: vrgather.vx v9, v8, a0
1849 ; CHECK-NEXT: vmv1r.v v8, v9
1852 %a = call <vscale x 2 x i8> @llvm.riscv.vrgather.vx.nxv2i8.iXLen(
1853 <vscale x 2 x i8> undef,
1854 <vscale x 2 x i8> %0,
1858 ret <vscale x 2 x i8> %a
1861 declare <vscale x 2 x i8> @llvm.riscv.vrgather.vx.mask.nxv2i8.iXLen(
1869 define <vscale x 2 x i8> @intrinsic_vrgather_mask_vx_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
1870 ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv2i8_nxv2i8:
1871 ; CHECK: # %bb.0: # %entry
1872 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
1873 ; CHECK-NEXT: vrgather.vx v8, v9, a0, v0.t
1876 %a = call <vscale x 2 x i8> @llvm.riscv.vrgather.vx.mask.nxv2i8.iXLen(
1877 <vscale x 2 x i8> %0,
1878 <vscale x 2 x i8> %1,
1880 <vscale x 2 x i1> %3,
1883 ret <vscale x 2 x i8> %a
1886 declare <vscale x 4 x i8> @llvm.riscv.vrgather.vx.nxv4i8.iXLen(
1892 define <vscale x 4 x i8> @intrinsic_vrgather_vx_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, iXLen %1, iXLen %2) nounwind {
1893 ; CHECK-LABEL: intrinsic_vrgather_vx_nxv4i8_nxv4i8:
1894 ; CHECK: # %bb.0: # %entry
1895 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
1896 ; CHECK-NEXT: vrgather.vx v9, v8, a0
1897 ; CHECK-NEXT: vmv1r.v v8, v9
1900 %a = call <vscale x 4 x i8> @llvm.riscv.vrgather.vx.nxv4i8.iXLen(
1901 <vscale x 4 x i8> undef,
1902 <vscale x 4 x i8> %0,
1906 ret <vscale x 4 x i8> %a
1909 declare <vscale x 4 x i8> @llvm.riscv.vrgather.vx.mask.nxv4i8.iXLen(
1917 define <vscale x 4 x i8> @intrinsic_vrgather_mask_vx_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
1918 ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv4i8_nxv4i8:
1919 ; CHECK: # %bb.0: # %entry
1920 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
1921 ; CHECK-NEXT: vrgather.vx v8, v9, a0, v0.t
1924 %a = call <vscale x 4 x i8> @llvm.riscv.vrgather.vx.mask.nxv4i8.iXLen(
1925 <vscale x 4 x i8> %0,
1926 <vscale x 4 x i8> %1,
1928 <vscale x 4 x i1> %3,
1931 ret <vscale x 4 x i8> %a
1934 declare <vscale x 8 x i8> @llvm.riscv.vrgather.vx.nxv8i8.iXLen(
1940 define <vscale x 8 x i8> @intrinsic_vrgather_vx_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, iXLen %1, iXLen %2) nounwind {
1941 ; CHECK-LABEL: intrinsic_vrgather_vx_nxv8i8_nxv8i8:
1942 ; CHECK: # %bb.0: # %entry
1943 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
1944 ; CHECK-NEXT: vrgather.vx v9, v8, a0
1945 ; CHECK-NEXT: vmv.v.v v8, v9
1948 %a = call <vscale x 8 x i8> @llvm.riscv.vrgather.vx.nxv8i8.iXLen(
1949 <vscale x 8 x i8> undef,
1950 <vscale x 8 x i8> %0,
1954 ret <vscale x 8 x i8> %a
1957 declare <vscale x 8 x i8> @llvm.riscv.vrgather.vx.mask.nxv8i8.iXLen(
1965 define <vscale x 8 x i8> @intrinsic_vrgather_mask_vx_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, iXLen %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
1966 ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv8i8_nxv8i8:
1967 ; CHECK: # %bb.0: # %entry
1968 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
1969 ; CHECK-NEXT: vrgather.vx v8, v9, a0, v0.t
1972 %a = call <vscale x 8 x i8> @llvm.riscv.vrgather.vx.mask.nxv8i8.iXLen(
1973 <vscale x 8 x i8> %0,
1974 <vscale x 8 x i8> %1,
1976 <vscale x 8 x i1> %3,
1979 ret <vscale x 8 x i8> %a
declare <vscale x 16 x i8> @llvm.riscv.vrgather.vx.nxv16i8.iXLen(
  <vscale x 16 x i8>,
  <vscale x 16 x i8>,
  iXLen,
  iXLen)

define <vscale x 16 x i8> @intrinsic_vrgather_vx_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vx_nxv16i8_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT:    vrgather.vx v10, v8, a0
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vrgather.vx.nxv16i8.iXLen(
    <vscale x 16 x i8> undef,
    <vscale x 16 x i8> %0,
    iXLen %1,
    iXLen %2)

  ret <vscale x 16 x i8> %a
}

declare <vscale x 16 x i8> @llvm.riscv.vrgather.vx.mask.nxv16i8.iXLen(
  <vscale x 16 x i8>,
  <vscale x 16 x i8>,
  iXLen,
  <vscale x 16 x i1>,
  iXLen,
  iXLen)

define <vscale x 16 x i8> @intrinsic_vrgather_mask_vx_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, iXLen %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv16i8_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT:    vrgather.vx v8, v10, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vrgather.vx.mask.nxv16i8.iXLen(
    <vscale x 16 x i8> %0,
    <vscale x 16 x i8> %1,
    iXLen %2,
    <vscale x 16 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 16 x i8> %a
}

declare <vscale x 32 x i8> @llvm.riscv.vrgather.vx.nxv32i8.iXLen(
  <vscale x 32 x i8>,
  <vscale x 32 x i8>,
  iXLen,
  iXLen)

define <vscale x 32 x i8> @intrinsic_vrgather_vx_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vx_nxv32i8_nxv32i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
; CHECK-NEXT:    vrgather.vx v12, v8, a0
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vrgather.vx.nxv32i8.iXLen(
    <vscale x 32 x i8> undef,
    <vscale x 32 x i8> %0,
    iXLen %1,
    iXLen %2)

  ret <vscale x 32 x i8> %a
}

declare <vscale x 32 x i8> @llvm.riscv.vrgather.vx.mask.nxv32i8.iXLen(
  <vscale x 32 x i8>,
  <vscale x 32 x i8>,
  iXLen,
  <vscale x 32 x i1>,
  iXLen,
  iXLen)

define <vscale x 32 x i8> @intrinsic_vrgather_mask_vx_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, iXLen %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv32i8_nxv32i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT:    vrgather.vx v8, v12, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vrgather.vx.mask.nxv32i8.iXLen(
    <vscale x 32 x i8> %0,
    <vscale x 32 x i8> %1,
    iXLen %2,
    <vscale x 32 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 32 x i8> %a
}

declare <vscale x 64 x i8> @llvm.riscv.vrgather.vx.nxv64i8.iXLen(
  <vscale x 64 x i8>,
  <vscale x 64 x i8>,
  iXLen,
  iXLen)

define <vscale x 64 x i8> @intrinsic_vrgather_vx_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vx_nxv64i8_nxv64i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT:    vrgather.vx v16, v8, a0
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 64 x i8> @llvm.riscv.vrgather.vx.nxv64i8.iXLen(
    <vscale x 64 x i8> undef,
    <vscale x 64 x i8> %0,
    iXLen %1,
    iXLen %2)

  ret <vscale x 64 x i8> %a
}

declare <vscale x 64 x i8> @llvm.riscv.vrgather.vx.mask.nxv64i8.iXLen(
  <vscale x 64 x i8>,
  <vscale x 64 x i8>,
  iXLen,
  <vscale x 64 x i1>,
  iXLen,
  iXLen)

define <vscale x 64 x i8> @intrinsic_vrgather_mask_vx_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, iXLen %2, <vscale x 64 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv64i8_nxv64i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
; CHECK-NEXT:    vrgather.vx v8, v16, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 64 x i8> @llvm.riscv.vrgather.vx.mask.nxv64i8.iXLen(
    <vscale x 64 x i8> %0,
    <vscale x 64 x i8> %1,
    iXLen %2,
    <vscale x 64 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 64 x i8> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vrgather.vx.nxv1i16.iXLen(
  <vscale x 1 x i16>,
  <vscale x 1 x i16>,
  iXLen,
  iXLen)

define <vscale x 1 x i16> @intrinsic_vrgather_vx_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vx_nxv1i16_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vrgather.vx v9, v8, a0
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vrgather.vx.nxv1i16.iXLen(
    <vscale x 1 x i16> undef,
    <vscale x 1 x i16> %0,
    iXLen %1,
    iXLen %2)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vrgather.vx.mask.nxv1i16.iXLen(
  <vscale x 1 x i16>,
  <vscale x 1 x i16>,
  iXLen,
  <vscale x 1 x i1>,
  iXLen,
  iXLen)

define <vscale x 1 x i16> @intrinsic_vrgather_mask_vx_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv1i16_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT:    vrgather.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vrgather.vx.mask.nxv1i16.iXLen(
    <vscale x 1 x i16> %0,
    <vscale x 1 x i16> %1,
    iXLen %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vrgather.vx.nxv2i16.iXLen(
  <vscale x 2 x i16>,
  <vscale x 2 x i16>,
  iXLen,
  iXLen)

define <vscale x 2 x i16> @intrinsic_vrgather_vx_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vx_nxv2i16_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT:    vrgather.vx v9, v8, a0
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vrgather.vx.nxv2i16.iXLen(
    <vscale x 2 x i16> undef,
    <vscale x 2 x i16> %0,
    iXLen %1,
    iXLen %2)

  ret <vscale x 2 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vrgather.vx.mask.nxv2i16.iXLen(
  <vscale x 2 x i16>,
  <vscale x 2 x i16>,
  iXLen,
  <vscale x 2 x i1>,
  iXLen,
  iXLen)

define <vscale x 2 x i16> @intrinsic_vrgather_mask_vx_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv2i16_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT:    vrgather.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vrgather.vx.mask.nxv2i16.iXLen(
    <vscale x 2 x i16> %0,
    <vscale x 2 x i16> %1,
    iXLen %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vrgather.vx.nxv4i16.iXLen(
  <vscale x 4 x i16>,
  <vscale x 4 x i16>,
  iXLen,
  iXLen)

define <vscale x 4 x i16> @intrinsic_vrgather_vx_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vx_nxv4i16_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT:    vrgather.vx v9, v8, a0
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vrgather.vx.nxv4i16.iXLen(
    <vscale x 4 x i16> undef,
    <vscale x 4 x i16> %0,
    iXLen %1,
    iXLen %2)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vrgather.vx.mask.nxv4i16.iXLen(
  <vscale x 4 x i16>,
  <vscale x 4 x i16>,
  iXLen,
  <vscale x 4 x i1>,
  iXLen,
  iXLen)

define <vscale x 4 x i16> @intrinsic_vrgather_mask_vx_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv4i16_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT:    vrgather.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vrgather.vx.mask.nxv4i16.iXLen(
    <vscale x 4 x i16> %0,
    <vscale x 4 x i16> %1,
    iXLen %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vrgather.vx.nxv8i16.iXLen(
  <vscale x 8 x i16>,
  <vscale x 8 x i16>,
  iXLen,
  iXLen)

define <vscale x 8 x i16> @intrinsic_vrgather_vx_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vx_nxv8i16_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT:    vrgather.vx v10, v8, a0
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vrgather.vx.nxv8i16.iXLen(
    <vscale x 8 x i16> undef,
    <vscale x 8 x i16> %0,
    iXLen %1,
    iXLen %2)

  ret <vscale x 8 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vrgather.vx.mask.nxv8i16.iXLen(
  <vscale x 8 x i16>,
  <vscale x 8 x i16>,
  iXLen,
  <vscale x 8 x i1>,
  iXLen,
  iXLen)

define <vscale x 8 x i16> @intrinsic_vrgather_mask_vx_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, iXLen %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv8i16_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT:    vrgather.vx v8, v10, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vrgather.vx.mask.nxv8i16.iXLen(
    <vscale x 8 x i16> %0,
    <vscale x 8 x i16> %1,
    iXLen %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vrgather.vx.nxv16i16.iXLen(
  <vscale x 16 x i16>,
  <vscale x 16 x i16>,
  iXLen,
  iXLen)

define <vscale x 16 x i16> @intrinsic_vrgather_vx_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vx_nxv16i16_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT:    vrgather.vx v12, v8, a0
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vrgather.vx.nxv16i16.iXLen(
    <vscale x 16 x i16> undef,
    <vscale x 16 x i16> %0,
    iXLen %1,
    iXLen %2)

  ret <vscale x 16 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vrgather.vx.mask.nxv16i16.iXLen(
  <vscale x 16 x i16>,
  <vscale x 16 x i16>,
  iXLen,
  <vscale x 16 x i1>,
  iXLen,
  iXLen)

define <vscale x 16 x i16> @intrinsic_vrgather_mask_vx_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, iXLen %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv16i16_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT:    vrgather.vx v8, v12, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vrgather.vx.mask.nxv16i16.iXLen(
    <vscale x 16 x i16> %0,
    <vscale x 16 x i16> %1,
    iXLen %2,
    <vscale x 16 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 16 x i16> %a
}

declare <vscale x 32 x i16> @llvm.riscv.vrgather.vx.nxv32i16.iXLen(
  <vscale x 32 x i16>,
  <vscale x 32 x i16>,
  iXLen,
  iXLen)

define <vscale x 32 x i16> @intrinsic_vrgather_vx_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vx_nxv32i16_nxv32i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
; CHECK-NEXT:    vrgather.vx v16, v8, a0
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vrgather.vx.nxv32i16.iXLen(
    <vscale x 32 x i16> undef,
    <vscale x 32 x i16> %0,
    iXLen %1,
    iXLen %2)

  ret <vscale x 32 x i16> %a
}

declare <vscale x 32 x i16> @llvm.riscv.vrgather.vx.mask.nxv32i16.iXLen(
  <vscale x 32 x i16>,
  <vscale x 32 x i16>,
  iXLen,
  <vscale x 32 x i1>,
  iXLen,
  iXLen)

define <vscale x 32 x i16> @intrinsic_vrgather_mask_vx_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, iXLen %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv32i16_nxv32i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
; CHECK-NEXT:    vrgather.vx v8, v16, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vrgather.vx.mask.nxv32i16.iXLen(
    <vscale x 32 x i16> %0,
    <vscale x 32 x i16> %1,
    iXLen %2,
    <vscale x 32 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 32 x i16> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vrgather.vx.nxv1i32.iXLen(
  <vscale x 1 x i32>,
  <vscale x 1 x i32>,
  iXLen,
  iXLen)

define <vscale x 1 x i32> @intrinsic_vrgather_vx_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vx_nxv1i32_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT:    vrgather.vx v9, v8, a0
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vrgather.vx.nxv1i32.iXLen(
    <vscale x 1 x i32> undef,
    <vscale x 1 x i32> %0,
    iXLen %1,
    iXLen %2)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vrgather.vx.mask.nxv1i32.iXLen(
  <vscale x 1 x i32>,
  <vscale x 1 x i32>,
  iXLen,
  <vscale x 1 x i1>,
  iXLen,
  iXLen)

define <vscale x 1 x i32> @intrinsic_vrgather_mask_vx_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv1i32_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT:    vrgather.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vrgather.vx.mask.nxv1i32.iXLen(
    <vscale x 1 x i32> %0,
    <vscale x 1 x i32> %1,
    iXLen %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vrgather.vx.nxv2i32.iXLen(
  <vscale x 2 x i32>,
  <vscale x 2 x i32>,
  iXLen,
  iXLen)

define <vscale x 2 x i32> @intrinsic_vrgather_vx_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vx_nxv2i32_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT:    vrgather.vx v9, v8, a0
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vrgather.vx.nxv2i32.iXLen(
    <vscale x 2 x i32> undef,
    <vscale x 2 x i32> %0,
    iXLen %1,
    iXLen %2)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vrgather.vx.mask.nxv2i32.iXLen(
  <vscale x 2 x i32>,
  <vscale x 2 x i32>,
  iXLen,
  <vscale x 2 x i1>,
  iXLen,
  iXLen)

define <vscale x 2 x i32> @intrinsic_vrgather_mask_vx_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv2i32_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT:    vrgather.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vrgather.vx.mask.nxv2i32.iXLen(
    <vscale x 2 x i32> %0,
    <vscale x 2 x i32> %1,
    iXLen %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vrgather.vx.nxv4i32.iXLen(
  <vscale x 4 x i32>,
  <vscale x 4 x i32>,
  iXLen,
  iXLen)

define <vscale x 4 x i32> @intrinsic_vrgather_vx_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vx_nxv4i32_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT:    vrgather.vx v10, v8, a0
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vrgather.vx.nxv4i32.iXLen(
    <vscale x 4 x i32> undef,
    <vscale x 4 x i32> %0,
    iXLen %1,
    iXLen %2)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vrgather.vx.mask.nxv4i32.iXLen(
  <vscale x 4 x i32>,
  <vscale x 4 x i32>,
  iXLen,
  <vscale x 4 x i1>,
  iXLen,
  iXLen)

define <vscale x 4 x i32> @intrinsic_vrgather_mask_vx_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv4i32_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT:    vrgather.vx v8, v10, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vrgather.vx.mask.nxv4i32.iXLen(
    <vscale x 4 x i32> %0,
    <vscale x 4 x i32> %1,
    iXLen %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vrgather.vx.nxv8i32.iXLen(
  <vscale x 8 x i32>,
  <vscale x 8 x i32>,
  iXLen,
  iXLen)

define <vscale x 8 x i32> @intrinsic_vrgather_vx_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vx_nxv8i32_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT:    vrgather.vx v12, v8, a0
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vrgather.vx.nxv8i32.iXLen(
    <vscale x 8 x i32> undef,
    <vscale x 8 x i32> %0,
    iXLen %1,
    iXLen %2)

  ret <vscale x 8 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vrgather.vx.mask.nxv8i32.iXLen(
  <vscale x 8 x i32>,
  <vscale x 8 x i32>,
  iXLen,
  <vscale x 8 x i1>,
  iXLen,
  iXLen)

define <vscale x 8 x i32> @intrinsic_vrgather_mask_vx_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, iXLen %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv8i32_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT:    vrgather.vx v8, v12, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vrgather.vx.mask.nxv8i32.iXLen(
    <vscale x 8 x i32> %0,
    <vscale x 8 x i32> %1,
    iXLen %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x i32> %a
}

declare <vscale x 16 x i32> @llvm.riscv.vrgather.vx.nxv16i32.iXLen(
  <vscale x 16 x i32>,
  <vscale x 16 x i32>,
  iXLen,
  iXLen)

define <vscale x 16 x i32> @intrinsic_vrgather_vx_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vx_nxv16i32_nxv16i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT:    vrgather.vx v16, v8, a0
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vrgather.vx.nxv16i32.iXLen(
    <vscale x 16 x i32> undef,
    <vscale x 16 x i32> %0,
    iXLen %1,
    iXLen %2)

  ret <vscale x 16 x i32> %a
}

declare <vscale x 16 x i32> @llvm.riscv.vrgather.vx.mask.nxv16i32.iXLen(
  <vscale x 16 x i32>,
  <vscale x 16 x i32>,
  iXLen,
  <vscale x 16 x i1>,
  iXLen,
  iXLen)

define <vscale x 16 x i32> @intrinsic_vrgather_mask_vx_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, iXLen %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv16i32_nxv16i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
; CHECK-NEXT:    vrgather.vx v8, v16, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vrgather.vx.mask.nxv16i32.iXLen(
    <vscale x 16 x i32> %0,
    <vscale x 16 x i32> %1,
    iXLen %2,
    <vscale x 16 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 16 x i32> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vrgather.vx.nxv1i64.iXLen(
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  iXLen,
  iXLen)

define <vscale x 1 x i64> @intrinsic_vrgather_vx_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vx_nxv1i64_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT:    vrgather.vx v9, v8, a0
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vrgather.vx.nxv1i64.iXLen(
    <vscale x 1 x i64> undef,
    <vscale x 1 x i64> %0,
    iXLen %1,
    iXLen %2)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vrgather.vx.mask.nxv1i64.iXLen(
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  iXLen,
  <vscale x 1 x i1>,
  iXLen,
  iXLen)

define <vscale x 1 x i64> @intrinsic_vrgather_mask_vx_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv1i64_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT:    vrgather.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vrgather.vx.mask.nxv1i64.iXLen(
    <vscale x 1 x i64> %0,
    <vscale x 1 x i64> %1,
    iXLen %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.vrgather.vx.nxv2i64.iXLen(
  <vscale x 2 x i64>,
  <vscale x 2 x i64>,
  iXLen,
  iXLen)

define <vscale x 2 x i64> @intrinsic_vrgather_vx_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vx_nxv2i64_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT:    vrgather.vx v10, v8, a0
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vrgather.vx.nxv2i64.iXLen(
    <vscale x 2 x i64> undef,
    <vscale x 2 x i64> %0,
    iXLen %1,
    iXLen %2)

  ret <vscale x 2 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.vrgather.vx.mask.nxv2i64.iXLen(
  <vscale x 2 x i64>,
  <vscale x 2 x i64>,
  iXLen,
  <vscale x 2 x i1>,
  iXLen,
  iXLen)

define <vscale x 2 x i64> @intrinsic_vrgather_mask_vx_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv2i64_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT:    vrgather.vx v8, v10, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vrgather.vx.mask.nxv2i64.iXLen(
    <vscale x 2 x i64> %0,
    <vscale x 2 x i64> %1,
    iXLen %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vrgather.vx.nxv4i64.iXLen(
  <vscale x 4 x i64>,
  <vscale x 4 x i64>,
  iXLen,
  iXLen)

define <vscale x 4 x i64> @intrinsic_vrgather_vx_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vx_nxv4i64_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
; CHECK-NEXT:    vrgather.vx v12, v8, a0
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vrgather.vx.nxv4i64.iXLen(
    <vscale x 4 x i64> undef,
    <vscale x 4 x i64> %0,
    iXLen %1,
    iXLen %2)

  ret <vscale x 4 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vrgather.vx.mask.nxv4i64.iXLen(
  <vscale x 4 x i64>,
  <vscale x 4 x i64>,
  iXLen,
  <vscale x 4 x i1>,
  iXLen,
  iXLen)

define <vscale x 4 x i64> @intrinsic_vrgather_mask_vx_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv4i64_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
; CHECK-NEXT:    vrgather.vx v8, v12, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vrgather.vx.mask.nxv4i64.iXLen(
    <vscale x 4 x i64> %0,
    <vscale x 4 x i64> %1,
    iXLen %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x i64> %a
}

declare <vscale x 8 x i64> @llvm.riscv.vrgather.vx.nxv8i64.iXLen(
  <vscale x 8 x i64>,
  <vscale x 8 x i64>,
  iXLen,
  iXLen)

define <vscale x 8 x i64> @intrinsic_vrgather_vx_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vx_nxv8i64_nxv8i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
; CHECK-NEXT:    vrgather.vx v16, v8, a0
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vrgather.vx.nxv8i64.iXLen(
    <vscale x 8 x i64> undef,
    <vscale x 8 x i64> %0,
    iXLen %1,
    iXLen %2)

  ret <vscale x 8 x i64> %a
}

declare <vscale x 8 x i64> @llvm.riscv.vrgather.vx.mask.nxv8i64.iXLen(
  <vscale x 8 x i64>,
  <vscale x 8 x i64>,
  iXLen,
  <vscale x 8 x i1>,
  iXLen,
  iXLen)

define <vscale x 8 x i64> @intrinsic_vrgather_mask_vx_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, iXLen %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv8i64_nxv8i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
; CHECK-NEXT:    vrgather.vx v8, v16, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vrgather.vx.mask.nxv8i64.iXLen(
    <vscale x 8 x i64> %0,
    <vscale x 8 x i64> %1,
    iXLen %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x i64> %a
}

declare <vscale x 1 x half> @llvm.riscv.vrgather.vx.nxv1f16.iXLen(
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  iXLen,
  iXLen)

define <vscale x 1 x half> @intrinsic_vrgather_vx_nxv1f16_nxv1f16(<vscale x 1 x half> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vx_nxv1f16_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vrgather.vx v9, v8, a0
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vrgather.vx.nxv1f16.iXLen(
    <vscale x 1 x half> undef,
    <vscale x 1 x half> %0,
    iXLen %1,
    iXLen %2)

  ret <vscale x 1 x half> %a
}

declare <vscale x 1 x half> @llvm.riscv.vrgather.vx.mask.nxv1f16.iXLen(
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  iXLen,
  <vscale x 1 x i1>,
  iXLen,
  iXLen)

define <vscale x 1 x half> @intrinsic_vrgather_mask_vx_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv1f16_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT:    vrgather.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vrgather.vx.mask.nxv1f16.iXLen(
    <vscale x 1 x half> %0,
    <vscale x 1 x half> %1,
    iXLen %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x half> %a
}

declare <vscale x 2 x half> @llvm.riscv.vrgather.vx.nxv2f16.iXLen(
  <vscale x 2 x half>,
  <vscale x 2 x half>,
  iXLen,
  iXLen)

define <vscale x 2 x half> @intrinsic_vrgather_vx_nxv2f16_nxv2f16(<vscale x 2 x half> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vx_nxv2f16_nxv2f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT:    vrgather.vx v9, v8, a0
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x half> @llvm.riscv.vrgather.vx.nxv2f16.iXLen(
    <vscale x 2 x half> undef,
    <vscale x 2 x half> %0,
    iXLen %1,
    iXLen %2)

  ret <vscale x 2 x half> %a
}

declare <vscale x 2 x half> @llvm.riscv.vrgather.vx.mask.nxv2f16.iXLen(
  <vscale x 2 x half>,
  <vscale x 2 x half>,
  iXLen,
  <vscale x 2 x i1>,
  iXLen,
  iXLen)

define <vscale x 2 x half> @intrinsic_vrgather_mask_vx_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv2f16_nxv2f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT:    vrgather.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x half> @llvm.riscv.vrgather.vx.mask.nxv2f16.iXLen(
    <vscale x 2 x half> %0,
    <vscale x 2 x half> %1,
    iXLen %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x half> %a
}

declare <vscale x 4 x half> @llvm.riscv.vrgather.vx.nxv4f16.iXLen(
  <vscale x 4 x half>,
  <vscale x 4 x half>,
  iXLen,
  iXLen)

define <vscale x 4 x half> @intrinsic_vrgather_vx_nxv4f16_nxv4f16(<vscale x 4 x half> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vx_nxv4f16_nxv4f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT:    vrgather.vx v9, v8, a0
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x half> @llvm.riscv.vrgather.vx.nxv4f16.iXLen(
    <vscale x 4 x half> undef,
    <vscale x 4 x half> %0,
    iXLen %1,
    iXLen %2)

  ret <vscale x 4 x half> %a
}

declare <vscale x 4 x half> @llvm.riscv.vrgather.vx.mask.nxv4f16.iXLen(
  <vscale x 4 x half>,
  <vscale x 4 x half>,
  iXLen,
  <vscale x 4 x i1>,
  iXLen,
  iXLen)

define <vscale x 4 x half> @intrinsic_vrgather_mask_vx_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv4f16_nxv4f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT:    vrgather.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x half> @llvm.riscv.vrgather.vx.mask.nxv4f16.iXLen(
    <vscale x 4 x half> %0,
    <vscale x 4 x half> %1,
    iXLen %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x half> %a
}

declare <vscale x 8 x half> @llvm.riscv.vrgather.vx.nxv8f16.iXLen(
  <vscale x 8 x half>,
  <vscale x 8 x half>,
  iXLen,
  iXLen)

define <vscale x 8 x half> @intrinsic_vrgather_vx_nxv8f16_nxv8f16(<vscale x 8 x half> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vx_nxv8f16_nxv8f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT:    vrgather.vx v10, v8, a0
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x half> @llvm.riscv.vrgather.vx.nxv8f16.iXLen(
    <vscale x 8 x half> undef,
    <vscale x 8 x half> %0,
    iXLen %1,
    iXLen %2)

  ret <vscale x 8 x half> %a
}

declare <vscale x 8 x half> @llvm.riscv.vrgather.vx.mask.nxv8f16.iXLen(
  <vscale x 8 x half>,
  <vscale x 8 x half>,
  iXLen,
  <vscale x 8 x i1>,
  iXLen,
  iXLen)

define <vscale x 8 x half> @intrinsic_vrgather_mask_vx_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, iXLen %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv8f16_nxv8f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT:    vrgather.vx v8, v10, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x half> @llvm.riscv.vrgather.vx.mask.nxv8f16.iXLen(
    <vscale x 8 x half> %0,
    <vscale x 8 x half> %1,
    iXLen %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x half> %a
}

declare <vscale x 16 x half> @llvm.riscv.vrgather.vx.nxv16f16.iXLen(
  <vscale x 16 x half>,
  <vscale x 16 x half>,
  iXLen,
  iXLen)

define <vscale x 16 x half> @intrinsic_vrgather_vx_nxv16f16_nxv16f16(<vscale x 16 x half> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vx_nxv16f16_nxv16f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT:    vrgather.vx v12, v8, a0
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x half> @llvm.riscv.vrgather.vx.nxv16f16.iXLen(
    <vscale x 16 x half> undef,
    <vscale x 16 x half> %0,
    iXLen %1,
    iXLen %2)

  ret <vscale x 16 x half> %a
}

declare <vscale x 16 x half> @llvm.riscv.vrgather.vx.mask.nxv16f16.iXLen(
  <vscale x 16 x half>,
  <vscale x 16 x half>,
  iXLen,
  <vscale x 16 x i1>,
  iXLen,
  iXLen)

define <vscale x 16 x half> @intrinsic_vrgather_mask_vx_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, iXLen %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv16f16_nxv16f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT:    vrgather.vx v8, v12, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x half> @llvm.riscv.vrgather.vx.mask.nxv16f16.iXLen(
    <vscale x 16 x half> %0,
    <vscale x 16 x half> %1,
    iXLen %2,
    <vscale x 16 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 16 x half> %a
}

declare <vscale x 32 x half> @llvm.riscv.vrgather.vx.nxv32f16.iXLen(
  <vscale x 32 x half>,
  <vscale x 32 x half>,
  iXLen,
  iXLen)

define <vscale x 32 x half> @intrinsic_vrgather_vx_nxv32f16_nxv32f16(<vscale x 32 x half> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vx_nxv32f16_nxv32f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
; CHECK-NEXT:    vrgather.vx v16, v8, a0
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x half> @llvm.riscv.vrgather.vx.nxv32f16.iXLen(
    <vscale x 32 x half> undef,
    <vscale x 32 x half> %0,
    iXLen %1,
    iXLen %2)

  ret <vscale x 32 x half> %a
}

declare <vscale x 32 x half> @llvm.riscv.vrgather.vx.mask.nxv32f16.iXLen(
  <vscale x 32 x half>,
  <vscale x 32 x half>,
  iXLen,
  <vscale x 32 x i1>,
  iXLen,
  iXLen)

define <vscale x 32 x half> @intrinsic_vrgather_mask_vx_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, iXLen %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv32f16_nxv32f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
; CHECK-NEXT:    vrgather.vx v8, v16, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x half> @llvm.riscv.vrgather.vx.mask.nxv32f16.iXLen(
    <vscale x 32 x half> %0,
    <vscale x 32 x half> %1,
    iXLen %2,
    <vscale x 32 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 32 x half> %a
}

declare <vscale x 1 x float> @llvm.riscv.vrgather.vx.nxv1f32.iXLen(
  <vscale x 1 x float>,
  <vscale x 1 x float>,
  iXLen,
  iXLen)

define <vscale x 1 x float> @intrinsic_vrgather_vx_nxv1f32_nxv1f32(<vscale x 1 x float> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vx_nxv1f32_nxv1f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT:    vrgather.vx v9, v8, a0
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vrgather.vx.nxv1f32.iXLen(
    <vscale x 1 x float> undef,
    <vscale x 1 x float> %0,
    iXLen %1,
    iXLen %2)

  ret <vscale x 1 x float> %a
}

declare <vscale x 1 x float> @llvm.riscv.vrgather.vx.mask.nxv1f32.iXLen(
  <vscale x 1 x float>,
  <vscale x 1 x float>,
  iXLen,
  <vscale x 1 x i1>,
  iXLen,
  iXLen)

define <vscale x 1 x float> @intrinsic_vrgather_mask_vx_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv1f32_nxv1f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT:    vrgather.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vrgather.vx.mask.nxv1f32.iXLen(
    <vscale x 1 x float> %0,
    <vscale x 1 x float> %1,
    iXLen %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x float> %a
}

declare <vscale x 2 x float> @llvm.riscv.vrgather.vx.nxv2f32.iXLen(
  <vscale x 2 x float>,
  <vscale x 2 x float>,
  iXLen,
  iXLen)

define <vscale x 2 x float> @intrinsic_vrgather_vx_nxv2f32_nxv2f32(<vscale x 2 x float> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vx_nxv2f32_nxv2f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT:    vrgather.vx v9, v8, a0
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vrgather.vx.nxv2f32.iXLen(
    <vscale x 2 x float> undef,
    <vscale x 2 x float> %0,
    iXLen %1,
    iXLen %2)

  ret <vscale x 2 x float> %a
}

declare <vscale x 2 x float> @llvm.riscv.vrgather.vx.mask.nxv2f32.iXLen(
  <vscale x 2 x float>,
  <vscale x 2 x float>,
  iXLen,
  <vscale x 2 x i1>,
  iXLen,
  iXLen)

define <vscale x 2 x float> @intrinsic_vrgather_mask_vx_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv2f32_nxv2f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT:    vrgather.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vrgather.vx.mask.nxv2f32.iXLen(
    <vscale x 2 x float> %0,
    <vscale x 2 x float> %1,
    iXLen %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x float> %a
}

declare <vscale x 4 x float> @llvm.riscv.vrgather.vx.nxv4f32.iXLen(
  <vscale x 4 x float>,
  <vscale x 4 x float>,
  iXLen,
  iXLen)

define <vscale x 4 x float> @intrinsic_vrgather_vx_nxv4f32_nxv4f32(<vscale x 4 x float> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vx_nxv4f32_nxv4f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT:    vrgather.vx v10, v8, a0
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x float> @llvm.riscv.vrgather.vx.nxv4f32.iXLen(
    <vscale x 4 x float> undef,
    <vscale x 4 x float> %0,
    iXLen %1,
    iXLen %2)

  ret <vscale x 4 x float> %a
}

declare <vscale x 4 x float> @llvm.riscv.vrgather.vx.mask.nxv4f32.iXLen(
  <vscale x 4 x float>,
  <vscale x 4 x float>,
  iXLen,
  <vscale x 4 x i1>,
  iXLen,
  iXLen)

define <vscale x 4 x float> @intrinsic_vrgather_mask_vx_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv4f32_nxv4f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT:    vrgather.vx v8, v10, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x float> @llvm.riscv.vrgather.vx.mask.nxv4f32.iXLen(
    <vscale x 4 x float> %0,
    <vscale x 4 x float> %1,
    iXLen %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x float> %a
}

declare <vscale x 8 x float> @llvm.riscv.vrgather.vx.nxv8f32.iXLen(
  <vscale x 8 x float>,
  <vscale x 8 x float>,
  iXLen,
  iXLen)

define <vscale x 8 x float> @intrinsic_vrgather_vx_nxv8f32_nxv8f32(<vscale x 8 x float> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vx_nxv8f32_nxv8f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT:    vrgather.vx v12, v8, a0
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x float> @llvm.riscv.vrgather.vx.nxv8f32.iXLen(
    <vscale x 8 x float> undef,
    <vscale x 8 x float> %0,
    iXLen %1,
    iXLen %2)

  ret <vscale x 8 x float> %a
}

declare <vscale x 8 x float> @llvm.riscv.vrgather.vx.mask.nxv8f32.iXLen(
  <vscale x 8 x float>,
  <vscale x 8 x float>,
  iXLen,
  <vscale x 8 x i1>,
  iXLen,
  iXLen)

define <vscale x 8 x float> @intrinsic_vrgather_mask_vx_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, iXLen %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv8f32_nxv8f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT:    vrgather.vx v8, v12, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x float> @llvm.riscv.vrgather.vx.mask.nxv8f32.iXLen(
    <vscale x 8 x float> %0,
    <vscale x 8 x float> %1,
    iXLen %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x float> %a
}

declare <vscale x 16 x float> @llvm.riscv.vrgather.vx.nxv16f32.iXLen(
  <vscale x 16 x float>,
  <vscale x 16 x float>,
  iXLen,
  iXLen)

define <vscale x 16 x float> @intrinsic_vrgather_vx_nxv16f32_nxv16f32(<vscale x 16 x float> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vx_nxv16f32_nxv16f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT:    vrgather.vx v16, v8, a0
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x float> @llvm.riscv.vrgather.vx.nxv16f32.iXLen(
    <vscale x 16 x float> undef,
    <vscale x 16 x float> %0,
    iXLen %1,
    iXLen %2)

  ret <vscale x 16 x float> %a
}

declare <vscale x 16 x float> @llvm.riscv.vrgather.vx.mask.nxv16f32.iXLen(
  <vscale x 16 x float>,
  <vscale x 16 x float>,
  iXLen,
  <vscale x 16 x i1>,
  iXLen,
  iXLen)

define <vscale x 16 x float> @intrinsic_vrgather_mask_vx_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, iXLen %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv16f32_nxv16f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
; CHECK-NEXT:    vrgather.vx v8, v16, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x float> @llvm.riscv.vrgather.vx.mask.nxv16f32.iXLen(
    <vscale x 16 x float> %0,
    <vscale x 16 x float> %1,
    iXLen %2,
    <vscale x 16 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 16 x float> %a
}

declare <vscale x 1 x double> @llvm.riscv.vrgather.vx.nxv1f64.iXLen(
  <vscale x 1 x double>,
  <vscale x 1 x double>,
  iXLen,
  iXLen)

define <vscale x 1 x double> @intrinsic_vrgather_vx_nxv1f64_nxv1f64(<vscale x 1 x double> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vx_nxv1f64_nxv1f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT:    vrgather.vx v9, v8, a0
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vrgather.vx.nxv1f64.iXLen(
    <vscale x 1 x double> undef,
    <vscale x 1 x double> %0,
    iXLen %1,
    iXLen %2)

  ret <vscale x 1 x double> %a
}

declare <vscale x 1 x double> @llvm.riscv.vrgather.vx.mask.nxv1f64.iXLen(
  <vscale x 1 x double>,
  <vscale x 1 x double>,
  iXLen,
  <vscale x 1 x i1>,
  iXLen,
  iXLen)

define <vscale x 1 x double> @intrinsic_vrgather_mask_vx_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv1f64_nxv1f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT:    vrgather.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vrgather.vx.mask.nxv1f64.iXLen(
    <vscale x 1 x double> %0,
    <vscale x 1 x double> %1,
    iXLen %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x double> %a
}

declare <vscale x 2 x double> @llvm.riscv.vrgather.vx.nxv2f64.iXLen(
  <vscale x 2 x double>,
  <vscale x 2 x double>,
  iXLen,
  iXLen)

define <vscale x 2 x double> @intrinsic_vrgather_vx_nxv2f64_nxv2f64(<vscale x 2 x double> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vx_nxv2f64_nxv2f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT:    vrgather.vx v10, v8, a0
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x double> @llvm.riscv.vrgather.vx.nxv2f64.iXLen(
    <vscale x 2 x double> undef,
    <vscale x 2 x double> %0,
    iXLen %1,
    iXLen %2)

  ret <vscale x 2 x double> %a
}

declare <vscale x 2 x double> @llvm.riscv.vrgather.vx.mask.nxv2f64.iXLen(
  <vscale x 2 x double>,
  <vscale x 2 x double>,
  iXLen,
  <vscale x 2 x i1>,
  iXLen,
  iXLen)

define <vscale x 2 x double> @intrinsic_vrgather_mask_vx_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv2f64_nxv2f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT:    vrgather.vx v8, v10, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x double> @llvm.riscv.vrgather.vx.mask.nxv2f64.iXLen(
    <vscale x 2 x double> %0,
    <vscale x 2 x double> %1,
    iXLen %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x double> %a
}

declare <vscale x 4 x double> @llvm.riscv.vrgather.vx.nxv4f64.iXLen(
  <vscale x 4 x double>,
  <vscale x 4 x double>,
  iXLen,
  iXLen)

define <vscale x 4 x double> @intrinsic_vrgather_vx_nxv4f64_nxv4f64(<vscale x 4 x double> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vx_nxv4f64_nxv4f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
; CHECK-NEXT:    vrgather.vx v12, v8, a0
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x double> @llvm.riscv.vrgather.vx.nxv4f64.iXLen(
    <vscale x 4 x double> undef,
    <vscale x 4 x double> %0,
    iXLen %1,
    iXLen %2)

  ret <vscale x 4 x double> %a
}

declare <vscale x 4 x double> @llvm.riscv.vrgather.vx.mask.nxv4f64.iXLen(
  <vscale x 4 x double>,
  <vscale x 4 x double>,
  iXLen,
  <vscale x 4 x i1>,
  iXLen,
  iXLen)

define <vscale x 4 x double> @intrinsic_vrgather_mask_vx_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv4f64_nxv4f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
; CHECK-NEXT:    vrgather.vx v8, v12, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x double> @llvm.riscv.vrgather.vx.mask.nxv4f64.iXLen(
    <vscale x 4 x double> %0,
    <vscale x 4 x double> %1,
    iXLen %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x double> %a
}

declare <vscale x 8 x double> @llvm.riscv.vrgather.vx.nxv8f64.iXLen(
  <vscale x 8 x double>,
  <vscale x 8 x double>,
  iXLen,
  iXLen)

define <vscale x 8 x double> @intrinsic_vrgather_vx_nxv8f64_nxv8f64(<vscale x 8 x double> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vx_nxv8f64_nxv8f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
; CHECK-NEXT:    vrgather.vx v16, v8, a0
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x double> @llvm.riscv.vrgather.vx.nxv8f64.iXLen(
    <vscale x 8 x double> undef,
    <vscale x 8 x double> %0,
    iXLen %1,
    iXLen %2)

  ret <vscale x 8 x double> %a
}

declare <vscale x 8 x double> @llvm.riscv.vrgather.vx.mask.nxv8f64.iXLen(
  <vscale x 8 x double>,
  <vscale x 8 x double>,
  iXLen,
  <vscale x 8 x i1>,
  iXLen,
  iXLen)

define <vscale x 8 x double> @intrinsic_vrgather_mask_vx_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, iXLen %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv8f64_nxv8f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
; CHECK-NEXT:    vrgather.vx v8, v16, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x double> @llvm.riscv.vrgather.vx.mask.nxv8f64.iXLen(
    <vscale x 8 x double> %0,
    <vscale x 8 x double> %1,
    iXLen %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x double> %a
}

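; Note (editorial): the vrgather_vi tests below reuse the vrgather.vx
; intrinsics with the constant index 9. Since 9 fits in uimm5, the backend
; is expected to select the immediate form (vrgather.vi), as the CHECK
; lines assert; no separate .vi intrinsic declarations are needed.
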
define <vscale x 1 x i8> @intrinsic_vrgather_vi_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vi_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vrgather.vi v9, v8, 9
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vrgather.vx.nxv1i8.iXLen(
    <vscale x 1 x i8> undef,
    <vscale x 1 x i8> %0,
    iXLen 9,
    iXLen %1)

  ret <vscale x 1 x i8> %a
}

define <vscale x 1 x i8> @intrinsic_vrgather_mask_vi_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT:    vrgather.vi v8, v9, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vrgather.vx.mask.nxv1i8.iXLen(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    iXLen 9,
    <vscale x 1 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 1 x i8> %a
}

define <vscale x 2 x i8> @intrinsic_vrgather_vi_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vi_nxv2i8_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT:    vrgather.vi v9, v8, 9
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vrgather.vx.nxv2i8.iXLen(
    <vscale x 2 x i8> undef,
    <vscale x 2 x i8> %0,
    iXLen 9,
    iXLen %1)

  ret <vscale x 2 x i8> %a
}

define <vscale x 2 x i8> @intrinsic_vrgather_mask_vi_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv2i8_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
; CHECK-NEXT:    vrgather.vi v8, v9, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vrgather.vx.mask.nxv2i8.iXLen(
    <vscale x 2 x i8> %0,
    <vscale x 2 x i8> %1,
    iXLen 9,
    <vscale x 2 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 2 x i8> %a
}

define <vscale x 4 x i8> @intrinsic_vrgather_vi_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vi_nxv4i8_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT:    vrgather.vi v9, v8, 9
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vrgather.vx.nxv4i8.iXLen(
    <vscale x 4 x i8> undef,
    <vscale x 4 x i8> %0,
    iXLen 9,
    iXLen %1)

  ret <vscale x 4 x i8> %a
}

define <vscale x 4 x i8> @intrinsic_vrgather_mask_vi_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv4i8_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
; CHECK-NEXT:    vrgather.vi v8, v9, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vrgather.vx.mask.nxv4i8.iXLen(
    <vscale x 4 x i8> %0,
    <vscale x 4 x i8> %1,
    iXLen 9,
    <vscale x 4 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 4 x i8> %a
}

define <vscale x 8 x i8> @intrinsic_vrgather_vi_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vi_nxv8i8_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT:    vrgather.vi v9, v8, 9
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vrgather.vx.nxv8i8.iXLen(
    <vscale x 8 x i8> undef,
    <vscale x 8 x i8> %0,
    iXLen 9,
    iXLen %1)

  ret <vscale x 8 x i8> %a
}

define <vscale x 8 x i8> @intrinsic_vrgather_mask_vi_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv8i8_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
; CHECK-NEXT:    vrgather.vi v8, v9, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vrgather.vx.mask.nxv8i8.iXLen(
    <vscale x 8 x i8> %0,
    <vscale x 8 x i8> %1,
    iXLen 9,
    <vscale x 8 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 8 x i8> %a
}

define <vscale x 16 x i8> @intrinsic_vrgather_vi_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vi_nxv16i8_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT:    vrgather.vi v10, v8, 9
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vrgather.vx.nxv16i8.iXLen(
    <vscale x 16 x i8> undef,
    <vscale x 16 x i8> %0,
    iXLen 9,
    iXLen %1)

  ret <vscale x 16 x i8> %a
}

define <vscale x 16 x i8> @intrinsic_vrgather_mask_vi_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv16i8_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
; CHECK-NEXT:    vrgather.vi v8, v10, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vrgather.vx.mask.nxv16i8.iXLen(
    <vscale x 16 x i8> %0,
    <vscale x 16 x i8> %1,
    iXLen 9,
    <vscale x 16 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 16 x i8> %a
}

define <vscale x 32 x i8> @intrinsic_vrgather_vi_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vi_nxv32i8_nxv32i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT:    vrgather.vi v12, v8, 9
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vrgather.vx.nxv32i8.iXLen(
    <vscale x 32 x i8> undef,
    <vscale x 32 x i8> %0,
    iXLen 9,
    iXLen %1)

  ret <vscale x 32 x i8> %a
}

define <vscale x 32 x i8> @intrinsic_vrgather_mask_vi_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv32i8_nxv32i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
; CHECK-NEXT:    vrgather.vi v8, v12, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vrgather.vx.mask.nxv32i8.iXLen(
    <vscale x 32 x i8> %0,
    <vscale x 32 x i8> %1,
    iXLen 9,
    <vscale x 32 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 32 x i8> %a
}

define <vscale x 64 x i8> @intrinsic_vrgather_vi_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vi_nxv64i8_nxv64i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT:    vrgather.vi v16, v8, 9
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 64 x i8> @llvm.riscv.vrgather.vx.nxv64i8.iXLen(
    <vscale x 64 x i8> undef,
    <vscale x 64 x i8> %0,
    iXLen 9,
    iXLen %1)

  ret <vscale x 64 x i8> %a
}

define <vscale x 64 x i8> @intrinsic_vrgather_mask_vi_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv64i8_nxv64i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, mu
; CHECK-NEXT:    vrgather.vi v8, v16, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 64 x i8> @llvm.riscv.vrgather.vx.mask.nxv64i8.iXLen(
    <vscale x 64 x i8> %0,
    <vscale x 64 x i8> %1,
    iXLen 9,
    <vscale x 64 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 64 x i8> %a
}

define <vscale x 1 x i16> @intrinsic_vrgather_vi_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vi_nxv1i16_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vrgather.vi v9, v8, 9
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vrgather.vx.nxv1i16.iXLen(
    <vscale x 1 x i16> undef,
    <vscale x 1 x i16> %0,
    iXLen 9,
    iXLen %1)

  ret <vscale x 1 x i16> %a
}

define <vscale x 1 x i16> @intrinsic_vrgather_mask_vi_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv1i16_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT:    vrgather.vi v8, v9, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vrgather.vx.mask.nxv1i16.iXLen(
    <vscale x 1 x i16> %0,
    <vscale x 1 x i16> %1,
    iXLen 9,
    <vscale x 1 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 1 x i16> %a
}

define <vscale x 2 x i16> @intrinsic_vrgather_vi_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vi_nxv2i16_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vrgather.vi v9, v8, 9
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vrgather.vx.nxv2i16.iXLen(
    <vscale x 2 x i16> undef,
    <vscale x 2 x i16> %0,
    iXLen 9,
    iXLen %1)

  ret <vscale x 2 x i16> %a
}

define <vscale x 2 x i16> @intrinsic_vrgather_mask_vi_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv2i16_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT:    vrgather.vi v8, v9, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vrgather.vx.mask.nxv2i16.iXLen(
    <vscale x 2 x i16> %0,
    <vscale x 2 x i16> %1,
    iXLen 9,
    <vscale x 2 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 2 x i16> %a
}

define <vscale x 4 x i16> @intrinsic_vrgather_vi_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vi_nxv4i16_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vrgather.vi v9, v8, 9
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vrgather.vx.nxv4i16.iXLen(
    <vscale x 4 x i16> undef,
    <vscale x 4 x i16> %0,
    iXLen 9,
    iXLen %1)

  ret <vscale x 4 x i16> %a
}

define <vscale x 4 x i16> @intrinsic_vrgather_mask_vi_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv4i16_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT:    vrgather.vi v8, v9, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vrgather.vx.mask.nxv4i16.iXLen(
    <vscale x 4 x i16> %0,
    <vscale x 4 x i16> %1,
    iXLen 9,
    <vscale x 4 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 4 x i16> %a
}

define <vscale x 8 x i16> @intrinsic_vrgather_vi_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vi_nxv8i16_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    vrgather.vi v10, v8, 9
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vrgather.vx.nxv8i16.iXLen(
    <vscale x 8 x i16> undef,
    <vscale x 8 x i16> %0,
    iXLen 9,
    iXLen %1)

  ret <vscale x 8 x i16> %a
}

define <vscale x 8 x i16> @intrinsic_vrgather_mask_vi_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv8i16_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT:    vrgather.vi v8, v10, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vrgather.vx.mask.nxv8i16.iXLen(
    <vscale x 8 x i16> %0,
    <vscale x 8 x i16> %1,
    iXLen 9,
    <vscale x 8 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 8 x i16> %a
}

define <vscale x 16 x i16> @intrinsic_vrgather_vi_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vi_nxv16i16_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT:    vrgather.vi v12, v8, 9
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vrgather.vx.nxv16i16.iXLen(
    <vscale x 16 x i16> undef,
    <vscale x 16 x i16> %0,
    iXLen 9,
    iXLen %1)

  ret <vscale x 16 x i16> %a
}

define <vscale x 16 x i16> @intrinsic_vrgather_mask_vi_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv16i16_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT:    vrgather.vi v8, v12, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vrgather.vx.mask.nxv16i16.iXLen(
    <vscale x 16 x i16> %0,
    <vscale x 16 x i16> %1,
    iXLen 9,
    <vscale x 16 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 16 x i16> %a
}

define <vscale x 32 x i16> @intrinsic_vrgather_vi_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vi_nxv32i16_nxv32i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT:    vrgather.vi v16, v8, 9
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vrgather.vx.nxv32i16.iXLen(
    <vscale x 32 x i16> undef,
    <vscale x 32 x i16> %0,
    iXLen 9,
    iXLen %1)

  ret <vscale x 32 x i16> %a
}

define <vscale x 32 x i16> @intrinsic_vrgather_mask_vi_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv32i16_nxv32i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
; CHECK-NEXT:    vrgather.vi v8, v16, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vrgather.vx.mask.nxv32i16.iXLen(
    <vscale x 32 x i16> %0,
    <vscale x 32 x i16> %1,
    iXLen 9,
    <vscale x 32 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 32 x i16> %a
}

define <vscale x 1 x i32> @intrinsic_vrgather_vi_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vi_nxv1i32_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vrgather.vi v9, v8, 9
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vrgather.vx.nxv1i32.iXLen(
    <vscale x 1 x i32> undef,
    <vscale x 1 x i32> %0,
    iXLen 9,
    iXLen %1)

  ret <vscale x 1 x i32> %a
}

define <vscale x 1 x i32> @intrinsic_vrgather_mask_vi_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv1i32_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT:    vrgather.vi v8, v9, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vrgather.vx.mask.nxv1i32.iXLen(
    <vscale x 1 x i32> %0,
    <vscale x 1 x i32> %1,
    iXLen 9,
    <vscale x 1 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 1 x i32> %a
}

define <vscale x 2 x i32> @intrinsic_vrgather_vi_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vi_nxv2i32_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vrgather.vi v9, v8, 9
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vrgather.vx.nxv2i32.iXLen(
    <vscale x 2 x i32> undef,
    <vscale x 2 x i32> %0,
    iXLen 9,
    iXLen %1)

  ret <vscale x 2 x i32> %a
}

define <vscale x 2 x i32> @intrinsic_vrgather_mask_vi_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv2i32_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT:    vrgather.vi v8, v9, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vrgather.vx.mask.nxv2i32.iXLen(
    <vscale x 2 x i32> %0,
    <vscale x 2 x i32> %1,
    iXLen 9,
    <vscale x 2 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 2 x i32> %a
}

4076 define <vscale x 4 x i32> @intrinsic_vrgather_vi_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, iXLen %1) nounwind {
4077 ; CHECK-LABEL: intrinsic_vrgather_vi_nxv4i32_nxv4i32:
4078 ; CHECK: # %bb.0: # %entry
4079 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
4080 ; CHECK-NEXT: vrgather.vi v10, v8, 9
4081 ; CHECK-NEXT: vmv.v.v v8, v10
4084 %a = call <vscale x 4 x i32> @llvm.riscv.vrgather.vx.nxv4i32.iXLen(
4085 <vscale x 4 x i32> undef,
4086 <vscale x 4 x i32> %0,
4090 ret <vscale x 4 x i32> %a
4093 define <vscale x 4 x i32> @intrinsic_vrgather_mask_vi_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
4094 ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv4i32_nxv4i32:
4095 ; CHECK: # %bb.0: # %entry
4096 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
4097 ; CHECK-NEXT: vrgather.vi v8, v10, 9, v0.t
4100 %a = call <vscale x 4 x i32> @llvm.riscv.vrgather.vx.mask.nxv4i32.iXLen(
4101 <vscale x 4 x i32> %0,
4102 <vscale x 4 x i32> %1,
4104 <vscale x 4 x i1> %2,
4107 ret <vscale x 4 x i32> %a
4110 define <vscale x 8 x i32> @intrinsic_vrgather_vi_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, iXLen %1) nounwind {
4111 ; CHECK-LABEL: intrinsic_vrgather_vi_nxv8i32_nxv8i32:
4112 ; CHECK: # %bb.0: # %entry
4113 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
4114 ; CHECK-NEXT: vrgather.vi v12, v8, 9
4115 ; CHECK-NEXT: vmv.v.v v8, v12
4118 %a = call <vscale x 8 x i32> @llvm.riscv.vrgather.vx.nxv8i32.iXLen(
4119 <vscale x 8 x i32> undef,
4120 <vscale x 8 x i32> %0,
4124 ret <vscale x 8 x i32> %a
4127 define <vscale x 8 x i32> @intrinsic_vrgather_mask_vi_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
4128 ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv8i32_nxv8i32:
4129 ; CHECK: # %bb.0: # %entry
4130 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
4131 ; CHECK-NEXT: vrgather.vi v8, v12, 9, v0.t
4134 %a = call <vscale x 8 x i32> @llvm.riscv.vrgather.vx.mask.nxv8i32.iXLen(
4135 <vscale x 8 x i32> %0,
4136 <vscale x 8 x i32> %1,
4138 <vscale x 8 x i1> %2,
4141 ret <vscale x 8 x i32> %a
4144 define <vscale x 16 x i32> @intrinsic_vrgather_vi_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, iXLen %1) nounwind {
4145 ; CHECK-LABEL: intrinsic_vrgather_vi_nxv16i32_nxv16i32:
4146 ; CHECK: # %bb.0: # %entry
4147 ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
4148 ; CHECK-NEXT: vrgather.vi v16, v8, 9
4149 ; CHECK-NEXT: vmv.v.v v8, v16
4152 %a = call <vscale x 16 x i32> @llvm.riscv.vrgather.vx.nxv16i32.iXLen(
4153 <vscale x 16 x i32> undef,
4154 <vscale x 16 x i32> %0,
4158 ret <vscale x 16 x i32> %a
4161 define <vscale x 16 x i32> @intrinsic_vrgather_mask_vi_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
4162 ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv16i32_nxv16i32:
4163 ; CHECK: # %bb.0: # %entry
4164 ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu
4165 ; CHECK-NEXT: vrgather.vi v8, v16, 9, v0.t
4168 %a = call <vscale x 16 x i32> @llvm.riscv.vrgather.vx.mask.nxv16i32.iXLen(
4169 <vscale x 16 x i32> %0,
4170 <vscale x 16 x i32> %1,
4172 <vscale x 16 x i1> %2,
4175 ret <vscale x 16 x i32> %a
4178 define <vscale x 1 x i64> @intrinsic_vrgather_vi_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, iXLen %1) nounwind {
4179 ; CHECK-LABEL: intrinsic_vrgather_vi_nxv1i64_nxv1i64:
4180 ; CHECK: # %bb.0: # %entry
4181 ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
4182 ; CHECK-NEXT: vrgather.vi v9, v8, 9
4183 ; CHECK-NEXT: vmv.v.v v8, v9
4186 %a = call <vscale x 1 x i64> @llvm.riscv.vrgather.vx.nxv1i64.iXLen(
4187 <vscale x 1 x i64> undef,
4188 <vscale x 1 x i64> %0,
4192 ret <vscale x 1 x i64> %a
4195 define <vscale x 1 x i64> @intrinsic_vrgather_mask_vi_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
4196 ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv1i64_nxv1i64:
4197 ; CHECK: # %bb.0: # %entry
4198 ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
4199 ; CHECK-NEXT: vrgather.vi v8, v9, 9, v0.t
4202 %a = call <vscale x 1 x i64> @llvm.riscv.vrgather.vx.mask.nxv1i64.iXLen(
4203 <vscale x 1 x i64> %0,
4204 <vscale x 1 x i64> %1,
4206 <vscale x 1 x i1> %2,
4209 ret <vscale x 1 x i64> %a
4212 define <vscale x 2 x i64> @intrinsic_vrgather_vi_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, iXLen %1) nounwind {
4213 ; CHECK-LABEL: intrinsic_vrgather_vi_nxv2i64_nxv2i64:
4214 ; CHECK: # %bb.0: # %entry
4215 ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
4216 ; CHECK-NEXT: vrgather.vi v10, v8, 9
4217 ; CHECK-NEXT: vmv.v.v v8, v10
4220 %a = call <vscale x 2 x i64> @llvm.riscv.vrgather.vx.nxv2i64.iXLen(
4221 <vscale x 2 x i64> undef,
4222 <vscale x 2 x i64> %0,
4226 ret <vscale x 2 x i64> %a
4229 define <vscale x 2 x i64> @intrinsic_vrgather_mask_vi_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
4230 ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv2i64_nxv2i64:
4231 ; CHECK: # %bb.0: # %entry
4232 ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
4233 ; CHECK-NEXT: vrgather.vi v8, v10, 9, v0.t
4236 %a = call <vscale x 2 x i64> @llvm.riscv.vrgather.vx.mask.nxv2i64.iXLen(
4237 <vscale x 2 x i64> %0,
4238 <vscale x 2 x i64> %1,
4240 <vscale x 2 x i1> %2,
4243 ret <vscale x 2 x i64> %a
4246 define <vscale x 4 x i64> @intrinsic_vrgather_vi_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, iXLen %1) nounwind {
4247 ; CHECK-LABEL: intrinsic_vrgather_vi_nxv4i64_nxv4i64:
4248 ; CHECK: # %bb.0: # %entry
4249 ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
4250 ; CHECK-NEXT: vrgather.vi v12, v8, 9
4251 ; CHECK-NEXT: vmv.v.v v8, v12
4254 %a = call <vscale x 4 x i64> @llvm.riscv.vrgather.vx.nxv4i64.iXLen(
4255 <vscale x 4 x i64> undef,
4256 <vscale x 4 x i64> %0,
4260 ret <vscale x 4 x i64> %a
4263 define <vscale x 4 x i64> @intrinsic_vrgather_mask_vi_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
4264 ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv4i64_nxv4i64:
4265 ; CHECK: # %bb.0: # %entry
4266 ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
4267 ; CHECK-NEXT: vrgather.vi v8, v12, 9, v0.t
4270 %a = call <vscale x 4 x i64> @llvm.riscv.vrgather.vx.mask.nxv4i64.iXLen(
4271 <vscale x 4 x i64> %0,
4272 <vscale x 4 x i64> %1,
4274 <vscale x 4 x i1> %2,
4277 ret <vscale x 4 x i64> %a
4280 define <vscale x 8 x i64> @intrinsic_vrgather_vi_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, iXLen %1) nounwind {
4281 ; CHECK-LABEL: intrinsic_vrgather_vi_nxv8i64_nxv8i64:
4282 ; CHECK: # %bb.0: # %entry
4283 ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
4284 ; CHECK-NEXT: vrgather.vi v16, v8, 9
4285 ; CHECK-NEXT: vmv.v.v v8, v16
4288 %a = call <vscale x 8 x i64> @llvm.riscv.vrgather.vx.nxv8i64.iXLen(
4289 <vscale x 8 x i64> undef,
4290 <vscale x 8 x i64> %0,
4294 ret <vscale x 8 x i64> %a
4297 define <vscale x 8 x i64> @intrinsic_vrgather_mask_vi_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
4298 ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv8i64_nxv8i64:
4299 ; CHECK: # %bb.0: # %entry
4300 ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
4301 ; CHECK-NEXT: vrgather.vi v8, v16, 9, v0.t
4304 %a = call <vscale x 8 x i64> @llvm.riscv.vrgather.vx.mask.nxv8i64.iXLen(
4305 <vscale x 8 x i64> %0,
4306 <vscale x 8 x i64> %1,
4308 <vscale x 8 x i1> %2,
4311 ret <vscale x 8 x i64> %a
4314 define <vscale x 1 x half> @intrinsic_vrgather_vi_nxv1f16_nxv1f16(<vscale x 1 x half> %0, iXLen %1) nounwind {
4315 ; CHECK-LABEL: intrinsic_vrgather_vi_nxv1f16_nxv1f16:
4316 ; CHECK: # %bb.0: # %entry
4317 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
4318 ; CHECK-NEXT: vrgather.vi v9, v8, 9
4319 ; CHECK-NEXT: vmv1r.v v8, v9
4322 %a = call <vscale x 1 x half> @llvm.riscv.vrgather.vx.nxv1f16.iXLen(
4323 <vscale x 1 x half> undef,
4324 <vscale x 1 x half> %0,
4328 ret <vscale x 1 x half> %a
4331 define <vscale x 1 x half> @intrinsic_vrgather_mask_vi_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
4332 ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv1f16_nxv1f16:
4333 ; CHECK: # %bb.0: # %entry
4334 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
4335 ; CHECK-NEXT: vrgather.vi v8, v9, 9, v0.t
4338 %a = call <vscale x 1 x half> @llvm.riscv.vrgather.vx.mask.nxv1f16.iXLen(
4339 <vscale x 1 x half> %0,
4340 <vscale x 1 x half> %1,
4342 <vscale x 1 x i1> %2,
4345 ret <vscale x 1 x half> %a
4348 define <vscale x 2 x half> @intrinsic_vrgather_vi_nxv2f16_nxv2f16(<vscale x 2 x half> %0, iXLen %1) nounwind {
4349 ; CHECK-LABEL: intrinsic_vrgather_vi_nxv2f16_nxv2f16:
4350 ; CHECK: # %bb.0: # %entry
4351 ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
4352 ; CHECK-NEXT: vrgather.vi v9, v8, 9
4353 ; CHECK-NEXT: vmv1r.v v8, v9
4356 %a = call <vscale x 2 x half> @llvm.riscv.vrgather.vx.nxv2f16.iXLen(
4357 <vscale x 2 x half> undef,
4358 <vscale x 2 x half> %0,
4362 ret <vscale x 2 x half> %a
4365 define <vscale x 2 x half> @intrinsic_vrgather_mask_vi_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
4366 ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv2f16_nxv2f16:
4367 ; CHECK: # %bb.0: # %entry
4368 ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
4369 ; CHECK-NEXT: vrgather.vi v8, v9, 9, v0.t
4372 %a = call <vscale x 2 x half> @llvm.riscv.vrgather.vx.mask.nxv2f16.iXLen(
4373 <vscale x 2 x half> %0,
4374 <vscale x 2 x half> %1,
4376 <vscale x 2 x i1> %2,
4379 ret <vscale x 2 x half> %a
4382 define <vscale x 4 x half> @intrinsic_vrgather_vi_nxv4f16_nxv4f16(<vscale x 4 x half> %0, iXLen %1) nounwind {
4383 ; CHECK-LABEL: intrinsic_vrgather_vi_nxv4f16_nxv4f16:
4384 ; CHECK: # %bb.0: # %entry
4385 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
4386 ; CHECK-NEXT: vrgather.vi v9, v8, 9
4387 ; CHECK-NEXT: vmv.v.v v8, v9
4390 %a = call <vscale x 4 x half> @llvm.riscv.vrgather.vx.nxv4f16.iXLen(
4391 <vscale x 4 x half> undef,
4392 <vscale x 4 x half> %0,
4396 ret <vscale x 4 x half> %a
4399 define <vscale x 4 x half> @intrinsic_vrgather_mask_vi_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
4400 ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv4f16_nxv4f16:
4401 ; CHECK: # %bb.0: # %entry
4402 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
4403 ; CHECK-NEXT: vrgather.vi v8, v9, 9, v0.t
4406 %a = call <vscale x 4 x half> @llvm.riscv.vrgather.vx.mask.nxv4f16.iXLen(
4407 <vscale x 4 x half> %0,
4408 <vscale x 4 x half> %1,
4410 <vscale x 4 x i1> %2,
4413 ret <vscale x 4 x half> %a
4416 define <vscale x 8 x half> @intrinsic_vrgather_vi_nxv8f16_nxv8f16(<vscale x 8 x half> %0, iXLen %1) nounwind {
4417 ; CHECK-LABEL: intrinsic_vrgather_vi_nxv8f16_nxv8f16:
4418 ; CHECK: # %bb.0: # %entry
4419 ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
4420 ; CHECK-NEXT: vrgather.vi v10, v8, 9
4421 ; CHECK-NEXT: vmv.v.v v8, v10
4424 %a = call <vscale x 8 x half> @llvm.riscv.vrgather.vx.nxv8f16.iXLen(
4425 <vscale x 8 x half> undef,
4426 <vscale x 8 x half> %0,
4430 ret <vscale x 8 x half> %a
4433 define <vscale x 8 x half> @intrinsic_vrgather_mask_vi_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
4434 ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv8f16_nxv8f16:
4435 ; CHECK: # %bb.0: # %entry
4436 ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
4437 ; CHECK-NEXT: vrgather.vi v8, v10, 9, v0.t
4440 %a = call <vscale x 8 x half> @llvm.riscv.vrgather.vx.mask.nxv8f16.iXLen(
4441 <vscale x 8 x half> %0,
4442 <vscale x 8 x half> %1,
4444 <vscale x 8 x i1> %2,
4447 ret <vscale x 8 x half> %a
4450 define <vscale x 16 x half> @intrinsic_vrgather_vi_nxv16f16_nxv16f16(<vscale x 16 x half> %0, iXLen %1) nounwind {
4451 ; CHECK-LABEL: intrinsic_vrgather_vi_nxv16f16_nxv16f16:
4452 ; CHECK: # %bb.0: # %entry
4453 ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
4454 ; CHECK-NEXT: vrgather.vi v12, v8, 9
4455 ; CHECK-NEXT: vmv.v.v v8, v12
4458 %a = call <vscale x 16 x half> @llvm.riscv.vrgather.vx.nxv16f16.iXLen(
4459 <vscale x 16 x half> undef,
4460 <vscale x 16 x half> %0,
4464 ret <vscale x 16 x half> %a
4467 define <vscale x 16 x half> @intrinsic_vrgather_mask_vi_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
4468 ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv16f16_nxv16f16:
4469 ; CHECK: # %bb.0: # %entry
4470 ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
4471 ; CHECK-NEXT: vrgather.vi v8, v12, 9, v0.t
4474 %a = call <vscale x 16 x half> @llvm.riscv.vrgather.vx.mask.nxv16f16.iXLen(
4475 <vscale x 16 x half> %0,
4476 <vscale x 16 x half> %1,
4478 <vscale x 16 x i1> %2,
4481 ret <vscale x 16 x half> %a
4484 define <vscale x 32 x half> @intrinsic_vrgather_vi_nxv32f16_nxv32f16(<vscale x 32 x half> %0, iXLen %1) nounwind {
4485 ; CHECK-LABEL: intrinsic_vrgather_vi_nxv32f16_nxv32f16:
4486 ; CHECK: # %bb.0: # %entry
4487 ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
4488 ; CHECK-NEXT: vrgather.vi v16, v8, 9
4489 ; CHECK-NEXT: vmv.v.v v8, v16
4492 %a = call <vscale x 32 x half> @llvm.riscv.vrgather.vx.nxv32f16.iXLen(
4493 <vscale x 32 x half> undef,
4494 <vscale x 32 x half> %0,
4498 ret <vscale x 32 x half> %a
4501 define <vscale x 32 x half> @intrinsic_vrgather_mask_vi_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
4502 ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv32f16_nxv32f16:
4503 ; CHECK: # %bb.0: # %entry
4504 ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu
4505 ; CHECK-NEXT: vrgather.vi v8, v16, 9, v0.t
4508 %a = call <vscale x 32 x half> @llvm.riscv.vrgather.vx.mask.nxv32f16.iXLen(
4509 <vscale x 32 x half> %0,
4510 <vscale x 32 x half> %1,
4512 <vscale x 32 x i1> %2,
4515 ret <vscale x 32 x half> %a
4518 define <vscale x 1 x float> @intrinsic_vrgather_vi_nxv1f32_nxv1f32(<vscale x 1 x float> %0, iXLen %1) nounwind {
4519 ; CHECK-LABEL: intrinsic_vrgather_vi_nxv1f32_nxv1f32:
4520 ; CHECK: # %bb.0: # %entry
4521 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
4522 ; CHECK-NEXT: vrgather.vi v9, v8, 9
4523 ; CHECK-NEXT: vmv1r.v v8, v9
4526 %a = call <vscale x 1 x float> @llvm.riscv.vrgather.vx.nxv1f32.iXLen(
4527 <vscale x 1 x float> undef,
4528 <vscale x 1 x float> %0,
4532 ret <vscale x 1 x float> %a
4535 define <vscale x 1 x float> @intrinsic_vrgather_mask_vi_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
4536 ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv1f32_nxv1f32:
4537 ; CHECK: # %bb.0: # %entry
4538 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
4539 ; CHECK-NEXT: vrgather.vi v8, v9, 9, v0.t
4542 %a = call <vscale x 1 x float> @llvm.riscv.vrgather.vx.mask.nxv1f32.iXLen(
4543 <vscale x 1 x float> %0,
4544 <vscale x 1 x float> %1,
4546 <vscale x 1 x i1> %2,
4549 ret <vscale x 1 x float> %a
4552 define <vscale x 2 x float> @intrinsic_vrgather_vi_nxv2f32_nxv2f32(<vscale x 2 x float> %0, iXLen %1) nounwind {
4553 ; CHECK-LABEL: intrinsic_vrgather_vi_nxv2f32_nxv2f32:
4554 ; CHECK: # %bb.0: # %entry
4555 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
4556 ; CHECK-NEXT: vrgather.vi v9, v8, 9
4557 ; CHECK-NEXT: vmv.v.v v8, v9
4560 %a = call <vscale x 2 x float> @llvm.riscv.vrgather.vx.nxv2f32.iXLen(
4561 <vscale x 2 x float> undef,
4562 <vscale x 2 x float> %0,
4566 ret <vscale x 2 x float> %a
4569 define <vscale x 2 x float> @intrinsic_vrgather_mask_vi_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
4570 ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv2f32_nxv2f32:
4571 ; CHECK: # %bb.0: # %entry
4572 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
4573 ; CHECK-NEXT: vrgather.vi v8, v9, 9, v0.t
4576 %a = call <vscale x 2 x float> @llvm.riscv.vrgather.vx.mask.nxv2f32.iXLen(
4577 <vscale x 2 x float> %0,
4578 <vscale x 2 x float> %1,
4580 <vscale x 2 x i1> %2,
4583 ret <vscale x 2 x float> %a
4586 define <vscale x 4 x float> @intrinsic_vrgather_vi_nxv4f32_nxv4f32(<vscale x 4 x float> %0, iXLen %1) nounwind {
4587 ; CHECK-LABEL: intrinsic_vrgather_vi_nxv4f32_nxv4f32:
4588 ; CHECK: # %bb.0: # %entry
4589 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
4590 ; CHECK-NEXT: vrgather.vi v10, v8, 9
4591 ; CHECK-NEXT: vmv.v.v v8, v10
4594 %a = call <vscale x 4 x float> @llvm.riscv.vrgather.vx.nxv4f32.iXLen(
4595 <vscale x 4 x float> undef,
4596 <vscale x 4 x float> %0,
4600 ret <vscale x 4 x float> %a
4603 define <vscale x 4 x float> @intrinsic_vrgather_mask_vi_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
4604 ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv4f32_nxv4f32:
4605 ; CHECK: # %bb.0: # %entry
4606 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
4607 ; CHECK-NEXT: vrgather.vi v8, v10, 9, v0.t
4610 %a = call <vscale x 4 x float> @llvm.riscv.vrgather.vx.mask.nxv4f32.iXLen(
4611 <vscale x 4 x float> %0,
4612 <vscale x 4 x float> %1,
4614 <vscale x 4 x i1> %2,
4617 ret <vscale x 4 x float> %a
4620 define <vscale x 8 x float> @intrinsic_vrgather_vi_nxv8f32_nxv8f32(<vscale x 8 x float> %0, iXLen %1) nounwind {
4621 ; CHECK-LABEL: intrinsic_vrgather_vi_nxv8f32_nxv8f32:
4622 ; CHECK: # %bb.0: # %entry
4623 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
4624 ; CHECK-NEXT: vrgather.vi v12, v8, 9
4625 ; CHECK-NEXT: vmv.v.v v8, v12
4628 %a = call <vscale x 8 x float> @llvm.riscv.vrgather.vx.nxv8f32.iXLen(
4629 <vscale x 8 x float> undef,
4630 <vscale x 8 x float> %0,
4634 ret <vscale x 8 x float> %a
4637 define <vscale x 8 x float> @intrinsic_vrgather_mask_vi_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
4638 ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv8f32_nxv8f32:
4639 ; CHECK: # %bb.0: # %entry
4640 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
4641 ; CHECK-NEXT: vrgather.vi v8, v12, 9, v0.t
4644 %a = call <vscale x 8 x float> @llvm.riscv.vrgather.vx.mask.nxv8f32.iXLen(
4645 <vscale x 8 x float> %0,
4646 <vscale x 8 x float> %1,
4648 <vscale x 8 x i1> %2,
4651 ret <vscale x 8 x float> %a
4654 define <vscale x 16 x float> @intrinsic_vrgather_vi_nxv16f32_nxv16f32(<vscale x 16 x float> %0, iXLen %1) nounwind {
4655 ; CHECK-LABEL: intrinsic_vrgather_vi_nxv16f32_nxv16f32:
4656 ; CHECK: # %bb.0: # %entry
4657 ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
4658 ; CHECK-NEXT: vrgather.vi v16, v8, 9
4659 ; CHECK-NEXT: vmv.v.v v8, v16
4662 %a = call <vscale x 16 x float> @llvm.riscv.vrgather.vx.nxv16f32.iXLen(
4663 <vscale x 16 x float> undef,
4664 <vscale x 16 x float> %0,
4668 ret <vscale x 16 x float> %a
4671 define <vscale x 16 x float> @intrinsic_vrgather_mask_vi_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
4672 ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv16f32_nxv16f32:
4673 ; CHECK: # %bb.0: # %entry
4674 ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu
4675 ; CHECK-NEXT: vrgather.vi v8, v16, 9, v0.t
4678 %a = call <vscale x 16 x float> @llvm.riscv.vrgather.vx.mask.nxv16f32.iXLen(
4679 <vscale x 16 x float> %0,
4680 <vscale x 16 x float> %1,
4682 <vscale x 16 x i1> %2,
4685 ret <vscale x 16 x float> %a
4688 define <vscale x 1 x double> @intrinsic_vrgather_vi_nxv1f64_nxv1f64(<vscale x 1 x double> %0, iXLen %1) nounwind {
4689 ; CHECK-LABEL: intrinsic_vrgather_vi_nxv1f64_nxv1f64:
4690 ; CHECK: # %bb.0: # %entry
4691 ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
4692 ; CHECK-NEXT: vrgather.vi v9, v8, 9
4693 ; CHECK-NEXT: vmv.v.v v8, v9
4696 %a = call <vscale x 1 x double> @llvm.riscv.vrgather.vx.nxv1f64.iXLen(
4697 <vscale x 1 x double> undef,
4698 <vscale x 1 x double> %0,
4702 ret <vscale x 1 x double> %a
4705 define <vscale x 1 x double> @intrinsic_vrgather_mask_vi_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
4706 ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv1f64_nxv1f64:
4707 ; CHECK: # %bb.0: # %entry
4708 ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
4709 ; CHECK-NEXT: vrgather.vi v8, v9, 9, v0.t
4712 %a = call <vscale x 1 x double> @llvm.riscv.vrgather.vx.mask.nxv1f64.iXLen(
4713 <vscale x 1 x double> %0,
4714 <vscale x 1 x double> %1,
4716 <vscale x 1 x i1> %2,
4719 ret <vscale x 1 x double> %a
4722 define <vscale x 2 x double> @intrinsic_vrgather_vi_nxv2f64_nxv2f64(<vscale x 2 x double> %0, iXLen %1) nounwind {
4723 ; CHECK-LABEL: intrinsic_vrgather_vi_nxv2f64_nxv2f64:
4724 ; CHECK: # %bb.0: # %entry
4725 ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
4726 ; CHECK-NEXT: vrgather.vi v10, v8, 9
4727 ; CHECK-NEXT: vmv.v.v v8, v10
4730 %a = call <vscale x 2 x double> @llvm.riscv.vrgather.vx.nxv2f64.iXLen(
4731 <vscale x 2 x double> undef,
4732 <vscale x 2 x double> %0,
4736 ret <vscale x 2 x double> %a
4739 define <vscale x 2 x double> @intrinsic_vrgather_mask_vi_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
4740 ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv2f64_nxv2f64:
4741 ; CHECK: # %bb.0: # %entry
4742 ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
4743 ; CHECK-NEXT: vrgather.vi v8, v10, 9, v0.t
4746 %a = call <vscale x 2 x double> @llvm.riscv.vrgather.vx.mask.nxv2f64.iXLen(
4747 <vscale x 2 x double> %0,
4748 <vscale x 2 x double> %1,
4750 <vscale x 2 x i1> %2,
4753 ret <vscale x 2 x double> %a
4756 define <vscale x 4 x double> @intrinsic_vrgather_vi_nxv4f64_nxv4f64(<vscale x 4 x double> %0, iXLen %1) nounwind {
4757 ; CHECK-LABEL: intrinsic_vrgather_vi_nxv4f64_nxv4f64:
4758 ; CHECK: # %bb.0: # %entry
4759 ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
4760 ; CHECK-NEXT: vrgather.vi v12, v8, 9
4761 ; CHECK-NEXT: vmv.v.v v8, v12
4764 %a = call <vscale x 4 x double> @llvm.riscv.vrgather.vx.nxv4f64.iXLen(
4765 <vscale x 4 x double> undef,
4766 <vscale x 4 x double> %0,
4770 ret <vscale x 4 x double> %a
4773 define <vscale x 4 x double> @intrinsic_vrgather_mask_vi_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
4774 ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv4f64_nxv4f64:
4775 ; CHECK: # %bb.0: # %entry
4776 ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
4777 ; CHECK-NEXT: vrgather.vi v8, v12, 9, v0.t
4780 %a = call <vscale x 4 x double> @llvm.riscv.vrgather.vx.mask.nxv4f64.iXLen(
4781 <vscale x 4 x double> %0,
4782 <vscale x 4 x double> %1,
4784 <vscale x 4 x i1> %2,
4787 ret <vscale x 4 x double> %a
4790 define <vscale x 8 x double> @intrinsic_vrgather_vi_nxv8f64_nxv8f64(<vscale x 8 x double> %0, iXLen %1) nounwind {
4791 ; CHECK-LABEL: intrinsic_vrgather_vi_nxv8f64_nxv8f64:
4792 ; CHECK: # %bb.0: # %entry
4793 ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
4794 ; CHECK-NEXT: vrgather.vi v16, v8, 9
4795 ; CHECK-NEXT: vmv.v.v v8, v16
4798 %a = call <vscale x 8 x double> @llvm.riscv.vrgather.vx.nxv8f64.iXLen(
4799 <vscale x 8 x double> undef,
4800 <vscale x 8 x double> %0,
4804 ret <vscale x 8 x double> %a
4807 define <vscale x 8 x double> @intrinsic_vrgather_mask_vi_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
4808 ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv8f64_nxv8f64:
4809 ; CHECK: # %bb.0: # %entry
4810 ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
4811 ; CHECK-NEXT: vrgather.vi v8, v16, 9, v0.t
4814 %a = call <vscale x 8 x double> @llvm.riscv.vrgather.vx.mask.nxv8f64.iXLen(
4815 <vscale x 8 x double> %0,
4816 <vscale x 8 x double> %1,
4818 <vscale x 8 x i1> %2,
4821 ret <vscale x 8 x double> %a
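; The remaining tests cover bfloat16 elements, which the +zvfbfmin attribute
; in the RUN lines makes legal as vector element types. vrgather is pure data
; movement, so the bf16 patterns mirror the f16 ones above, with
; <vscale x N x i16> index vectors in the .vv form.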
4824 declare <vscale x 1 x bfloat> @llvm.riscv.vrgather.vv.nxv1bf16.iXLen(
4825 <vscale x 1 x bfloat>,
4826 <vscale x 1 x bfloat>,
4830 define <vscale x 1 x bfloat> @intrinsic_vrgather_vv_nxv1bf16_nxv1bf16_nxv1i16(<vscale x 1 x bfloat> %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
4831 ; CHECK-LABEL: intrinsic_vrgather_vv_nxv1bf16_nxv1bf16_nxv1i16:
4832 ; CHECK: # %bb.0: # %entry
4833 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
4834 ; CHECK-NEXT: vrgather.vv v10, v8, v9
4835 ; CHECK-NEXT: vmv1r.v v8, v10
4838 %a = call <vscale x 1 x bfloat> @llvm.riscv.vrgather.vv.nxv1bf16.iXLen(
4839 <vscale x 1 x bfloat> undef,
4840 <vscale x 1 x bfloat> %0,
4841 <vscale x 1 x i16> %1,
4844 ret <vscale x 1 x bfloat> %a
4847 declare <vscale x 1 x bfloat> @llvm.riscv.vrgather.vv.mask.nxv1bf16.iXLen(
4848 <vscale x 1 x bfloat>,
4849 <vscale x 1 x bfloat>,
4855 define <vscale x 1 x bfloat> @intrinsic_vrgather_mask_vv_nxv1bf16_nxv1bf16_nxv1i16(<vscale x 1 x bfloat> %0, <vscale x 1 x bfloat> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
4856 ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv1bf16_nxv1bf16_nxv1i16:
4857 ; CHECK: # %bb.0: # %entry
4858 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
4859 ; CHECK-NEXT: vrgather.vv v8, v9, v10, v0.t
4862 %a = call <vscale x 1 x bfloat> @llvm.riscv.vrgather.vv.mask.nxv1bf16.iXLen(
4863 <vscale x 1 x bfloat> %0,
4864 <vscale x 1 x bfloat> %1,
4865 <vscale x 1 x i16> %2,
4866 <vscale x 1 x i1> %3,
4869 ret <vscale x 1 x bfloat> %a
4872 declare <vscale x 2 x bfloat> @llvm.riscv.vrgather.vv.nxv2bf16.iXLen(
4873 <vscale x 2 x bfloat>,
4874 <vscale x 2 x bfloat>,
4878 define <vscale x 2 x bfloat> @intrinsic_vrgather_vv_nxv2bf16_nxv2bf16_nxv2i16(<vscale x 2 x bfloat> %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
4879 ; CHECK-LABEL: intrinsic_vrgather_vv_nxv2bf16_nxv2bf16_nxv2i16:
4880 ; CHECK: # %bb.0: # %entry
4881 ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
4882 ; CHECK-NEXT: vrgather.vv v10, v8, v9
4883 ; CHECK-NEXT: vmv1r.v v8, v10
4886 %a = call <vscale x 2 x bfloat> @llvm.riscv.vrgather.vv.nxv2bf16.iXLen(
4887 <vscale x 2 x bfloat> undef,
4888 <vscale x 2 x bfloat> %0,
4889 <vscale x 2 x i16> %1,
4892 ret <vscale x 2 x bfloat> %a
4895 declare <vscale x 2 x bfloat> @llvm.riscv.vrgather.vv.mask.nxv2bf16.iXLen(
4896 <vscale x 2 x bfloat>,
4897 <vscale x 2 x bfloat>,
4903 define <vscale x 2 x bfloat> @intrinsic_vrgather_mask_vv_nxv2bf16_nxv2bf16_nxv2i16(<vscale x 2 x bfloat> %0, <vscale x 2 x bfloat> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
4904 ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv2bf16_nxv2bf16_nxv2i16:
4905 ; CHECK: # %bb.0: # %entry
4906 ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
4907 ; CHECK-NEXT: vrgather.vv v8, v9, v10, v0.t
4910 %a = call <vscale x 2 x bfloat> @llvm.riscv.vrgather.vv.mask.nxv2bf16.iXLen(
4911 <vscale x 2 x bfloat> %0,
4912 <vscale x 2 x bfloat> %1,
4913 <vscale x 2 x i16> %2,
4914 <vscale x 2 x i1> %3,
4917 ret <vscale x 2 x bfloat> %a
4920 declare <vscale x 4 x bfloat> @llvm.riscv.vrgather.vv.nxv4bf16.iXLen(
4921 <vscale x 4 x bfloat>,
4922 <vscale x 4 x bfloat>,
4926 define <vscale x 4 x bfloat> @intrinsic_vrgather_vv_nxv4bf16_nxv4bf16_nxv4i16(<vscale x 4 x bfloat> %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
4927 ; CHECK-LABEL: intrinsic_vrgather_vv_nxv4bf16_nxv4bf16_nxv4i16:
4928 ; CHECK: # %bb.0: # %entry
4929 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
4930 ; CHECK-NEXT: vrgather.vv v10, v8, v9
4931 ; CHECK-NEXT: vmv.v.v v8, v10
4934 %a = call <vscale x 4 x bfloat> @llvm.riscv.vrgather.vv.nxv4bf16.iXLen(
4935 <vscale x 4 x bfloat> undef,
4936 <vscale x 4 x bfloat> %0,
4937 <vscale x 4 x i16> %1,
4940 ret <vscale x 4 x bfloat> %a
4943 declare <vscale x 4 x bfloat> @llvm.riscv.vrgather.vv.mask.nxv4bf16.iXLen(
4944 <vscale x 4 x bfloat>,
4945 <vscale x 4 x bfloat>,
4951 define <vscale x 4 x bfloat> @intrinsic_vrgather_mask_vv_nxv4bf16_nxv4bf16_nxv4i16(<vscale x 4 x bfloat> %0, <vscale x 4 x bfloat> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
4952 ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv4bf16_nxv4bf16_nxv4i16:
4953 ; CHECK: # %bb.0: # %entry
4954 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
4955 ; CHECK-NEXT: vrgather.vv v8, v9, v10, v0.t
4958 %a = call <vscale x 4 x bfloat> @llvm.riscv.vrgather.vv.mask.nxv4bf16.iXLen(
4959 <vscale x 4 x bfloat> %0,
4960 <vscale x 4 x bfloat> %1,
4961 <vscale x 4 x i16> %2,
4962 <vscale x 4 x i1> %3,
4965 ret <vscale x 4 x bfloat> %a
4968 declare <vscale x 8 x bfloat> @llvm.riscv.vrgather.vv.nxv8bf16.iXLen(
4969 <vscale x 8 x bfloat>,
4970 <vscale x 8 x bfloat>,
4974 define <vscale x 8 x bfloat> @intrinsic_vrgather_vv_nxv8bf16_nxv8bf16_nxv8i16(<vscale x 8 x bfloat> %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
4975 ; CHECK-LABEL: intrinsic_vrgather_vv_nxv8bf16_nxv8bf16_nxv8i16:
4976 ; CHECK: # %bb.0: # %entry
4977 ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
4978 ; CHECK-NEXT: vrgather.vv v12, v8, v10
4979 ; CHECK-NEXT: vmv.v.v v8, v12
4982 %a = call <vscale x 8 x bfloat> @llvm.riscv.vrgather.vv.nxv8bf16.iXLen(
4983 <vscale x 8 x bfloat> undef,
4984 <vscale x 8 x bfloat> %0,
4985 <vscale x 8 x i16> %1,
4988 ret <vscale x 8 x bfloat> %a
4991 declare <vscale x 8 x bfloat> @llvm.riscv.vrgather.vv.mask.nxv8bf16.iXLen(
4992 <vscale x 8 x bfloat>,
4993 <vscale x 8 x bfloat>,
4999 define <vscale x 8 x bfloat> @intrinsic_vrgather_mask_vv_nxv8bf16_nxv8bf16_nxv8i16(<vscale x 8 x bfloat> %0, <vscale x 8 x bfloat> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
5000 ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv8bf16_nxv8bf16_nxv8i16:
5001 ; CHECK: # %bb.0: # %entry
5002 ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
5003 ; CHECK-NEXT: vrgather.vv v8, v10, v12, v0.t
5006 %a = call <vscale x 8 x bfloat> @llvm.riscv.vrgather.vv.mask.nxv8bf16.iXLen(
5007 <vscale x 8 x bfloat> %0,
5008 <vscale x 8 x bfloat> %1,
5009 <vscale x 8 x i16> %2,
5010 <vscale x 8 x i1> %3,
5013 ret <vscale x 8 x bfloat> %a
5016 declare <vscale x 16 x bfloat> @llvm.riscv.vrgather.vv.nxv16bf16.iXLen(
5017 <vscale x 16 x bfloat>,
5018 <vscale x 16 x bfloat>,
5019 <vscale x 16 x i16>,
5022 define <vscale x 16 x bfloat> @intrinsic_vrgather_vv_nxv16bf16_nxv16bf16_nxv16i16(<vscale x 16 x bfloat> %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
5023 ; CHECK-LABEL: intrinsic_vrgather_vv_nxv16bf16_nxv16bf16_nxv16i16:
5024 ; CHECK: # %bb.0: # %entry
5025 ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
5026 ; CHECK-NEXT: vrgather.vv v16, v8, v12
5027 ; CHECK-NEXT: vmv.v.v v8, v16
5030 %a = call <vscale x 16 x bfloat> @llvm.riscv.vrgather.vv.nxv16bf16.iXLen(
5031 <vscale x 16 x bfloat> undef,
5032 <vscale x 16 x bfloat> %0,
5033 <vscale x 16 x i16> %1,
5036 ret <vscale x 16 x bfloat> %a
5039 declare <vscale x 16 x bfloat> @llvm.riscv.vrgather.vv.mask.nxv16bf16.iXLen(
5040 <vscale x 16 x bfloat>,
5041 <vscale x 16 x bfloat>,
5042 <vscale x 16 x i16>,
5047 define <vscale x 16 x bfloat> @intrinsic_vrgather_mask_vv_nxv16bf16_nxv16bf16_nxv16i16(<vscale x 16 x bfloat> %0, <vscale x 16 x bfloat> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
5048 ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv16bf16_nxv16bf16_nxv16i16:
5049 ; CHECK: # %bb.0: # %entry
5050 ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
5051 ; CHECK-NEXT: vrgather.vv v8, v12, v16, v0.t
5054 %a = call <vscale x 16 x bfloat> @llvm.riscv.vrgather.vv.mask.nxv16bf16.iXLen(
5055 <vscale x 16 x bfloat> %0,
5056 <vscale x 16 x bfloat> %1,
5057 <vscale x 16 x i16> %2,
5058 <vscale x 16 x i1> %3,
5061 ret <vscale x 16 x bfloat> %a
5064 declare <vscale x 32 x bfloat> @llvm.riscv.vrgather.vv.nxv32bf16.iXLen(
5065 <vscale x 32 x bfloat>,
5066 <vscale x 32 x bfloat>,
5067 <vscale x 32 x i16>,
5070 define <vscale x 32 x bfloat> @intrinsic_vrgather_vv_nxv32bf16_nxv32bf16_nxv32i16(<vscale x 32 x bfloat> %0, <vscale x 32 x i16> %1, iXLen %2) nounwind {
5071 ; CHECK-LABEL: intrinsic_vrgather_vv_nxv32bf16_nxv32bf16_nxv32i16:
5072 ; CHECK: # %bb.0: # %entry
5073 ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
5074 ; CHECK-NEXT: vrgather.vv v24, v8, v16
5075 ; CHECK-NEXT: vmv.v.v v8, v24
5078 %a = call <vscale x 32 x bfloat> @llvm.riscv.vrgather.vv.nxv32bf16.iXLen(
5079 <vscale x 32 x bfloat> undef,
5080 <vscale x 32 x bfloat> %0,
5081 <vscale x 32 x i16> %1,
5084 ret <vscale x 32 x bfloat> %a
5087 declare <vscale x 32 x bfloat> @llvm.riscv.vrgather.vv.mask.nxv32bf16.iXLen(
5088 <vscale x 32 x bfloat>,
5089 <vscale x 32 x bfloat>,
5090 <vscale x 32 x i16>,
5095 define <vscale x 32 x bfloat> @intrinsic_vrgather_mask_vv_nxv32bf16_nxv32bf16_nxv32i16(<vscale x 32 x bfloat> %0, <vscale x 32 x bfloat> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
5096 ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv32bf16_nxv32bf16_nxv32i16:
5097 ; CHECK: # %bb.0: # %entry
5098 ; CHECK-NEXT: vl8re16.v v24, (a0)
5099 ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu
5100 ; CHECK-NEXT: vrgather.vv v8, v16, v24, v0.t
5103 %a = call <vscale x 32 x bfloat> @llvm.riscv.vrgather.vv.mask.nxv32bf16.iXLen(
5104 <vscale x 32 x bfloat> %0,
5105 <vscale x 32 x bfloat> %1,
5106 <vscale x 32 x i16> %2,
5107 <vscale x 32 x i1> %3,
5110 ret <vscale x 32 x bfloat> %a
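; vrgather.vx tests for bf16: the index is a scalar GPR (a0 here), so the
; vector length moves to a1 in the vsetvli.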
5113 declare <vscale x 1 x bfloat> @llvm.riscv.vrgather.vx.nxv1bf16.iXLen(
5114 <vscale x 1 x bfloat>,
5115 <vscale x 1 x bfloat>,
5119 define <vscale x 1 x bfloat> @intrinsic_vrgather_vx_nxv1bf16_nxv1bf16(<vscale x 1 x bfloat> %0, iXLen %1, iXLen %2) nounwind {
5120 ; CHECK-LABEL: intrinsic_vrgather_vx_nxv1bf16_nxv1bf16:
5121 ; CHECK: # %bb.0: # %entry
5122 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
5123 ; CHECK-NEXT: vrgather.vx v9, v8, a0
5124 ; CHECK-NEXT: vmv1r.v v8, v9
5127 %a = call <vscale x 1 x bfloat> @llvm.riscv.vrgather.vx.nxv1bf16.iXLen(
5128 <vscale x 1 x bfloat> undef,
5129 <vscale x 1 x bfloat> %0,
5133 ret <vscale x 1 x bfloat> %a
5136 declare <vscale x 1 x bfloat> @llvm.riscv.vrgather.vx.mask.nxv1bf16.iXLen(
5137 <vscale x 1 x bfloat>,
5138 <vscale x 1 x bfloat>,
5144 define <vscale x 1 x bfloat> @intrinsic_vrgather_mask_vx_nxv1bf16_nxv1bf16(<vscale x 1 x bfloat> %0, <vscale x 1 x bfloat> %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
5145 ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv1bf16_nxv1bf16:
5146 ; CHECK: # %bb.0: # %entry
5147 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
5148 ; CHECK-NEXT: vrgather.vx v8, v9, a0, v0.t
5151 %a = call <vscale x 1 x bfloat> @llvm.riscv.vrgather.vx.mask.nxv1bf16.iXLen(
5152 <vscale x 1 x bfloat> %0,
5153 <vscale x 1 x bfloat> %1,
5155 <vscale x 1 x i1> %3,
5158 ret <vscale x 1 x bfloat> %a
5161 declare <vscale x 2 x bfloat> @llvm.riscv.vrgather.vx.nxv2bf16.iXLen(
5162 <vscale x 2 x bfloat>,
5163 <vscale x 2 x bfloat>,
5167 define <vscale x 2 x bfloat> @intrinsic_vrgather_vx_nxv2bf16_nxv2bf16(<vscale x 2 x bfloat> %0, iXLen %1, iXLen %2) nounwind {
5168 ; CHECK-LABEL: intrinsic_vrgather_vx_nxv2bf16_nxv2bf16:
5169 ; CHECK: # %bb.0: # %entry
5170 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
5171 ; CHECK-NEXT: vrgather.vx v9, v8, a0
5172 ; CHECK-NEXT: vmv1r.v v8, v9
5175 %a = call <vscale x 2 x bfloat> @llvm.riscv.vrgather.vx.nxv2bf16.iXLen(
5176 <vscale x 2 x bfloat> undef,
5177 <vscale x 2 x bfloat> %0,
5181 ret <vscale x 2 x bfloat> %a
5184 declare <vscale x 2 x bfloat> @llvm.riscv.vrgather.vx.mask.nxv2bf16.iXLen(
5185 <vscale x 2 x bfloat>,
5186 <vscale x 2 x bfloat>,
5192 define <vscale x 2 x bfloat> @intrinsic_vrgather_mask_vx_nxv2bf16_nxv2bf16(<vscale x 2 x bfloat> %0, <vscale x 2 x bfloat> %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
5193 ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv2bf16_nxv2bf16:
5194 ; CHECK: # %bb.0: # %entry
5195 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
5196 ; CHECK-NEXT: vrgather.vx v8, v9, a0, v0.t
5199 %a = call <vscale x 2 x bfloat> @llvm.riscv.vrgather.vx.mask.nxv2bf16.iXLen(
5200 <vscale x 2 x bfloat> %0,
5201 <vscale x 2 x bfloat> %1,
5203 <vscale x 2 x i1> %3,
5206 ret <vscale x 2 x bfloat> %a
5209 declare <vscale x 4 x bfloat> @llvm.riscv.vrgather.vx.nxv4bf16.iXLen(
5210 <vscale x 4 x bfloat>,
5211 <vscale x 4 x bfloat>,
5215 define <vscale x 4 x bfloat> @intrinsic_vrgather_vx_nxv4bf16_nxv4bf16(<vscale x 4 x bfloat> %0, iXLen %1, iXLen %2) nounwind {
5216 ; CHECK-LABEL: intrinsic_vrgather_vx_nxv4bf16_nxv4bf16:
5217 ; CHECK: # %bb.0: # %entry
5218 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
5219 ; CHECK-NEXT: vrgather.vx v9, v8, a0
5220 ; CHECK-NEXT: vmv.v.v v8, v9
5223 %a = call <vscale x 4 x bfloat> @llvm.riscv.vrgather.vx.nxv4bf16.iXLen(
5224 <vscale x 4 x bfloat> undef,
5225 <vscale x 4 x bfloat> %0,
5229 ret <vscale x 4 x bfloat> %a
5232 declare <vscale x 4 x bfloat> @llvm.riscv.vrgather.vx.mask.nxv4bf16.iXLen(
5233 <vscale x 4 x bfloat>,
5234 <vscale x 4 x bfloat>,
5240 define <vscale x 4 x bfloat> @intrinsic_vrgather_mask_vx_nxv4bf16_nxv4bf16(<vscale x 4 x bfloat> %0, <vscale x 4 x bfloat> %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
5241 ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv4bf16_nxv4bf16:
5242 ; CHECK: # %bb.0: # %entry
5243 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
5244 ; CHECK-NEXT: vrgather.vx v8, v9, a0, v0.t
5247 %a = call <vscale x 4 x bfloat> @llvm.riscv.vrgather.vx.mask.nxv4bf16.iXLen(
5248 <vscale x 4 x bfloat> %0,
5249 <vscale x 4 x bfloat> %1,
5251 <vscale x 4 x i1> %3,
5254 ret <vscale x 4 x bfloat> %a
5257 declare <vscale x 8 x bfloat> @llvm.riscv.vrgather.vx.nxv8bf16.iXLen(
5258 <vscale x 8 x bfloat>,
5259 <vscale x 8 x bfloat>,
5263 define <vscale x 8 x bfloat> @intrinsic_vrgather_vx_nxv8bf16_nxv8bf16(<vscale x 8 x bfloat> %0, iXLen %1, iXLen %2) nounwind {
5264 ; CHECK-LABEL: intrinsic_vrgather_vx_nxv8bf16_nxv8bf16:
5265 ; CHECK: # %bb.0: # %entry
5266 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
5267 ; CHECK-NEXT: vrgather.vx v10, v8, a0
5268 ; CHECK-NEXT: vmv.v.v v8, v10
5271 %a = call <vscale x 8 x bfloat> @llvm.riscv.vrgather.vx.nxv8bf16.iXLen(
5272 <vscale x 8 x bfloat> undef,
5273 <vscale x 8 x bfloat> %0,
5277 ret <vscale x 8 x bfloat> %a
5280 declare <vscale x 8 x bfloat> @llvm.riscv.vrgather.vx.mask.nxv8bf16.iXLen(
5281 <vscale x 8 x bfloat>,
5282 <vscale x 8 x bfloat>,
5288 define <vscale x 8 x bfloat> @intrinsic_vrgather_mask_vx_nxv8bf16_nxv8bf16(<vscale x 8 x bfloat> %0, <vscale x 8 x bfloat> %1, iXLen %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
5289 ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv8bf16_nxv8bf16:
5290 ; CHECK: # %bb.0: # %entry
5291 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
5292 ; CHECK-NEXT: vrgather.vx v8, v10, a0, v0.t
5295 %a = call <vscale x 8 x bfloat> @llvm.riscv.vrgather.vx.mask.nxv8bf16.iXLen(
5296 <vscale x 8 x bfloat> %0,
5297 <vscale x 8 x bfloat> %1,
5299 <vscale x 8 x i1> %3,
5302 ret <vscale x 8 x bfloat> %a
5305 declare <vscale x 16 x bfloat> @llvm.riscv.vrgather.vx.nxv16bf16.iXLen(
5306 <vscale x 16 x bfloat>,
5307 <vscale x 16 x bfloat>,
5311 define <vscale x 16 x bfloat> @intrinsic_vrgather_vx_nxv16bf16_nxv16bf16(<vscale x 16 x bfloat> %0, iXLen %1, iXLen %2) nounwind {
5312 ; CHECK-LABEL: intrinsic_vrgather_vx_nxv16bf16_nxv16bf16:
5313 ; CHECK: # %bb.0: # %entry
5314 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
5315 ; CHECK-NEXT: vrgather.vx v12, v8, a0
5316 ; CHECK-NEXT: vmv.v.v v8, v12
5319 %a = call <vscale x 16 x bfloat> @llvm.riscv.vrgather.vx.nxv16bf16.iXLen(
5320 <vscale x 16 x bfloat> undef,
5321 <vscale x 16 x bfloat> %0,
5325 ret <vscale x 16 x bfloat> %a
5328 declare <vscale x 16 x bfloat> @llvm.riscv.vrgather.vx.mask.nxv16bf16.iXLen(
5329 <vscale x 16 x bfloat>,
5330 <vscale x 16 x bfloat>,
5336 define <vscale x 16 x bfloat> @intrinsic_vrgather_mask_vx_nxv16bf16_nxv16bf16(<vscale x 16 x bfloat> %0, <vscale x 16 x bfloat> %1, iXLen %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
5337 ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv16bf16_nxv16bf16:
5338 ; CHECK: # %bb.0: # %entry
5339 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
5340 ; CHECK-NEXT: vrgather.vx v8, v12, a0, v0.t
5343 %a = call <vscale x 16 x bfloat> @llvm.riscv.vrgather.vx.mask.nxv16bf16.iXLen(
5344 <vscale x 16 x bfloat> %0,
5345 <vscale x 16 x bfloat> %1,
5347 <vscale x 16 x i1> %3,
5350 ret <vscale x 16 x bfloat> %a
5353 declare <vscale x 32 x bfloat> @llvm.riscv.vrgather.vx.nxv32bf16.iXLen(
5354 <vscale x 32 x bfloat>,
5355 <vscale x 32 x bfloat>,
5359 define <vscale x 32 x bfloat> @intrinsic_vrgather_vx_nxv32bf16_nxv32bf16(<vscale x 32 x bfloat> %0, iXLen %1, iXLen %2) nounwind {
5360 ; CHECK-LABEL: intrinsic_vrgather_vx_nxv32bf16_nxv32bf16:
5361 ; CHECK: # %bb.0: # %entry
5362 ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
5363 ; CHECK-NEXT: vrgather.vx v16, v8, a0
5364 ; CHECK-NEXT: vmv.v.v v8, v16
5367 %a = call <vscale x 32 x bfloat> @llvm.riscv.vrgather.vx.nxv32bf16.iXLen(
5368 <vscale x 32 x bfloat> undef,
5369 <vscale x 32 x bfloat> %0,
5373 ret <vscale x 32 x bfloat> %a
5376 declare <vscale x 32 x bfloat> @llvm.riscv.vrgather.vx.mask.nxv32bf16.iXLen(
5377 <vscale x 32 x bfloat>,
5378 <vscale x 32 x bfloat>,
5384 define <vscale x 32 x bfloat> @intrinsic_vrgather_mask_vx_nxv32bf16_nxv32bf16(<vscale x 32 x bfloat> %0, <vscale x 32 x bfloat> %1, iXLen %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
5385 ; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv32bf16_nxv32bf16:
5386 ; CHECK: # %bb.0: # %entry
5387 ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu
5388 ; CHECK-NEXT: vrgather.vx v8, v16, a0, v0.t
5391 %a = call <vscale x 32 x bfloat> @llvm.riscv.vrgather.vx.mask.nxv32bf16.iXLen(
5392 <vscale x 32 x bfloat> %0,
5393 <vscale x 32 x bfloat> %1,
5395 <vscale x 32 x i1> %3,
5398 ret <vscale x 32 x bfloat> %a
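; vrgather.vi tests for bf16: there is no separate .vi intrinsic, so these
; call the .vx intrinsic with a constant index (9 here) and check that llc
; selects the immediate form.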
5401 define <vscale x 1 x bfloat> @intrinsic_vrgather_vi_nxv1bf16_nxv1bf16(<vscale x 1 x bfloat> %0, iXLen %1) nounwind {
5402 ; CHECK-LABEL: intrinsic_vrgather_vi_nxv1bf16_nxv1bf16:
5403 ; CHECK: # %bb.0: # %entry
5404 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
5405 ; CHECK-NEXT: vrgather.vi v9, v8, 9
5406 ; CHECK-NEXT: vmv1r.v v8, v9
5409 %a = call <vscale x 1 x bfloat> @llvm.riscv.vrgather.vx.nxv1bf16.iXLen(
5410 <vscale x 1 x bfloat> undef,
5411 <vscale x 1 x bfloat> %0,
5415 ret <vscale x 1 x bfloat> %a
5418 define <vscale x 1 x bfloat> @intrinsic_vrgather_mask_vi_nxv1bf16_nxv1bf16(<vscale x 1 x bfloat> %0, <vscale x 1 x bfloat> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
5419 ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv1bf16_nxv1bf16:
5420 ; CHECK: # %bb.0: # %entry
5421 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
5422 ; CHECK-NEXT: vrgather.vi v8, v9, 9, v0.t
5425 %a = call <vscale x 1 x bfloat> @llvm.riscv.vrgather.vx.mask.nxv1bf16.iXLen(
5426 <vscale x 1 x bfloat> %0,
5427 <vscale x 1 x bfloat> %1,
5429 <vscale x 1 x i1> %2,
5432 ret <vscale x 1 x bfloat> %a
5435 define <vscale x 2 x bfloat> @intrinsic_vrgather_vi_nxv2bf16_nxv2bf16(<vscale x 2 x bfloat> %0, iXLen %1) nounwind {
5436 ; CHECK-LABEL: intrinsic_vrgather_vi_nxv2bf16_nxv2bf16:
5437 ; CHECK: # %bb.0: # %entry
5438 ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
5439 ; CHECK-NEXT: vrgather.vi v9, v8, 9
5440 ; CHECK-NEXT: vmv1r.v v8, v9
5443 %a = call <vscale x 2 x bfloat> @llvm.riscv.vrgather.vx.nxv2bf16.iXLen(
5444 <vscale x 2 x bfloat> undef,
5445 <vscale x 2 x bfloat> %0,
5449 ret <vscale x 2 x bfloat> %a
5452 define <vscale x 2 x bfloat> @intrinsic_vrgather_mask_vi_nxv2bf16_nxv2bf16(<vscale x 2 x bfloat> %0, <vscale x 2 x bfloat> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
5453 ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv2bf16_nxv2bf16:
5454 ; CHECK: # %bb.0: # %entry
5455 ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
5456 ; CHECK-NEXT: vrgather.vi v8, v9, 9, v0.t
5459 %a = call <vscale x 2 x bfloat> @llvm.riscv.vrgather.vx.mask.nxv2bf16.iXLen(
5460 <vscale x 2 x bfloat> %0,
5461 <vscale x 2 x bfloat> %1,
5463 <vscale x 2 x i1> %2,
5466 ret <vscale x 2 x bfloat> %a
5469 define <vscale x 4 x bfloat> @intrinsic_vrgather_vi_nxv4bf16_nxv4bf16(<vscale x 4 x bfloat> %0, iXLen %1) nounwind {
5470 ; CHECK-LABEL: intrinsic_vrgather_vi_nxv4bf16_nxv4bf16:
5471 ; CHECK: # %bb.0: # %entry
5472 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
5473 ; CHECK-NEXT: vrgather.vi v9, v8, 9
5474 ; CHECK-NEXT: vmv.v.v v8, v9
5477 %a = call <vscale x 4 x bfloat> @llvm.riscv.vrgather.vx.nxv4bf16.iXLen(
5478 <vscale x 4 x bfloat> undef,
5479 <vscale x 4 x bfloat> %0,
5483 ret <vscale x 4 x bfloat> %a
5486 define <vscale x 4 x bfloat> @intrinsic_vrgather_mask_vi_nxv4bf16_nxv4bf16(<vscale x 4 x bfloat> %0, <vscale x 4 x bfloat> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
5487 ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv4bf16_nxv4bf16:
5488 ; CHECK: # %bb.0: # %entry
5489 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
5490 ; CHECK-NEXT: vrgather.vi v8, v9, 9, v0.t
5493 %a = call <vscale x 4 x bfloat> @llvm.riscv.vrgather.vx.mask.nxv4bf16.iXLen(
5494 <vscale x 4 x bfloat> %0,
5495 <vscale x 4 x bfloat> %1,
5497 <vscale x 4 x i1> %2,
5500 ret <vscale x 4 x bfloat> %a
5503 define <vscale x 8 x bfloat> @intrinsic_vrgather_vi_nxv8bf16_nxv8bf16(<vscale x 8 x bfloat> %0, iXLen %1) nounwind {
5504 ; CHECK-LABEL: intrinsic_vrgather_vi_nxv8bf16_nxv8bf16:
5505 ; CHECK: # %bb.0: # %entry
5506 ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
5507 ; CHECK-NEXT: vrgather.vi v10, v8, 9
5508 ; CHECK-NEXT: vmv.v.v v8, v10
5511 %a = call <vscale x 8 x bfloat> @llvm.riscv.vrgather.vx.nxv8bf16.iXLen(
5512 <vscale x 8 x bfloat> undef,
5513 <vscale x 8 x bfloat> %0,
5517 ret <vscale x 8 x bfloat> %a
5520 define <vscale x 8 x bfloat> @intrinsic_vrgather_mask_vi_nxv8bf16_nxv8bf16(<vscale x 8 x bfloat> %0, <vscale x 8 x bfloat> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
5521 ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv8bf16_nxv8bf16:
5522 ; CHECK: # %bb.0: # %entry
5523 ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
5524 ; CHECK-NEXT: vrgather.vi v8, v10, 9, v0.t
5527 %a = call <vscale x 8 x bfloat> @llvm.riscv.vrgather.vx.mask.nxv8bf16.iXLen(
5528 <vscale x 8 x bfloat> %0,
5529 <vscale x 8 x bfloat> %1,
5531 <vscale x 8 x i1> %2,
5534 ret <vscale x 8 x bfloat> %a
5537 define <vscale x 16 x bfloat> @intrinsic_vrgather_vi_nxv16bf16_nxv16bf16(<vscale x 16 x bfloat> %0, iXLen %1) nounwind {
5538 ; CHECK-LABEL: intrinsic_vrgather_vi_nxv16bf16_nxv16bf16:
5539 ; CHECK: # %bb.0: # %entry
5540 ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
5541 ; CHECK-NEXT: vrgather.vi v12, v8, 9
5542 ; CHECK-NEXT: vmv.v.v v8, v12
5545 %a = call <vscale x 16 x bfloat> @llvm.riscv.vrgather.vx.nxv16bf16.iXLen(
5546 <vscale x 16 x bfloat> undef,
5547 <vscale x 16 x bfloat> %0,
5551 ret <vscale x 16 x bfloat> %a
5554 define <vscale x 16 x bfloat> @intrinsic_vrgather_mask_vi_nxv16bf16_nxv16bf16(<vscale x 16 x bfloat> %0, <vscale x 16 x bfloat> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
5555 ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv16bf16_nxv16bf16:
5556 ; CHECK: # %bb.0: # %entry
5557 ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
5558 ; CHECK-NEXT: vrgather.vi v8, v12, 9, v0.t
5561 %a = call <vscale x 16 x bfloat> @llvm.riscv.vrgather.vx.mask.nxv16bf16.iXLen(
5562 <vscale x 16 x bfloat> %0,
5563 <vscale x 16 x bfloat> %1,
5565 <vscale x 16 x i1> %2,
5568 ret <vscale x 16 x bfloat> %a
5571 define <vscale x 32 x bfloat> @intrinsic_vrgather_vi_nxv32bf16_nxv32bf16(<vscale x 32 x bfloat> %0, iXLen %1) nounwind {
5572 ; CHECK-LABEL: intrinsic_vrgather_vi_nxv32bf16_nxv32bf16:
5573 ; CHECK: # %bb.0: # %entry
5574 ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
5575 ; CHECK-NEXT: vrgather.vi v16, v8, 9
5576 ; CHECK-NEXT: vmv.v.v v8, v16
5579 %a = call <vscale x 32 x bfloat> @llvm.riscv.vrgather.vx.nxv32bf16.iXLen(
5580 <vscale x 32 x bfloat> undef,
5581 <vscale x 32 x bfloat> %0,
5585 ret <vscale x 32 x bfloat> %a
5588 define <vscale x 32 x bfloat> @intrinsic_vrgather_mask_vi_nxv32bf16_nxv32bf16(<vscale x 32 x bfloat> %0, <vscale x 32 x bfloat> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
5589 ; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv32bf16_nxv32bf16:
5590 ; CHECK: # %bb.0: # %entry
5591 ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu
5592 ; CHECK-NEXT: vrgather.vi v8, v16, 9, v0.t
5595 %a = call <vscale x 32 x bfloat> @llvm.riscv.vrgather.vx.mask.nxv32bf16.iXLen(
5596 <vscale x 32 x bfloat> %0,
5597 <vscale x 32 x bfloat> %1,
5599 <vscale x 32 x i1> %2,
5602 ret <vscale x 32 x bfloat> %a