; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64

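; This file exercises codegen for the llvm.riscv.vslide1up intrinsics in both
; their unmasked and masked forms, for i8/i16/i32/i64 element types across the
; supported LMULs. The iXLen placeholder is rewritten by sed in the RUN lines
; so the same IR is checked for both RV32 and RV64.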
declare <vscale x 1 x i8> @llvm.riscv.vslide1up.nxv1i8.i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  i8,
  iXLen)

define <vscale x 1 x i8> @intrinsic_vslide1up_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_vx_nxv1i8_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vslide1up.vx v9, v8, a0
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vslide1up.nxv1i8.i8(
    <vscale x 1 x i8> undef,
    <vscale x 1 x i8> %0,
    i8 %1,
    iXLen %2)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vslide1up.mask.nxv1i8.i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  i8,
  <vscale x 1 x i1>,
  iXLen,
  iXLen)

define <vscale x 1 x i8> @intrinsic_vslide1up_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv1i8_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vslide1up.vx v8, v9, a0, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vslide1up.mask.nxv1i8.i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    i8 %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 2 x i8> @llvm.riscv.vslide1up.nxv2i8.i8(
  <vscale x 2 x i8>,
  <vscale x 2 x i8>,
  i8,
  iXLen)

define <vscale x 2 x i8> @intrinsic_vslide1up_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_vx_nxv2i8_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vslide1up.vx v9, v8, a0
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vslide1up.nxv2i8.i8(
    <vscale x 2 x i8> undef,
    <vscale x 2 x i8> %0,
    i8 %1,
    iXLen %2)

  ret <vscale x 2 x i8> %a
}

declare <vscale x 2 x i8> @llvm.riscv.vslide1up.mask.nxv2i8.i8(
  <vscale x 2 x i8>,
  <vscale x 2 x i8>,
  i8,
  <vscale x 2 x i1>,
  iXLen,
  iXLen)

define <vscale x 2 x i8> @intrinsic_vslide1up_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv2i8_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vslide1up.vx v8, v9, a0, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vslide1up.mask.nxv2i8.i8(
    <vscale x 2 x i8> %0,
    <vscale x 2 x i8> %1,
    i8 %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x i8> %a
}

declare <vscale x 4 x i8> @llvm.riscv.vslide1up.nxv4i8.i8(
  <vscale x 4 x i8>,
  <vscale x 4 x i8>,
  i8,
  iXLen)

define <vscale x 4 x i8> @intrinsic_vslide1up_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_vx_nxv4i8_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vslide1up.vx v9, v8, a0
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vslide1up.nxv4i8.i8(
    <vscale x 4 x i8> undef,
    <vscale x 4 x i8> %0,
    i8 %1,
    iXLen %2)

  ret <vscale x 4 x i8> %a
}

declare <vscale x 4 x i8> @llvm.riscv.vslide1up.mask.nxv4i8.i8(
  <vscale x 4 x i8>,
  <vscale x 4 x i8>,
  i8,
  <vscale x 4 x i1>,
  iXLen,
  iXLen)

define <vscale x 4 x i8> @intrinsic_vslide1up_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv4i8_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vslide1up.vx v8, v9, a0, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vslide1up.mask.nxv4i8.i8(
    <vscale x 4 x i8> %0,
    <vscale x 4 x i8> %1,
    i8 %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vslide1up.nxv8i8.i8(
  <vscale x 8 x i8>,
  <vscale x 8 x i8>,
  i8,
  iXLen)

define <vscale x 8 x i8> @intrinsic_vslide1up_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_vx_nxv8i8_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vslide1up.vx v9, v8, a0
; CHECK-NEXT: vmv.v.v v8, v9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vslide1up.nxv8i8.i8(
    <vscale x 8 x i8> undef,
    <vscale x 8 x i8> %0,
    i8 %1,
    iXLen %2)

  ret <vscale x 8 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vslide1up.mask.nxv8i8.i8(
  <vscale x 8 x i8>,
  <vscale x 8 x i8>,
  i8,
  <vscale x 8 x i1>,
  iXLen,
  iXLen)

define <vscale x 8 x i8> @intrinsic_vslide1up_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv8i8_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vslide1up.vx v8, v9, a0, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vslide1up.mask.nxv8i8.i8(
    <vscale x 8 x i8> %0,
    <vscale x 8 x i8> %1,
    i8 %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x i8> %a
}

declare <vscale x 16 x i8> @llvm.riscv.vslide1up.nxv16i8.i8(
  <vscale x 16 x i8>,
  <vscale x 16 x i8>,
  i8,
  iXLen)

define <vscale x 16 x i8> @intrinsic_vslide1up_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_vx_nxv16i8_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT: vslide1up.vx v10, v8, a0
; CHECK-NEXT: vmv.v.v v8, v10
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vslide1up.nxv16i8.i8(
    <vscale x 16 x i8> undef,
    <vscale x 16 x i8> %0,
    i8 %1,
    iXLen %2)

  ret <vscale x 16 x i8> %a
}

declare <vscale x 16 x i8> @llvm.riscv.vslide1up.mask.nxv16i8.i8(
  <vscale x 16 x i8>,
  <vscale x 16 x i8>,
  i8,
  <vscale x 16 x i1>,
  iXLen,
  iXLen)

define <vscale x 16 x i8> @intrinsic_vslide1up_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv16i8_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vslide1up.vx v8, v10, a0, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vslide1up.mask.nxv16i8.i8(
    <vscale x 16 x i8> %0,
    <vscale x 16 x i8> %1,
    i8 %2,
    <vscale x 16 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 16 x i8> %a
}

declare <vscale x 32 x i8> @llvm.riscv.vslide1up.nxv32i8.i8(
  <vscale x 32 x i8>,
  <vscale x 32 x i8>,
  i8,
  iXLen)

define <vscale x 32 x i8> @intrinsic_vslide1up_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_vx_nxv32i8_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
; CHECK-NEXT: vslide1up.vx v12, v8, a0
; CHECK-NEXT: vmv.v.v v8, v12
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vslide1up.nxv32i8.i8(
    <vscale x 32 x i8> undef,
    <vscale x 32 x i8> %0,
    i8 %1,
    iXLen %2)

  ret <vscale x 32 x i8> %a
}

declare <vscale x 32 x i8> @llvm.riscv.vslide1up.mask.nxv32i8.i8(
  <vscale x 32 x i8>,
  <vscale x 32 x i8>,
  i8,
  <vscale x 32 x i1>,
  iXLen,
  iXLen)

define <vscale x 32 x i8> @intrinsic_vslide1up_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv32i8_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT: vslide1up.vx v8, v12, a0, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vslide1up.mask.nxv32i8.i8(
    <vscale x 32 x i8> %0,
    <vscale x 32 x i8> %1,
    i8 %2,
    <vscale x 32 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 32 x i8> %a
}

declare <vscale x 64 x i8> @llvm.riscv.vslide1up.nxv64i8.i8(
  <vscale x 64 x i8>,
  <vscale x 64 x i8>,
  i8,
  iXLen)

define <vscale x 64 x i8> @intrinsic_vslide1up_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_vx_nxv64i8_nxv64i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT: vslide1up.vx v16, v8, a0
; CHECK-NEXT: vmv.v.v v8, v16
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 64 x i8> @llvm.riscv.vslide1up.nxv64i8.i8(
    <vscale x 64 x i8> undef,
    <vscale x 64 x i8> %0,
    i8 %1,
    iXLen %2)

  ret <vscale x 64 x i8> %a
}

declare <vscale x 64 x i8> @llvm.riscv.vslide1up.mask.nxv64i8.i8(
  <vscale x 64 x i8>,
  <vscale x 64 x i8>,
  i8,
  <vscale x 64 x i1>,
  iXLen,
  iXLen)

define <vscale x 64 x i8> @intrinsic_vslide1up_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv64i8_nxv64i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu
; CHECK-NEXT: vslide1up.vx v8, v16, a0, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 64 x i8> @llvm.riscv.vslide1up.mask.nxv64i8.i8(
    <vscale x 64 x i8> %0,
    <vscale x 64 x i8> %1,
    i8 %2,
    <vscale x 64 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 64 x i8> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vslide1up.nxv1i16.i16(
  <vscale x 1 x i16>,
  <vscale x 1 x i16>,
  i16,
  iXLen)

define <vscale x 1 x i16> @intrinsic_vslide1up_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_vx_nxv1i16_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vslide1up.vx v9, v8, a0
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vslide1up.nxv1i16.i16(
    <vscale x 1 x i16> undef,
    <vscale x 1 x i16> %0,
    i16 %1,
    iXLen %2)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vslide1up.mask.nxv1i16.i16(
  <vscale x 1 x i16>,
  <vscale x 1 x i16>,
  i16,
  <vscale x 1 x i1>,
  iXLen,
  iXLen)

define <vscale x 1 x i16> @intrinsic_vslide1up_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv1i16_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vslide1up.vx v8, v9, a0, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vslide1up.mask.nxv1i16.i16(
    <vscale x 1 x i16> %0,
    <vscale x 1 x i16> %1,
    i16 %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vslide1up.nxv2i16.i16(
  <vscale x 2 x i16>,
  <vscale x 2 x i16>,
  i16,
  iXLen)

define <vscale x 2 x i16> @intrinsic_vslide1up_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_vx_nxv2i16_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vslide1up.vx v9, v8, a0
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vslide1up.nxv2i16.i16(
    <vscale x 2 x i16> undef,
    <vscale x 2 x i16> %0,
    i16 %1,
    iXLen %2)

  ret <vscale x 2 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vslide1up.mask.nxv2i16.i16(
  <vscale x 2 x i16>,
  <vscale x 2 x i16>,
  i16,
  <vscale x 2 x i1>,
  iXLen,
  iXLen)

define <vscale x 2 x i16> @intrinsic_vslide1up_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv2i16_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vslide1up.vx v8, v9, a0, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vslide1up.mask.nxv2i16.i16(
    <vscale x 2 x i16> %0,
    <vscale x 2 x i16> %1,
    i16 %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vslide1up.nxv4i16.i16(
  <vscale x 4 x i16>,
  <vscale x 4 x i16>,
  i16,
  iXLen)

define <vscale x 4 x i16> @intrinsic_vslide1up_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_vx_nxv4i16_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vslide1up.vx v9, v8, a0
; CHECK-NEXT: vmv.v.v v8, v9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vslide1up.nxv4i16.i16(
    <vscale x 4 x i16> undef,
    <vscale x 4 x i16> %0,
    i16 %1,
    iXLen %2)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vslide1up.mask.nxv4i16.i16(
  <vscale x 4 x i16>,
  <vscale x 4 x i16>,
  i16,
  <vscale x 4 x i1>,
  iXLen,
  iXLen)

define <vscale x 4 x i16> @intrinsic_vslide1up_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv4i16_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vslide1up.vx v8, v9, a0, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vslide1up.mask.nxv4i16.i16(
    <vscale x 4 x i16> %0,
    <vscale x 4 x i16> %1,
    i16 %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vslide1up.nxv8i16.i16(
  <vscale x 8 x i16>,
  <vscale x 8 x i16>,
  i16,
  iXLen)

define <vscale x 8 x i16> @intrinsic_vslide1up_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_vx_nxv8i16_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vslide1up.vx v10, v8, a0
; CHECK-NEXT: vmv.v.v v8, v10
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vslide1up.nxv8i16.i16(
    <vscale x 8 x i16> undef,
    <vscale x 8 x i16> %0,
    i16 %1,
    iXLen %2)

  ret <vscale x 8 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vslide1up.mask.nxv8i16.i16(
  <vscale x 8 x i16>,
  <vscale x 8 x i16>,
  i16,
  <vscale x 8 x i1>,
  iXLen,
  iXLen)

define <vscale x 8 x i16> @intrinsic_vslide1up_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv8i16_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vslide1up.vx v8, v10, a0, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vslide1up.mask.nxv8i16.i16(
    <vscale x 8 x i16> %0,
    <vscale x 8 x i16> %1,
    i16 %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vslide1up.nxv16i16.i16(
  <vscale x 16 x i16>,
  <vscale x 16 x i16>,
  i16,
  iXLen)

define <vscale x 16 x i16> @intrinsic_vslide1up_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_vx_nxv16i16_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT: vslide1up.vx v12, v8, a0
; CHECK-NEXT: vmv.v.v v8, v12
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vslide1up.nxv16i16.i16(
    <vscale x 16 x i16> undef,
    <vscale x 16 x i16> %0,
    i16 %1,
    iXLen %2)

  ret <vscale x 16 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vslide1up.mask.nxv16i16.i16(
  <vscale x 16 x i16>,
  <vscale x 16 x i16>,
  i16,
  <vscale x 16 x i1>,
  iXLen,
  iXLen)

define <vscale x 16 x i16> @intrinsic_vslide1up_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv16i16_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vslide1up.vx v8, v12, a0, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vslide1up.mask.nxv16i16.i16(
    <vscale x 16 x i16> %0,
    <vscale x 16 x i16> %1,
    i16 %2,
    <vscale x 16 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 16 x i16> %a
}

declare <vscale x 32 x i16> @llvm.riscv.vslide1up.nxv32i16.i16(
  <vscale x 32 x i16>,
  <vscale x 32 x i16>,
  i16,
  iXLen)

define <vscale x 32 x i16> @intrinsic_vslide1up_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_vx_nxv32i16_nxv32i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
; CHECK-NEXT: vslide1up.vx v16, v8, a0
; CHECK-NEXT: vmv.v.v v8, v16
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vslide1up.nxv32i16.i16(
    <vscale x 32 x i16> undef,
    <vscale x 32 x i16> %0,
    i16 %1,
    iXLen %2)

  ret <vscale x 32 x i16> %a
}

declare <vscale x 32 x i16> @llvm.riscv.vslide1up.mask.nxv32i16.i16(
  <vscale x 32 x i16>,
  <vscale x 32 x i16>,
  i16,
  <vscale x 32 x i1>,
  iXLen,
  iXLen)

define <vscale x 32 x i16> @intrinsic_vslide1up_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv32i16_nxv32i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu
; CHECK-NEXT: vslide1up.vx v8, v16, a0, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vslide1up.mask.nxv32i16.i16(
    <vscale x 32 x i16> %0,
    <vscale x 32 x i16> %1,
    i16 %2,
    <vscale x 32 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 32 x i16> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vslide1up.nxv1i32.i32(
  <vscale x 1 x i32>,
  <vscale x 1 x i32>,
  i32,
  iXLen)

define <vscale x 1 x i32> @intrinsic_vslide1up_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_vx_nxv1i32_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vslide1up.vx v9, v8, a0
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vslide1up.nxv1i32.i32(
    <vscale x 1 x i32> undef,
    <vscale x 1 x i32> %0,
    i32 %1,
    iXLen %2)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vslide1up.mask.nxv1i32.i32(
  <vscale x 1 x i32>,
  <vscale x 1 x i32>,
  i32,
  <vscale x 1 x i1>,
  iXLen,
  iXLen)

define <vscale x 1 x i32> @intrinsic_vslide1up_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv1i32_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vslide1up.vx v8, v9, a0, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vslide1up.mask.nxv1i32.i32(
    <vscale x 1 x i32> %0,
    <vscale x 1 x i32> %1,
    i32 %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vslide1up.nxv2i32.i32(
  <vscale x 2 x i32>,
  <vscale x 2 x i32>,
  i32,
  iXLen)

define <vscale x 2 x i32> @intrinsic_vslide1up_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_vx_nxv2i32_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vslide1up.vx v9, v8, a0
; CHECK-NEXT: vmv.v.v v8, v9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vslide1up.nxv2i32.i32(
    <vscale x 2 x i32> undef,
    <vscale x 2 x i32> %0,
    i32 %1,
    iXLen %2)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vslide1up.mask.nxv2i32.i32(
  <vscale x 2 x i32>,
  <vscale x 2 x i32>,
  i32,
  <vscale x 2 x i1>,
  iXLen,
  iXLen)

define <vscale x 2 x i32> @intrinsic_vslide1up_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv2i32_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vslide1up.vx v8, v9, a0, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vslide1up.mask.nxv2i32.i32(
    <vscale x 2 x i32> %0,
    <vscale x 2 x i32> %1,
    i32 %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vslide1up.nxv4i32.i32(
  <vscale x 4 x i32>,
  <vscale x 4 x i32>,
  i32,
  iXLen)

define <vscale x 4 x i32> @intrinsic_vslide1up_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_vx_nxv4i32_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vslide1up.vx v10, v8, a0
; CHECK-NEXT: vmv.v.v v8, v10
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vslide1up.nxv4i32.i32(
    <vscale x 4 x i32> undef,
    <vscale x 4 x i32> %0,
    i32 %1,
    iXLen %2)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vslide1up.mask.nxv4i32.i32(
  <vscale x 4 x i32>,
  <vscale x 4 x i32>,
  i32,
  <vscale x 4 x i1>,
  iXLen,
  iXLen)

define <vscale x 4 x i32> @intrinsic_vslide1up_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv4i32_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vslide1up.vx v8, v10, a0, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vslide1up.mask.nxv4i32.i32(
    <vscale x 4 x i32> %0,
    <vscale x 4 x i32> %1,
    i32 %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vslide1up.nxv8i32.i32(
  <vscale x 8 x i32>,
  <vscale x 8 x i32>,
  i32,
  iXLen)

define <vscale x 8 x i32> @intrinsic_vslide1up_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_vx_nxv8i32_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT: vslide1up.vx v12, v8, a0
; CHECK-NEXT: vmv.v.v v8, v12
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vslide1up.nxv8i32.i32(
    <vscale x 8 x i32> undef,
    <vscale x 8 x i32> %0,
    i32 %1,
    iXLen %2)

  ret <vscale x 8 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vslide1up.mask.nxv8i32.i32(
  <vscale x 8 x i32>,
  <vscale x 8 x i32>,
  i32,
  <vscale x 8 x i1>,
  iXLen,
  iXLen)

define <vscale x 8 x i32> @intrinsic_vslide1up_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv8i32_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT: vslide1up.vx v8, v12, a0, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vslide1up.mask.nxv8i32.i32(
    <vscale x 8 x i32> %0,
    <vscale x 8 x i32> %1,
    i32 %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x i32> %a
}

declare <vscale x 16 x i32> @llvm.riscv.vslide1up.nxv16i32.i32(
  <vscale x 16 x i32>,
  <vscale x 16 x i32>,
  i32,
  iXLen)

define <vscale x 16 x i32> @intrinsic_vslide1up_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_vx_nxv16i32_nxv16i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT: vslide1up.vx v16, v8, a0
; CHECK-NEXT: vmv.v.v v8, v16
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vslide1up.nxv16i32.i32(
    <vscale x 16 x i32> undef,
    <vscale x 16 x i32> %0,
    i32 %1,
    iXLen %2)

  ret <vscale x 16 x i32> %a
}

declare <vscale x 16 x i32> @llvm.riscv.vslide1up.mask.nxv16i32.i32(
  <vscale x 16 x i32>,
  <vscale x 16 x i32>,
  i32,
  <vscale x 16 x i1>,
  iXLen,
  iXLen)

define <vscale x 16 x i32> @intrinsic_vslide1up_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv16i32_nxv16i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
; CHECK-NEXT: vslide1up.vx v8, v16, a0, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vslide1up.mask.nxv16i32.i32(
    <vscale x 16 x i32> %0,
    <vscale x 16 x i32> %1,
    i32 %2,
    <vscale x 16 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 16 x i32> %a
}

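; For i64 element types the scalar operand does not fit in a single RV32 GPR,
; so (as the RV32 check lines below show) codegen doubles the VL at SEW=32 and
; performs two e32 vslide1up steps with the two halves of the scalar; the
; masked form then merges the result into the original destination with
; vmerge under the requested e64 VL.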
declare <vscale x 1 x i64> @llvm.riscv.vslide1up.nxv1i64.i64(
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  i64,
  iXLen)

define <vscale x 1 x i64> @intrinsic_vslide1up_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, iXLen %2) nounwind {
; RV32-LABEL: intrinsic_vslide1up_vx_nxv1i64_nxv1i64_i64:
; RV32: # %bb.0: # %entry
; RV32-NEXT: vsetvli a2, a2, e64, m1, ta, ma
; RV32-NEXT: slli a2, a2, 1
; RV32-NEXT: vsetvli zero, a2, e32, m1, ta, ma
; RV32-NEXT: vslide1up.vx v9, v8, a1
; RV32-NEXT: vslide1up.vx v8, v9, a0
; RV32-NEXT: ret
;
; RV64-LABEL: intrinsic_vslide1up_vx_nxv1i64_nxv1i64_i64:
; RV64: # %bb.0: # %entry
; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; RV64-NEXT: vslide1up.vx v9, v8, a0
; RV64-NEXT: vmv.v.v v8, v9
; RV64-NEXT: ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vslide1up.nxv1i64.i64(
    <vscale x 1 x i64> undef,
    <vscale x 1 x i64> %0,
    i64 %1,
    iXLen %2)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vslide1up.mask.nxv1i64.i64(
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  i64,
  <vscale x 1 x i1>,
  iXLen,
  iXLen)

define <vscale x 1 x i64> @intrinsic_vslide1up_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; RV32-LABEL: intrinsic_vslide1up_mask_vx_nxv1i64_nxv1i64_i64:
; RV32: # %bb.0: # %entry
; RV32-NEXT: vsetvli a3, a2, e64, m1, ta, ma
; RV32-NEXT: slli a3, a3, 1
; RV32-NEXT: vsetvli zero, a3, e32, m1, ta, ma
; RV32-NEXT: vslide1up.vx v10, v9, a1
; RV32-NEXT: vslide1up.vx v9, v10, a0
; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
; RV32-NEXT: vmerge.vvm v8, v8, v9, v0
; RV32-NEXT: ret
;
; RV64-LABEL: intrinsic_vslide1up_mask_vx_nxv1i64_nxv1i64_i64:
; RV64: # %bb.0: # %entry
; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; RV64-NEXT: vslide1up.vx v8, v9, a0, v0.t
; RV64-NEXT: ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vslide1up.mask.nxv1i64.i64(
    <vscale x 1 x i64> %0,
    <vscale x 1 x i64> %1,
    i64 %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.vslide1up.nxv2i64.i64(
  <vscale x 2 x i64>,
  <vscale x 2 x i64>,
  i64,
  iXLen)

define <vscale x 2 x i64> @intrinsic_vslide1up_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, iXLen %2) nounwind {
; RV32-LABEL: intrinsic_vslide1up_vx_nxv2i64_nxv2i64_i64:
; RV32: # %bb.0: # %entry
; RV32-NEXT: vsetvli a2, a2, e64, m2, ta, ma
; RV32-NEXT: slli a2, a2, 1
; RV32-NEXT: vsetvli zero, a2, e32, m2, ta, ma
; RV32-NEXT: vslide1up.vx v10, v8, a1
; RV32-NEXT: vslide1up.vx v8, v10, a0
; RV32-NEXT: ret
;
; RV64-LABEL: intrinsic_vslide1up_vx_nxv2i64_nxv2i64_i64:
; RV64: # %bb.0: # %entry
; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; RV64-NEXT: vslide1up.vx v10, v8, a0
; RV64-NEXT: vmv.v.v v8, v10
; RV64-NEXT: ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vslide1up.nxv2i64.i64(
    <vscale x 2 x i64> undef,
    <vscale x 2 x i64> %0,
    i64 %1,
    iXLen %2)

  ret <vscale x 2 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.vslide1up.mask.nxv2i64.i64(
  <vscale x 2 x i64>,
  <vscale x 2 x i64>,
  i64,
  <vscale x 2 x i1>,
  iXLen,
  iXLen)

define <vscale x 2 x i64> @intrinsic_vslide1up_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; RV32-LABEL: intrinsic_vslide1up_mask_vx_nxv2i64_nxv2i64_i64:
; RV32: # %bb.0: # %entry
; RV32-NEXT: vsetvli a3, a2, e64, m2, ta, ma
; RV32-NEXT: slli a3, a3, 1
; RV32-NEXT: vsetvli zero, a3, e32, m2, ta, ma
; RV32-NEXT: vslide1up.vx v12, v10, a1
; RV32-NEXT: vslide1up.vx v10, v12, a0
; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
; RV32-NEXT: vmerge.vvm v8, v8, v10, v0
; RV32-NEXT: ret
;
; RV64-LABEL: intrinsic_vslide1up_mask_vx_nxv2i64_nxv2i64_i64:
; RV64: # %bb.0: # %entry
; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; RV64-NEXT: vslide1up.vx v8, v10, a0, v0.t
; RV64-NEXT: ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vslide1up.mask.nxv2i64.i64(
    <vscale x 2 x i64> %0,
    <vscale x 2 x i64> %1,
    i64 %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vslide1up.nxv4i64.i64(
  <vscale x 4 x i64>,
  <vscale x 4 x i64>,
  i64,
  iXLen)

define <vscale x 4 x i64> @intrinsic_vslide1up_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, iXLen %2) nounwind {
; RV32-LABEL: intrinsic_vslide1up_vx_nxv4i64_nxv4i64_i64:
; RV32: # %bb.0: # %entry
; RV32-NEXT: vsetvli a2, a2, e64, m4, ta, ma
; RV32-NEXT: slli a2, a2, 1
; RV32-NEXT: vsetvli zero, a2, e32, m4, ta, ma
; RV32-NEXT: vslide1up.vx v12, v8, a1
; RV32-NEXT: vslide1up.vx v8, v12, a0
; RV32-NEXT: ret
;
; RV64-LABEL: intrinsic_vslide1up_vx_nxv4i64_nxv4i64_i64:
; RV64: # %bb.0: # %entry
; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma
; RV64-NEXT: vslide1up.vx v12, v8, a0
; RV64-NEXT: vmv.v.v v8, v12
; RV64-NEXT: ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vslide1up.nxv4i64.i64(
    <vscale x 4 x i64> undef,
    <vscale x 4 x i64> %0,
    i64 %1,
    iXLen %2)

  ret <vscale x 4 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vslide1up.mask.nxv4i64.i64(
  <vscale x 4 x i64>,
  <vscale x 4 x i64>,
  i64,
  <vscale x 4 x i1>,
  iXLen,
  iXLen)

define <vscale x 4 x i64> @intrinsic_vslide1up_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; RV32-LABEL: intrinsic_vslide1up_mask_vx_nxv4i64_nxv4i64_i64:
; RV32: # %bb.0: # %entry
; RV32-NEXT: vsetvli a3, a2, e64, m4, ta, ma
; RV32-NEXT: slli a3, a3, 1
; RV32-NEXT: vsetvli zero, a3, e32, m4, ta, ma
; RV32-NEXT: vslide1up.vx v16, v12, a1
; RV32-NEXT: vslide1up.vx v12, v16, a0
; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
; RV32-NEXT: vmerge.vvm v8, v8, v12, v0
; RV32-NEXT: ret
;
; RV64-LABEL: intrinsic_vslide1up_mask_vx_nxv4i64_nxv4i64_i64:
; RV64: # %bb.0: # %entry
; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
; RV64-NEXT: vslide1up.vx v8, v12, a0, v0.t
; RV64-NEXT: ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vslide1up.mask.nxv4i64.i64(
    <vscale x 4 x i64> %0,
    <vscale x 4 x i64> %1,
    i64 %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x i64> %a
}

declare <vscale x 8 x i64> @llvm.riscv.vslide1up.nxv8i64.i64(
  <vscale x 8 x i64>,
  <vscale x 8 x i64>,
  i64,
  iXLen)

define <vscale x 8 x i64> @intrinsic_vslide1up_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, iXLen %2) nounwind {
; RV32-LABEL: intrinsic_vslide1up_vx_nxv8i64_nxv8i64_i64:
; RV32: # %bb.0: # %entry
; RV32-NEXT: vsetvli a2, a2, e64, m8, ta, ma
; RV32-NEXT: slli a2, a2, 1
; RV32-NEXT: vsetvli zero, a2, e32, m8, ta, ma
; RV32-NEXT: vslide1up.vx v16, v8, a1
; RV32-NEXT: vslide1up.vx v8, v16, a0
; RV32-NEXT: ret
;
; RV64-LABEL: intrinsic_vslide1up_vx_nxv8i64_nxv8i64_i64:
; RV64: # %bb.0: # %entry
; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT: vslide1up.vx v16, v8, a0
; RV64-NEXT: vmv.v.v v8, v16
; RV64-NEXT: ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vslide1up.nxv8i64.i64(
    <vscale x 8 x i64> undef,
    <vscale x 8 x i64> %0,
    i64 %1,
    iXLen %2)

  ret <vscale x 8 x i64> %a
}

declare <vscale x 8 x i64> @llvm.riscv.vslide1up.mask.nxv8i64.i64(
  <vscale x 8 x i64>,
  <vscale x 8 x i64>,
  i64,
  <vscale x 8 x i1>,
  iXLen,
  iXLen)

define <vscale x 8 x i64> @intrinsic_vslide1up_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; RV32-LABEL: intrinsic_vslide1up_mask_vx_nxv8i64_nxv8i64_i64:
; RV32: # %bb.0: # %entry
; RV32-NEXT: vsetvli a3, a2, e64, m8, ta, ma
; RV32-NEXT: slli a3, a3, 1
; RV32-NEXT: vsetvli zero, a3, e32, m8, ta, ma
; RV32-NEXT: vslide1up.vx v24, v16, a1
; RV32-NEXT: vslide1up.vx v16, v24, a0
; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; RV32-NEXT: vmerge.vvm v8, v8, v16, v0
; RV32-NEXT: ret
;
; RV64-LABEL: intrinsic_vslide1up_mask_vx_nxv8i64_nxv8i64_i64:
; RV64: # %bb.0: # %entry
; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu
; RV64-NEXT: vslide1up.vx v8, v16, a0, v0.t
; RV64-NEXT: ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vslide1up.mask.nxv8i64.i64(
    <vscale x 8 x i64> %0,
    <vscale x 8 x i64> %1,
    i64 %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x i64> %a
}