; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+v,+f -verify-machineinstrs \
; RUN:   < %s | FileCheck %s

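; This file exercises the llvm.riscv.vslide1up intrinsics in both their unmasked
; and masked forms for every integer element width (i8, i16, i32, i64) and LMUL
; (mf8 through m8). In the checks, the scalar operand arrives in a0 (a0/a1 for
; the split i64 cases below) and the requested vector length in the last GPR
; argument.
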
declare <vscale x 1 x i8> @llvm.riscv.vslide1up.nxv1i8.i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  i8,
  i32)

define <vscale x 1 x i8> @intrinsic_vslide1up_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_vx_nxv1i8_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vslide1up.vx v9, v8, a0
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vslide1up.nxv1i8.i8(
    <vscale x 1 x i8> undef,
    <vscale x 1 x i8> %0,
    i8 %1,
    i32 %2)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vslide1up.mask.nxv1i8.i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  i8,
  <vscale x 1 x i1>,
  i32,
  i32)

define <vscale x 1 x i8> @intrinsic_vslide1up_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv1i8_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vslide1up.vx v8, v9, a0, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vslide1up.mask.nxv1i8.i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    i8 %2,
    <vscale x 1 x i1> %3,
    i32 %4, i32 1)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 2 x i8> @llvm.riscv.vslide1up.nxv2i8.i8(
  <vscale x 2 x i8>,
  <vscale x 2 x i8>,
  i8,
  i32)

define <vscale x 2 x i8> @intrinsic_vslide1up_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_vx_nxv2i8_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vslide1up.vx v9, v8, a0
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vslide1up.nxv2i8.i8(
    <vscale x 2 x i8> undef,
    <vscale x 2 x i8> %0,
    i8 %1,
    i32 %2)

  ret <vscale x 2 x i8> %a
}

declare <vscale x 2 x i8> @llvm.riscv.vslide1up.mask.nxv2i8.i8(
  <vscale x 2 x i8>,
  <vscale x 2 x i8>,
  i8,
  <vscale x 2 x i1>,
  i32,
  i32)

define <vscale x 2 x i8> @intrinsic_vslide1up_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv2i8_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vslide1up.vx v8, v9, a0, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vslide1up.mask.nxv2i8.i8(
    <vscale x 2 x i8> %0,
    <vscale x 2 x i8> %1,
    i8 %2,
    <vscale x 2 x i1> %3,
    i32 %4, i32 1)

  ret <vscale x 2 x i8> %a
}

declare <vscale x 4 x i8> @llvm.riscv.vslide1up.nxv4i8.i8(
  <vscale x 4 x i8>,
  <vscale x 4 x i8>,
  i8,
  i32)

define <vscale x 4 x i8> @intrinsic_vslide1up_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_vx_nxv4i8_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vslide1up.vx v9, v8, a0
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vslide1up.nxv4i8.i8(
    <vscale x 4 x i8> undef,
    <vscale x 4 x i8> %0,
    i8 %1,
    i32 %2)

  ret <vscale x 4 x i8> %a
}

declare <vscale x 4 x i8> @llvm.riscv.vslide1up.mask.nxv4i8.i8(
  <vscale x 4 x i8>,
  <vscale x 4 x i8>,
  i8,
  <vscale x 4 x i1>,
  i32,
  i32)

define <vscale x 4 x i8> @intrinsic_vslide1up_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv4i8_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vslide1up.vx v8, v9, a0, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vslide1up.mask.nxv4i8.i8(
    <vscale x 4 x i8> %0,
    <vscale x 4 x i8> %1,
    i8 %2,
    <vscale x 4 x i1> %3,
    i32 %4, i32 1)

  ret <vscale x 4 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vslide1up.nxv8i8.i8(
  <vscale x 8 x i8>,
  <vscale x 8 x i8>,
  i8,
  i32)

define <vscale x 8 x i8> @intrinsic_vslide1up_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_vx_nxv8i8_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vslide1up.vx v9, v8, a0
; CHECK-NEXT: vmv.v.v v8, v9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vslide1up.nxv8i8.i8(
    <vscale x 8 x i8> undef,
    <vscale x 8 x i8> %0,
    i8 %1,
    i32 %2)

  ret <vscale x 8 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vslide1up.mask.nxv8i8.i8(
  <vscale x 8 x i8>,
  <vscale x 8 x i8>,
  i8,
  <vscale x 8 x i1>,
  i32,
  i32)

define <vscale x 8 x i8> @intrinsic_vslide1up_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv8i8_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vslide1up.vx v8, v9, a0, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vslide1up.mask.nxv8i8.i8(
    <vscale x 8 x i8> %0,
    <vscale x 8 x i8> %1,
    i8 %2,
    <vscale x 8 x i1> %3,
    i32 %4, i32 1)

  ret <vscale x 8 x i8> %a
}

declare <vscale x 16 x i8> @llvm.riscv.vslide1up.nxv16i8.i8(
  <vscale x 16 x i8>,
  <vscale x 16 x i8>,
  i8,
  i32)

define <vscale x 16 x i8> @intrinsic_vslide1up_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_vx_nxv16i8_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT: vslide1up.vx v10, v8, a0
; CHECK-NEXT: vmv.v.v v8, v10
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vslide1up.nxv16i8.i8(
    <vscale x 16 x i8> undef,
    <vscale x 16 x i8> %0,
    i8 %1,
    i32 %2)

  ret <vscale x 16 x i8> %a
}

declare <vscale x 16 x i8> @llvm.riscv.vslide1up.mask.nxv16i8.i8(
  <vscale x 16 x i8>,
  <vscale x 16 x i8>,
  i8,
  <vscale x 16 x i1>,
  i32,
  i32)

define <vscale x 16 x i8> @intrinsic_vslide1up_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv16i8_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vslide1up.vx v8, v10, a0, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vslide1up.mask.nxv16i8.i8(
    <vscale x 16 x i8> %0,
    <vscale x 16 x i8> %1,
    i8 %2,
    <vscale x 16 x i1> %3,
    i32 %4, i32 1)

  ret <vscale x 16 x i8> %a
}

declare <vscale x 32 x i8> @llvm.riscv.vslide1up.nxv32i8.i8(
  <vscale x 32 x i8>,
  <vscale x 32 x i8>,
  i8,
  i32)

define <vscale x 32 x i8> @intrinsic_vslide1up_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_vx_nxv32i8_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
; CHECK-NEXT: vslide1up.vx v12, v8, a0
; CHECK-NEXT: vmv.v.v v8, v12
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vslide1up.nxv32i8.i8(
    <vscale x 32 x i8> undef,
    <vscale x 32 x i8> %0,
    i8 %1,
    i32 %2)

  ret <vscale x 32 x i8> %a
}

declare <vscale x 32 x i8> @llvm.riscv.vslide1up.mask.nxv32i8.i8(
  <vscale x 32 x i8>,
  <vscale x 32 x i8>,
  i8,
  <vscale x 32 x i1>,
  i32,
  i32)

define <vscale x 32 x i8> @intrinsic_vslide1up_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv32i8_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT: vslide1up.vx v8, v12, a0, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vslide1up.mask.nxv32i8.i8(
    <vscale x 32 x i8> %0,
    <vscale x 32 x i8> %1,
    i8 %2,
    <vscale x 32 x i1> %3,
    i32 %4, i32 1)

  ret <vscale x 32 x i8> %a
}

declare <vscale x 64 x i8> @llvm.riscv.vslide1up.nxv64i8.i8(
  <vscale x 64 x i8>,
  <vscale x 64 x i8>,
  i8,
  i32)

define <vscale x 64 x i8> @intrinsic_vslide1up_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_vx_nxv64i8_nxv64i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT: vslide1up.vx v16, v8, a0
; CHECK-NEXT: vmv.v.v v8, v16
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 64 x i8> @llvm.riscv.vslide1up.nxv64i8.i8(
    <vscale x 64 x i8> undef,
    <vscale x 64 x i8> %0,
    i8 %1,
    i32 %2)

  ret <vscale x 64 x i8> %a
}

declare <vscale x 64 x i8> @llvm.riscv.vslide1up.mask.nxv64i8.i8(
  <vscale x 64 x i8>,
  <vscale x 64 x i8>,
  i8,
  <vscale x 64 x i1>,
  i32,
  i32)

define <vscale x 64 x i8> @intrinsic_vslide1up_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv64i8_nxv64i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu
; CHECK-NEXT: vslide1up.vx v8, v16, a0, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 64 x i8> @llvm.riscv.vslide1up.mask.nxv64i8.i8(
    <vscale x 64 x i8> %0,
    <vscale x 64 x i8> %1,
    i8 %2,
    <vscale x 64 x i1> %3,
    i32 %4, i32 1)

  ret <vscale x 64 x i8> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vslide1up.nxv1i16.i16(
  <vscale x 1 x i16>,
  <vscale x 1 x i16>,
  i16,
  i32)

define <vscale x 1 x i16> @intrinsic_vslide1up_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_vx_nxv1i16_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vslide1up.vx v9, v8, a0
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vslide1up.nxv1i16.i16(
    <vscale x 1 x i16> undef,
    <vscale x 1 x i16> %0,
    i16 %1,
    i32 %2)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vslide1up.mask.nxv1i16.i16(
  <vscale x 1 x i16>,
  <vscale x 1 x i16>,
  i16,
  <vscale x 1 x i1>,
  i32,
  i32)

define <vscale x 1 x i16> @intrinsic_vslide1up_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv1i16_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vslide1up.vx v8, v9, a0, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vslide1up.mask.nxv1i16.i16(
    <vscale x 1 x i16> %0,
    <vscale x 1 x i16> %1,
    i16 %2,
    <vscale x 1 x i1> %3,
    i32 %4, i32 1)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vslide1up.nxv2i16.i16(
  <vscale x 2 x i16>,
  <vscale x 2 x i16>,
  i16,
  i32)

define <vscale x 2 x i16> @intrinsic_vslide1up_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_vx_nxv2i16_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vslide1up.vx v9, v8, a0
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vslide1up.nxv2i16.i16(
    <vscale x 2 x i16> undef,
    <vscale x 2 x i16> %0,
    i16 %1,
    i32 %2)

  ret <vscale x 2 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vslide1up.mask.nxv2i16.i16(
  <vscale x 2 x i16>,
  <vscale x 2 x i16>,
  i16,
  <vscale x 2 x i1>,
  i32,
  i32)

define <vscale x 2 x i16> @intrinsic_vslide1up_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv2i16_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vslide1up.vx v8, v9, a0, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vslide1up.mask.nxv2i16.i16(
    <vscale x 2 x i16> %0,
    <vscale x 2 x i16> %1,
    i16 %2,
    <vscale x 2 x i1> %3,
    i32 %4, i32 1)

  ret <vscale x 2 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vslide1up.nxv4i16.i16(
  <vscale x 4 x i16>,
  <vscale x 4 x i16>,
  i16,
  i32)

define <vscale x 4 x i16> @intrinsic_vslide1up_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_vx_nxv4i16_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vslide1up.vx v9, v8, a0
; CHECK-NEXT: vmv.v.v v8, v9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vslide1up.nxv4i16.i16(
    <vscale x 4 x i16> undef,
    <vscale x 4 x i16> %0,
    i16 %1,
    i32 %2)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vslide1up.mask.nxv4i16.i16(
  <vscale x 4 x i16>,
  <vscale x 4 x i16>,
  i16,
  <vscale x 4 x i1>,
  i32,
  i32)

define <vscale x 4 x i16> @intrinsic_vslide1up_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv4i16_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vslide1up.vx v8, v9, a0, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vslide1up.mask.nxv4i16.i16(
    <vscale x 4 x i16> %0,
    <vscale x 4 x i16> %1,
    i16 %2,
    <vscale x 4 x i1> %3,
    i32 %4, i32 1)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vslide1up.nxv8i16.i16(
  <vscale x 8 x i16>,
  <vscale x 8 x i16>,
  i16,
  i32)

define <vscale x 8 x i16> @intrinsic_vslide1up_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_vx_nxv8i16_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vslide1up.vx v10, v8, a0
; CHECK-NEXT: vmv.v.v v8, v10
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vslide1up.nxv8i16.i16(
    <vscale x 8 x i16> undef,
    <vscale x 8 x i16> %0,
    i16 %1,
    i32 %2)

  ret <vscale x 8 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vslide1up.mask.nxv8i16.i16(
  <vscale x 8 x i16>,
  <vscale x 8 x i16>,
  i16,
  <vscale x 8 x i1>,
  i32,
  i32)

define <vscale x 8 x i16> @intrinsic_vslide1up_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv8i16_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vslide1up.vx v8, v10, a0, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vslide1up.mask.nxv8i16.i16(
    <vscale x 8 x i16> %0,
    <vscale x 8 x i16> %1,
    i16 %2,
    <vscale x 8 x i1> %3,
    i32 %4, i32 1)

  ret <vscale x 8 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vslide1up.nxv16i16.i16(
  <vscale x 16 x i16>,
  <vscale x 16 x i16>,
  i16,
  i32)

define <vscale x 16 x i16> @intrinsic_vslide1up_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_vx_nxv16i16_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT: vslide1up.vx v12, v8, a0
; CHECK-NEXT: vmv.v.v v8, v12
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vslide1up.nxv16i16.i16(
    <vscale x 16 x i16> undef,
    <vscale x 16 x i16> %0,
    i16 %1,
    i32 %2)

  ret <vscale x 16 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vslide1up.mask.nxv16i16.i16(
  <vscale x 16 x i16>,
  <vscale x 16 x i16>,
  i16,
  <vscale x 16 x i1>,
  i32,
  i32)

define <vscale x 16 x i16> @intrinsic_vslide1up_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv16i16_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vslide1up.vx v8, v12, a0, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vslide1up.mask.nxv16i16.i16(
    <vscale x 16 x i16> %0,
    <vscale x 16 x i16> %1,
    i16 %2,
    <vscale x 16 x i1> %3,
    i32 %4, i32 1)

  ret <vscale x 16 x i16> %a
}

declare <vscale x 32 x i16> @llvm.riscv.vslide1up.nxv32i16.i16(
  <vscale x 32 x i16>,
  <vscale x 32 x i16>,
  i16,
  i32)

define <vscale x 32 x i16> @intrinsic_vslide1up_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_vx_nxv32i16_nxv32i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
; CHECK-NEXT: vslide1up.vx v16, v8, a0
; CHECK-NEXT: vmv.v.v v8, v16
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vslide1up.nxv32i16.i16(
    <vscale x 32 x i16> undef,
    <vscale x 32 x i16> %0,
    i16 %1,
    i32 %2)

  ret <vscale x 32 x i16> %a
}

declare <vscale x 32 x i16> @llvm.riscv.vslide1up.mask.nxv32i16.i16(
  <vscale x 32 x i16>,
  <vscale x 32 x i16>,
  i16,
  <vscale x 32 x i1>,
  i32,
  i32)

define <vscale x 32 x i16> @intrinsic_vslide1up_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv32i16_nxv32i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu
; CHECK-NEXT: vslide1up.vx v8, v16, a0, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vslide1up.mask.nxv32i16.i16(
    <vscale x 32 x i16> %0,
    <vscale x 32 x i16> %1,
    i16 %2,
    <vscale x 32 x i1> %3,
    i32 %4, i32 1)

  ret <vscale x 32 x i16> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vslide1up.nxv1i32.i32(
  <vscale x 1 x i32>,
  <vscale x 1 x i32>,
  i32,
  i32)

define <vscale x 1 x i32> @intrinsic_vslide1up_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_vx_nxv1i32_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vslide1up.vx v9, v8, a0
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vslide1up.nxv1i32.i32(
    <vscale x 1 x i32> undef,
    <vscale x 1 x i32> %0,
    i32 %1,
    i32 %2)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vslide1up.mask.nxv1i32.i32(
  <vscale x 1 x i32>,
  <vscale x 1 x i32>,
  i32,
  <vscale x 1 x i1>,
  i32,
  i32)

define <vscale x 1 x i32> @intrinsic_vslide1up_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv1i32_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vslide1up.vx v8, v9, a0, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vslide1up.mask.nxv1i32.i32(
    <vscale x 1 x i32> %0,
    <vscale x 1 x i32> %1,
    i32 %2,
    <vscale x 1 x i1> %3,
    i32 %4, i32 1)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vslide1up.nxv2i32.i32(
  <vscale x 2 x i32>,
  <vscale x 2 x i32>,
  i32,
  i32)

define <vscale x 2 x i32> @intrinsic_vslide1up_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_vx_nxv2i32_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vslide1up.vx v9, v8, a0
; CHECK-NEXT: vmv.v.v v8, v9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vslide1up.nxv2i32.i32(
    <vscale x 2 x i32> undef,
    <vscale x 2 x i32> %0,
    i32 %1,
    i32 %2)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vslide1up.mask.nxv2i32.i32(
  <vscale x 2 x i32>,
  <vscale x 2 x i32>,
  i32,
  <vscale x 2 x i1>,
  i32,
  i32)

define <vscale x 2 x i32> @intrinsic_vslide1up_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv2i32_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vslide1up.vx v8, v9, a0, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vslide1up.mask.nxv2i32.i32(
    <vscale x 2 x i32> %0,
    <vscale x 2 x i32> %1,
    i32 %2,
    <vscale x 2 x i1> %3,
    i32 %4, i32 1)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vslide1up.nxv4i32.i32(
  <vscale x 4 x i32>,
  <vscale x 4 x i32>,
  i32,
  i32)

define <vscale x 4 x i32> @intrinsic_vslide1up_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_vx_nxv4i32_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vslide1up.vx v10, v8, a0
; CHECK-NEXT: vmv.v.v v8, v10
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vslide1up.nxv4i32.i32(
    <vscale x 4 x i32> undef,
    <vscale x 4 x i32> %0,
    i32 %1,
    i32 %2)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vslide1up.mask.nxv4i32.i32(
  <vscale x 4 x i32>,
  <vscale x 4 x i32>,
  i32,
  <vscale x 4 x i1>,
  i32,
  i32)

define <vscale x 4 x i32> @intrinsic_vslide1up_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv4i32_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vslide1up.vx v8, v10, a0, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vslide1up.mask.nxv4i32.i32(
    <vscale x 4 x i32> %0,
    <vscale x 4 x i32> %1,
    i32 %2,
    <vscale x 4 x i1> %3,
    i32 %4, i32 1)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vslide1up.nxv8i32.i32(
  <vscale x 8 x i32>,
  <vscale x 8 x i32>,
  i32,
  i32)

define <vscale x 8 x i32> @intrinsic_vslide1up_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_vx_nxv8i32_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT: vslide1up.vx v12, v8, a0
; CHECK-NEXT: vmv.v.v v8, v12
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vslide1up.nxv8i32.i32(
    <vscale x 8 x i32> undef,
    <vscale x 8 x i32> %0,
    i32 %1,
    i32 %2)

  ret <vscale x 8 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vslide1up.mask.nxv8i32.i32(
  <vscale x 8 x i32>,
  <vscale x 8 x i32>,
  i32,
  <vscale x 8 x i1>,
  i32,
  i32)

define <vscale x 8 x i32> @intrinsic_vslide1up_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv8i32_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT: vslide1up.vx v8, v12, a0, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vslide1up.mask.nxv8i32.i32(
    <vscale x 8 x i32> %0,
    <vscale x 8 x i32> %1,
    i32 %2,
    <vscale x 8 x i1> %3,
    i32 %4, i32 1)

  ret <vscale x 8 x i32> %a
}

declare <vscale x 16 x i32> @llvm.riscv.vslide1up.nxv16i32.i32(
  <vscale x 16 x i32>,
  <vscale x 16 x i32>,
  i32,
  i32)

define <vscale x 16 x i32> @intrinsic_vslide1up_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_vx_nxv16i32_nxv16i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT: vslide1up.vx v16, v8, a0
; CHECK-NEXT: vmv.v.v v8, v16
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vslide1up.nxv16i32.i32(
    <vscale x 16 x i32> undef,
    <vscale x 16 x i32> %0,
    i32 %1,
    i32 %2)

  ret <vscale x 16 x i32> %a
}

declare <vscale x 16 x i32> @llvm.riscv.vslide1up.mask.nxv16i32.i32(
  <vscale x 16 x i32>,
  <vscale x 16 x i32>,
  i32,
  <vscale x 16 x i1>,
  i32,
  i32)

define <vscale x 16 x i32> @intrinsic_vslide1up_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv16i32_nxv16i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
; CHECK-NEXT: vslide1up.vx v8, v16, a0, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vslide1up.mask.nxv16i32.i32(
    <vscale x 16 x i32> %0,
    <vscale x 16 x i32> %1,
    i32 %2,
    <vscale x 16 x i1> %3,
    i32 %4, i32 1)

  ret <vscale x 16 x i32> %a
}

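; For the i64 element types below, rv32 has no 64-bit scalar registers, so the
; checks expect the lowering to double the requested VL at e32 and insert the
; two halves of the scalar with back-to-back vslide1up.vx instructions (a1 then
; a0); the masked variants compute the result unmasked and then apply the mask
; with vmerge.vvm at e64.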
declare <vscale x 1 x i64> @llvm.riscv.vslide1up.nxv1i64.i64(
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  i64,
  i32)

define <vscale x 1 x i64> @intrinsic_vslide1up_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_vx_nxv1i64_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, a2, e64, m1, ta, ma
; CHECK-NEXT: slli a2, a2, 1
; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
; CHECK-NEXT: vslide1up.vx v9, v8, a1
; CHECK-NEXT: vslide1up.vx v8, v9, a0
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vslide1up.nxv1i64.i64(
    <vscale x 1 x i64> undef,
    <vscale x 1 x i64> %0,
    i64 %1,
    i32 %2)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vslide1up.mask.nxv1i64.i64(
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  i64,
  <vscale x 1 x i1>,
  i32,
  i32)

define <vscale x 1 x i64> @intrinsic_vslide1up_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv1i64_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a3, a2, e64, m1, ta, ma
; CHECK-NEXT: slli a3, a3, 1
; CHECK-NEXT: vsetvli zero, a3, e32, m1, ta, ma
; CHECK-NEXT: vslide1up.vx v10, v9, a1
; CHECK-NEXT: vslide1up.vx v9, v10, a0
; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma
; CHECK-NEXT: vmerge.vvm v8, v8, v9, v0
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vslide1up.mask.nxv1i64.i64(
    <vscale x 1 x i64> %0,
    <vscale x 1 x i64> %1,
    i64 %2,
    <vscale x 1 x i1> %3,
    i32 %4, i32 1)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.vslide1up.nxv2i64.i64(
  <vscale x 2 x i64>,
  <vscale x 2 x i64>,
  i64,
  i32)

define <vscale x 2 x i64> @intrinsic_vslide1up_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_vx_nxv2i64_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, a2, e64, m2, ta, ma
; CHECK-NEXT: slli a2, a2, 1
; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma
; CHECK-NEXT: vslide1up.vx v10, v8, a1
; CHECK-NEXT: vslide1up.vx v8, v10, a0
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vslide1up.nxv2i64.i64(
    <vscale x 2 x i64> undef,
    <vscale x 2 x i64> %0,
    i64 %1,
    i32 %2)

  ret <vscale x 2 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.vslide1up.mask.nxv2i64.i64(
  <vscale x 2 x i64>,
  <vscale x 2 x i64>,
  i64,
  <vscale x 2 x i1>,
  i32,
  i32)

define <vscale x 2 x i64> @intrinsic_vslide1up_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv2i64_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a3, a2, e64, m2, ta, ma
; CHECK-NEXT: slli a3, a3, 1
; CHECK-NEXT: vsetvli zero, a3, e32, m2, ta, ma
; CHECK-NEXT: vslide1up.vx v12, v10, a1
; CHECK-NEXT: vslide1up.vx v10, v12, a0
; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma
; CHECK-NEXT: vmerge.vvm v8, v8, v10, v0
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vslide1up.mask.nxv2i64.i64(
    <vscale x 2 x i64> %0,
    <vscale x 2 x i64> %1,
    i64 %2,
    <vscale x 2 x i1> %3,
    i32 %4, i32 1)

  ret <vscale x 2 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vslide1up.nxv4i64.i64(
  <vscale x 4 x i64>,
  <vscale x 4 x i64>,
  i64,
  i32)

define <vscale x 4 x i64> @intrinsic_vslide1up_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_vx_nxv4i64_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, a2, e64, m4, ta, ma
; CHECK-NEXT: slli a2, a2, 1
; CHECK-NEXT: vsetvli zero, a2, e32, m4, ta, ma
; CHECK-NEXT: vslide1up.vx v12, v8, a1
; CHECK-NEXT: vslide1up.vx v8, v12, a0
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vslide1up.nxv4i64.i64(
    <vscale x 4 x i64> undef,
    <vscale x 4 x i64> %0,
    i64 %1,
    i32 %2)

  ret <vscale x 4 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vslide1up.mask.nxv4i64.i64(
  <vscale x 4 x i64>,
  <vscale x 4 x i64>,
  i64,
  <vscale x 4 x i1>,
  i32,
  i32)

define <vscale x 4 x i64> @intrinsic_vslide1up_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv4i64_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a3, a2, e64, m4, ta, ma
; CHECK-NEXT: slli a3, a3, 1
; CHECK-NEXT: vsetvli zero, a3, e32, m4, ta, ma
; CHECK-NEXT: vslide1up.vx v16, v12, a1
; CHECK-NEXT: vslide1up.vx v12, v16, a0
; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma
; CHECK-NEXT: vmerge.vvm v8, v8, v12, v0
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vslide1up.mask.nxv4i64.i64(
    <vscale x 4 x i64> %0,
    <vscale x 4 x i64> %1,
    i64 %2,
    <vscale x 4 x i1> %3,
    i32 %4, i32 1)

  ret <vscale x 4 x i64> %a
}

declare <vscale x 8 x i64> @llvm.riscv.vslide1up.nxv8i64.i64(
  <vscale x 8 x i64>,
  <vscale x 8 x i64>,
  i64,
  i32)

define <vscale x 8 x i64> @intrinsic_vslide1up_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_vx_nxv8i64_nxv8i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, a2, e64, m8, ta, ma
; CHECK-NEXT: slli a2, a2, 1
; CHECK-NEXT: vsetvli zero, a2, e32, m8, ta, ma
; CHECK-NEXT: vslide1up.vx v16, v8, a1
; CHECK-NEXT: vslide1up.vx v8, v16, a0
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vslide1up.nxv8i64.i64(
    <vscale x 8 x i64> undef,
    <vscale x 8 x i64> %0,
    i64 %1,
    i32 %2)

  ret <vscale x 8 x i64> %a
}

declare <vscale x 8 x i64> @llvm.riscv.vslide1up.mask.nxv8i64.i64(
  <vscale x 8 x i64>,
  <vscale x 8 x i64>,
  i64,
  <vscale x 8 x i1>,
  i32,
  i32)

define <vscale x 8 x i64> @intrinsic_vslide1up_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv8i64_nxv8i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a3, a2, e64, m8, ta, ma
; CHECK-NEXT: slli a3, a3, 1
; CHECK-NEXT: vsetvli zero, a3, e32, m8, ta, ma
; CHECK-NEXT: vslide1up.vx v24, v16, a1
; CHECK-NEXT: vslide1up.vx v16, v24, a0
; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; CHECK-NEXT: vmerge.vvm v8, v8, v16, v0
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vslide1up.mask.nxv8i64.i64(
    <vscale x 8 x i64> %0,
    <vscale x 8 x i64> %1,
    i64 %2,
    <vscale x 8 x i1> %3,
    i32 %4, i32 1)

  ret <vscale x 8 x i64> %a
}