; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
; RUN:   < %s | FileCheck %s
; Saturating signed add, vector-vector form, SEW=8 (nxv1i8..nxv64i8).
; Restored from the lossy extraction: entry labels, call operand lines,
; declare parameter lists and closing braces were missing; the surviving
; CHECK lines pin the expected vsetvli/register assignments.
declare <vscale x 1 x i8> @llvm.riscv.vsadd.nxv1i8.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  i32);

define <vscale x 1 x i8> @intrinsic_vsadd_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vsadd_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vsadd.vv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vsadd.nxv1i8.nxv1i8(
    <vscale x 1 x i8> undef,
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    i32 %2)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vsadd.mask.nxv1i8.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i1>,
  i32,
  i32);

define <vscale x 1 x i8> @intrinsic_vsadd_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT:    vsadd.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vsadd.mask.nxv1i8.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    <vscale x 1 x i8> %2,
    <vscale x 1 x i1> %3,
    i32 %4, i32 1)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 2 x i8> @llvm.riscv.vsadd.nxv2i8.nxv2i8(
  <vscale x 2 x i8>,
  <vscale x 2 x i8>,
  <vscale x 2 x i8>,
  i32);

define <vscale x 2 x i8> @intrinsic_vsadd_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vsadd_vv_nxv2i8_nxv2i8_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT:    vsadd.vv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vsadd.nxv2i8.nxv2i8(
    <vscale x 2 x i8> undef,
    <vscale x 2 x i8> %0,
    <vscale x 2 x i8> %1,
    i32 %2)

  ret <vscale x 2 x i8> %a
}

declare <vscale x 2 x i8> @llvm.riscv.vsadd.mask.nxv2i8.nxv2i8(
  <vscale x 2 x i8>,
  <vscale x 2 x i8>,
  <vscale x 2 x i8>,
  <vscale x 2 x i1>,
  i32,
  i32);

define <vscale x 2 x i8> @intrinsic_vsadd_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv2i8_nxv2i8_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
; CHECK-NEXT:    vsadd.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vsadd.mask.nxv2i8.nxv2i8(
    <vscale x 2 x i8> %0,
    <vscale x 2 x i8> %1,
    <vscale x 2 x i8> %2,
    <vscale x 2 x i1> %3,
    i32 %4, i32 1)

  ret <vscale x 2 x i8> %a
}

declare <vscale x 4 x i8> @llvm.riscv.vsadd.nxv4i8.nxv4i8(
  <vscale x 4 x i8>,
  <vscale x 4 x i8>,
  <vscale x 4 x i8>,
  i32);

define <vscale x 4 x i8> @intrinsic_vsadd_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vsadd_vv_nxv4i8_nxv4i8_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT:    vsadd.vv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vsadd.nxv4i8.nxv4i8(
    <vscale x 4 x i8> undef,
    <vscale x 4 x i8> %0,
    <vscale x 4 x i8> %1,
    i32 %2)

  ret <vscale x 4 x i8> %a
}

declare <vscale x 4 x i8> @llvm.riscv.vsadd.mask.nxv4i8.nxv4i8(
  <vscale x 4 x i8>,
  <vscale x 4 x i8>,
  <vscale x 4 x i8>,
  <vscale x 4 x i1>,
  i32,
  i32);

define <vscale x 4 x i8> @intrinsic_vsadd_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv4i8_nxv4i8_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
; CHECK-NEXT:    vsadd.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vsadd.mask.nxv4i8.nxv4i8(
    <vscale x 4 x i8> %0,
    <vscale x 4 x i8> %1,
    <vscale x 4 x i8> %2,
    <vscale x 4 x i1> %3,
    i32 %4, i32 1)

  ret <vscale x 4 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vsadd.nxv8i8.nxv8i8(
  <vscale x 8 x i8>,
  <vscale x 8 x i8>,
  <vscale x 8 x i8>,
  i32);

define <vscale x 8 x i8> @intrinsic_vsadd_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vsadd_vv_nxv8i8_nxv8i8_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT:    vsadd.vv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vsadd.nxv8i8.nxv8i8(
    <vscale x 8 x i8> undef,
    <vscale x 8 x i8> %0,
    <vscale x 8 x i8> %1,
    i32 %2)

  ret <vscale x 8 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vsadd.mask.nxv8i8.nxv8i8(
  <vscale x 8 x i8>,
  <vscale x 8 x i8>,
  <vscale x 8 x i8>,
  <vscale x 8 x i1>,
  i32,
  i32);

define <vscale x 8 x i8> @intrinsic_vsadd_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv8i8_nxv8i8_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
; CHECK-NEXT:    vsadd.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vsadd.mask.nxv8i8.nxv8i8(
    <vscale x 8 x i8> %0,
    <vscale x 8 x i8> %1,
    <vscale x 8 x i8> %2,
    <vscale x 8 x i1> %3,
    i32 %4, i32 1)

  ret <vscale x 8 x i8> %a
}

declare <vscale x 16 x i8> @llvm.riscv.vsadd.nxv16i8.nxv16i8(
  <vscale x 16 x i8>,
  <vscale x 16 x i8>,
  <vscale x 16 x i8>,
  i32);

define <vscale x 16 x i8> @intrinsic_vsadd_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vsadd_vv_nxv16i8_nxv16i8_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT:    vsadd.vv v8, v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vsadd.nxv16i8.nxv16i8(
    <vscale x 16 x i8> undef,
    <vscale x 16 x i8> %0,
    <vscale x 16 x i8> %1,
    i32 %2)

  ret <vscale x 16 x i8> %a
}

declare <vscale x 16 x i8> @llvm.riscv.vsadd.mask.nxv16i8.nxv16i8(
  <vscale x 16 x i8>,
  <vscale x 16 x i8>,
  <vscale x 16 x i8>,
  <vscale x 16 x i1>,
  i32,
  i32);

define <vscale x 16 x i8> @intrinsic_vsadd_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv16i8_nxv16i8_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
; CHECK-NEXT:    vsadd.vv v8, v10, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vsadd.mask.nxv16i8.nxv16i8(
    <vscale x 16 x i8> %0,
    <vscale x 16 x i8> %1,
    <vscale x 16 x i8> %2,
    <vscale x 16 x i1> %3,
    i32 %4, i32 1)

  ret <vscale x 16 x i8> %a
}

declare <vscale x 32 x i8> @llvm.riscv.vsadd.nxv32i8.nxv32i8(
  <vscale x 32 x i8>,
  <vscale x 32 x i8>,
  <vscale x 32 x i8>,
  i32);

define <vscale x 32 x i8> @intrinsic_vsadd_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vsadd_vv_nxv32i8_nxv32i8_nxv32i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT:    vsadd.vv v8, v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vsadd.nxv32i8.nxv32i8(
    <vscale x 32 x i8> undef,
    <vscale x 32 x i8> %0,
    <vscale x 32 x i8> %1,
    i32 %2)

  ret <vscale x 32 x i8> %a
}

declare <vscale x 32 x i8> @llvm.riscv.vsadd.mask.nxv32i8.nxv32i8(
  <vscale x 32 x i8>,
  <vscale x 32 x i8>,
  <vscale x 32 x i8>,
  <vscale x 32 x i1>,
  i32,
  i32);

define <vscale x 32 x i8> @intrinsic_vsadd_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv32i8_nxv32i8_nxv32i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
; CHECK-NEXT:    vsadd.vv v8, v12, v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vsadd.mask.nxv32i8.nxv32i8(
    <vscale x 32 x i8> %0,
    <vscale x 32 x i8> %1,
    <vscale x 32 x i8> %2,
    <vscale x 32 x i1> %3,
    i32 %4, i32 1)

  ret <vscale x 32 x i8> %a
}

declare <vscale x 64 x i8> @llvm.riscv.vsadd.nxv64i8.nxv64i8(
  <vscale x 64 x i8>,
  <vscale x 64 x i8>,
  <vscale x 64 x i8>,
  i32);

define <vscale x 64 x i8> @intrinsic_vsadd_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vsadd_vv_nxv64i8_nxv64i8_nxv64i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT:    vsadd.vv v8, v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 64 x i8> @llvm.riscv.vsadd.nxv64i8.nxv64i8(
    <vscale x 64 x i8> undef,
    <vscale x 64 x i8> %0,
    <vscale x 64 x i8> %1,
    i32 %2)

  ret <vscale x 64 x i8> %a
}

declare <vscale x 64 x i8> @llvm.riscv.vsadd.mask.nxv64i8.nxv64i8(
  <vscale x 64 x i8>,
  <vscale x 64 x i8>,
  <vscale x 64 x i8>,
  <vscale x 64 x i1>,
  i32,
  i32);

define <vscale x 64 x i8> @intrinsic_vsadd_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv64i8_nxv64i8_nxv64i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vl8r.v v24, (a0)
; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
; CHECK-NEXT:    vsadd.vv v8, v16, v24, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 64 x i8> @llvm.riscv.vsadd.mask.nxv64i8.nxv64i8(
    <vscale x 64 x i8> %0,
    <vscale x 64 x i8> %1,
    <vscale x 64 x i8> %2,
    <vscale x 64 x i1> %3,
    i32 %4, i32 1)

  ret <vscale x 64 x i8> %a
}
; Saturating signed add, vector-vector form, SEW=16 (nxv1i16..nxv32i16).
; Restored from the lossy extraction (see i8 section note).
declare <vscale x 1 x i16> @llvm.riscv.vsadd.nxv1i16.nxv1i16(
  <vscale x 1 x i16>,
  <vscale x 1 x i16>,
  <vscale x 1 x i16>,
  i32);

define <vscale x 1 x i16> @intrinsic_vsadd_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vsadd_vv_nxv1i16_nxv1i16_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vsadd.vv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vsadd.nxv1i16.nxv1i16(
    <vscale x 1 x i16> undef,
    <vscale x 1 x i16> %0,
    <vscale x 1 x i16> %1,
    i32 %2)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vsadd.mask.nxv1i16.nxv1i16(
  <vscale x 1 x i16>,
  <vscale x 1 x i16>,
  <vscale x 1 x i16>,
  <vscale x 1 x i1>,
  i32,
  i32);

define <vscale x 1 x i16> @intrinsic_vsadd_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv1i16_nxv1i16_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT:    vsadd.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vsadd.mask.nxv1i16.nxv1i16(
    <vscale x 1 x i16> %0,
    <vscale x 1 x i16> %1,
    <vscale x 1 x i16> %2,
    <vscale x 1 x i1> %3,
    i32 %4, i32 1)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vsadd.nxv2i16.nxv2i16(
  <vscale x 2 x i16>,
  <vscale x 2 x i16>,
  <vscale x 2 x i16>,
  i32);

define <vscale x 2 x i16> @intrinsic_vsadd_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vsadd_vv_nxv2i16_nxv2i16_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vsadd.vv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vsadd.nxv2i16.nxv2i16(
    <vscale x 2 x i16> undef,
    <vscale x 2 x i16> %0,
    <vscale x 2 x i16> %1,
    i32 %2)

  ret <vscale x 2 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vsadd.mask.nxv2i16.nxv2i16(
  <vscale x 2 x i16>,
  <vscale x 2 x i16>,
  <vscale x 2 x i16>,
  <vscale x 2 x i1>,
  i32,
  i32);

define <vscale x 2 x i16> @intrinsic_vsadd_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv2i16_nxv2i16_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT:    vsadd.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vsadd.mask.nxv2i16.nxv2i16(
    <vscale x 2 x i16> %0,
    <vscale x 2 x i16> %1,
    <vscale x 2 x i16> %2,
    <vscale x 2 x i1> %3,
    i32 %4, i32 1)

  ret <vscale x 2 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vsadd.nxv4i16.nxv4i16(
  <vscale x 4 x i16>,
  <vscale x 4 x i16>,
  <vscale x 4 x i16>,
  i32);

define <vscale x 4 x i16> @intrinsic_vsadd_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vsadd_vv_nxv4i16_nxv4i16_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vsadd.vv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vsadd.nxv4i16.nxv4i16(
    <vscale x 4 x i16> undef,
    <vscale x 4 x i16> %0,
    <vscale x 4 x i16> %1,
    i32 %2)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vsadd.mask.nxv4i16.nxv4i16(
  <vscale x 4 x i16>,
  <vscale x 4 x i16>,
  <vscale x 4 x i16>,
  <vscale x 4 x i1>,
  i32,
  i32);

define <vscale x 4 x i16> @intrinsic_vsadd_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv4i16_nxv4i16_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT:    vsadd.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vsadd.mask.nxv4i16.nxv4i16(
    <vscale x 4 x i16> %0,
    <vscale x 4 x i16> %1,
    <vscale x 4 x i16> %2,
    <vscale x 4 x i1> %3,
    i32 %4, i32 1)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vsadd.nxv8i16.nxv8i16(
  <vscale x 8 x i16>,
  <vscale x 8 x i16>,
  <vscale x 8 x i16>,
  i32);

define <vscale x 8 x i16> @intrinsic_vsadd_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vsadd_vv_nxv8i16_nxv8i16_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    vsadd.vv v8, v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vsadd.nxv8i16.nxv8i16(
    <vscale x 8 x i16> undef,
    <vscale x 8 x i16> %0,
    <vscale x 8 x i16> %1,
    i32 %2)

  ret <vscale x 8 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vsadd.mask.nxv8i16.nxv8i16(
  <vscale x 8 x i16>,
  <vscale x 8 x i16>,
  <vscale x 8 x i16>,
  <vscale x 8 x i1>,
  i32,
  i32);

define <vscale x 8 x i16> @intrinsic_vsadd_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv8i16_nxv8i16_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT:    vsadd.vv v8, v10, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vsadd.mask.nxv8i16.nxv8i16(
    <vscale x 8 x i16> %0,
    <vscale x 8 x i16> %1,
    <vscale x 8 x i16> %2,
    <vscale x 8 x i1> %3,
    i32 %4, i32 1)

  ret <vscale x 8 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vsadd.nxv16i16.nxv16i16(
  <vscale x 16 x i16>,
  <vscale x 16 x i16>,
  <vscale x 16 x i16>,
  i32);

define <vscale x 16 x i16> @intrinsic_vsadd_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vsadd_vv_nxv16i16_nxv16i16_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT:    vsadd.vv v8, v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vsadd.nxv16i16.nxv16i16(
    <vscale x 16 x i16> undef,
    <vscale x 16 x i16> %0,
    <vscale x 16 x i16> %1,
    i32 %2)

  ret <vscale x 16 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vsadd.mask.nxv16i16.nxv16i16(
  <vscale x 16 x i16>,
  <vscale x 16 x i16>,
  <vscale x 16 x i16>,
  <vscale x 16 x i1>,
  i32,
  i32);

define <vscale x 16 x i16> @intrinsic_vsadd_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv16i16_nxv16i16_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT:    vsadd.vv v8, v12, v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vsadd.mask.nxv16i16.nxv16i16(
    <vscale x 16 x i16> %0,
    <vscale x 16 x i16> %1,
    <vscale x 16 x i16> %2,
    <vscale x 16 x i1> %3,
    i32 %4, i32 1)

  ret <vscale x 16 x i16> %a
}

declare <vscale x 32 x i16> @llvm.riscv.vsadd.nxv32i16.nxv32i16(
  <vscale x 32 x i16>,
  <vscale x 32 x i16>,
  <vscale x 32 x i16>,
  i32);

define <vscale x 32 x i16> @intrinsic_vsadd_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vsadd_vv_nxv32i16_nxv32i16_nxv32i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT:    vsadd.vv v8, v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vsadd.nxv32i16.nxv32i16(
    <vscale x 32 x i16> undef,
    <vscale x 32 x i16> %0,
    <vscale x 32 x i16> %1,
    i32 %2)

  ret <vscale x 32 x i16> %a
}

declare <vscale x 32 x i16> @llvm.riscv.vsadd.mask.nxv32i16.nxv32i16(
  <vscale x 32 x i16>,
  <vscale x 32 x i16>,
  <vscale x 32 x i16>,
  <vscale x 32 x i1>,
  i32,
  i32);

define <vscale x 32 x i16> @intrinsic_vsadd_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv32i16_nxv32i16_nxv32i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vl8re16.v v24, (a0)
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
; CHECK-NEXT:    vsadd.vv v8, v16, v24, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vsadd.mask.nxv32i16.nxv32i16(
    <vscale x 32 x i16> %0,
    <vscale x 32 x i16> %1,
    <vscale x 32 x i16> %2,
    <vscale x 32 x i1> %3,
    i32 %4, i32 1)

  ret <vscale x 32 x i16> %a
}
; Saturating signed add, vector-vector form, SEW=32 (nxv1i32..nxv16i32).
; Restored from the lossy extraction (see i8 section note).
declare <vscale x 1 x i32> @llvm.riscv.vsadd.nxv1i32.nxv1i32(
  <vscale x 1 x i32>,
  <vscale x 1 x i32>,
  <vscale x 1 x i32>,
  i32);

define <vscale x 1 x i32> @intrinsic_vsadd_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vsadd_vv_nxv1i32_nxv1i32_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vsadd.vv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vsadd.nxv1i32.nxv1i32(
    <vscale x 1 x i32> undef,
    <vscale x 1 x i32> %0,
    <vscale x 1 x i32> %1,
    i32 %2)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vsadd.mask.nxv1i32.nxv1i32(
  <vscale x 1 x i32>,
  <vscale x 1 x i32>,
  <vscale x 1 x i32>,
  <vscale x 1 x i1>,
  i32,
  i32);

define <vscale x 1 x i32> @intrinsic_vsadd_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv1i32_nxv1i32_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT:    vsadd.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vsadd.mask.nxv1i32.nxv1i32(
    <vscale x 1 x i32> %0,
    <vscale x 1 x i32> %1,
    <vscale x 1 x i32> %2,
    <vscale x 1 x i1> %3,
    i32 %4, i32 1)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vsadd.nxv2i32.nxv2i32(
  <vscale x 2 x i32>,
  <vscale x 2 x i32>,
  <vscale x 2 x i32>,
  i32);

define <vscale x 2 x i32> @intrinsic_vsadd_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vsadd_vv_nxv2i32_nxv2i32_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vsadd.vv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vsadd.nxv2i32.nxv2i32(
    <vscale x 2 x i32> undef,
    <vscale x 2 x i32> %0,
    <vscale x 2 x i32> %1,
    i32 %2)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vsadd.mask.nxv2i32.nxv2i32(
  <vscale x 2 x i32>,
  <vscale x 2 x i32>,
  <vscale x 2 x i32>,
  <vscale x 2 x i1>,
  i32,
  i32);

define <vscale x 2 x i32> @intrinsic_vsadd_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv2i32_nxv2i32_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT:    vsadd.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vsadd.mask.nxv2i32.nxv2i32(
    <vscale x 2 x i32> %0,
    <vscale x 2 x i32> %1,
    <vscale x 2 x i32> %2,
    <vscale x 2 x i1> %3,
    i32 %4, i32 1)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vsadd.nxv4i32.nxv4i32(
  <vscale x 4 x i32>,
  <vscale x 4 x i32>,
  <vscale x 4 x i32>,
  i32);

define <vscale x 4 x i32> @intrinsic_vsadd_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vsadd_vv_nxv4i32_nxv4i32_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vsadd.vv v8, v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vsadd.nxv4i32.nxv4i32(
    <vscale x 4 x i32> undef,
    <vscale x 4 x i32> %0,
    <vscale x 4 x i32> %1,
    i32 %2)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vsadd.mask.nxv4i32.nxv4i32(
  <vscale x 4 x i32>,
  <vscale x 4 x i32>,
  <vscale x 4 x i32>,
  <vscale x 4 x i1>,
  i32,
  i32);

define <vscale x 4 x i32> @intrinsic_vsadd_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv4i32_nxv4i32_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT:    vsadd.vv v8, v10, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vsadd.mask.nxv4i32.nxv4i32(
    <vscale x 4 x i32> %0,
    <vscale x 4 x i32> %1,
    <vscale x 4 x i32> %2,
    <vscale x 4 x i1> %3,
    i32 %4, i32 1)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vsadd.nxv8i32.nxv8i32(
  <vscale x 8 x i32>,
  <vscale x 8 x i32>,
  <vscale x 8 x i32>,
  i32);

define <vscale x 8 x i32> @intrinsic_vsadd_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vsadd_vv_nxv8i32_nxv8i32_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vsadd.vv v8, v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vsadd.nxv8i32.nxv8i32(
    <vscale x 8 x i32> undef,
    <vscale x 8 x i32> %0,
    <vscale x 8 x i32> %1,
    i32 %2)

  ret <vscale x 8 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vsadd.mask.nxv8i32.nxv8i32(
  <vscale x 8 x i32>,
  <vscale x 8 x i32>,
  <vscale x 8 x i32>,
  <vscale x 8 x i1>,
  i32,
  i32);

define <vscale x 8 x i32> @intrinsic_vsadd_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv8i32_nxv8i32_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT:    vsadd.vv v8, v12, v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vsadd.mask.nxv8i32.nxv8i32(
    <vscale x 8 x i32> %0,
    <vscale x 8 x i32> %1,
    <vscale x 8 x i32> %2,
    <vscale x 8 x i1> %3,
    i32 %4, i32 1)

  ret <vscale x 8 x i32> %a
}

declare <vscale x 16 x i32> @llvm.riscv.vsadd.nxv16i32.nxv16i32(
  <vscale x 16 x i32>,
  <vscale x 16 x i32>,
  <vscale x 16 x i32>,
  i32);

define <vscale x 16 x i32> @intrinsic_vsadd_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vsadd_vv_nxv16i32_nxv16i32_nxv16i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT:    vsadd.vv v8, v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vsadd.nxv16i32.nxv16i32(
    <vscale x 16 x i32> undef,
    <vscale x 16 x i32> %0,
    <vscale x 16 x i32> %1,
    i32 %2)

  ret <vscale x 16 x i32> %a
}

declare <vscale x 16 x i32> @llvm.riscv.vsadd.mask.nxv16i32.nxv16i32(
  <vscale x 16 x i32>,
  <vscale x 16 x i32>,
  <vscale x 16 x i32>,
  <vscale x 16 x i1>,
  i32,
  i32);

define <vscale x 16 x i32> @intrinsic_vsadd_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv16i32_nxv16i32_nxv16i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vl8re32.v v24, (a0)
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
; CHECK-NEXT:    vsadd.vv v8, v16, v24, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vsadd.mask.nxv16i32.nxv16i32(
    <vscale x 16 x i32> %0,
    <vscale x 16 x i32> %1,
    <vscale x 16 x i32> %2,
    <vscale x 16 x i1> %3,
    i32 %4, i32 1)

  ret <vscale x 16 x i32> %a
}
; Saturating signed add, vector-vector form, SEW=64 (nxv1i64..nxv8i64).
; Restored from the lossy extraction (see i8 section note).
declare <vscale x 1 x i64> @llvm.riscv.vsadd.nxv1i64.nxv1i64(
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  i32);

define <vscale x 1 x i64> @intrinsic_vsadd_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vsadd_vv_nxv1i64_nxv1i64_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vsadd.vv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vsadd.nxv1i64.nxv1i64(
    <vscale x 1 x i64> undef,
    <vscale x 1 x i64> %0,
    <vscale x 1 x i64> %1,
    i32 %2)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vsadd.mask.nxv1i64.nxv1i64(
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  <vscale x 1 x i1>,
  i32,
  i32);

define <vscale x 1 x i64> @intrinsic_vsadd_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv1i64_nxv1i64_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT:    vsadd.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vsadd.mask.nxv1i64.nxv1i64(
    <vscale x 1 x i64> %0,
    <vscale x 1 x i64> %1,
    <vscale x 1 x i64> %2,
    <vscale x 1 x i1> %3,
    i32 %4, i32 1)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.vsadd.nxv2i64.nxv2i64(
  <vscale x 2 x i64>,
  <vscale x 2 x i64>,
  <vscale x 2 x i64>,
  i32);

define <vscale x 2 x i64> @intrinsic_vsadd_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vsadd_vv_nxv2i64_nxv2i64_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT:    vsadd.vv v8, v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vsadd.nxv2i64.nxv2i64(
    <vscale x 2 x i64> undef,
    <vscale x 2 x i64> %0,
    <vscale x 2 x i64> %1,
    i32 %2)

  ret <vscale x 2 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.vsadd.mask.nxv2i64.nxv2i64(
  <vscale x 2 x i64>,
  <vscale x 2 x i64>,
  <vscale x 2 x i64>,
  <vscale x 2 x i1>,
  i32,
  i32);

define <vscale x 2 x i64> @intrinsic_vsadd_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv2i64_nxv2i64_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
; CHECK-NEXT:    vsadd.vv v8, v10, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vsadd.mask.nxv2i64.nxv2i64(
    <vscale x 2 x i64> %0,
    <vscale x 2 x i64> %1,
    <vscale x 2 x i64> %2,
    <vscale x 2 x i1> %3,
    i32 %4, i32 1)

  ret <vscale x 2 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vsadd.nxv4i64.nxv4i64(
  <vscale x 4 x i64>,
  <vscale x 4 x i64>,
  <vscale x 4 x i64>,
  i32);

define <vscale x 4 x i64> @intrinsic_vsadd_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vsadd_vv_nxv4i64_nxv4i64_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT:    vsadd.vv v8, v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vsadd.nxv4i64.nxv4i64(
    <vscale x 4 x i64> undef,
    <vscale x 4 x i64> %0,
    <vscale x 4 x i64> %1,
    i32 %2)

  ret <vscale x 4 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vsadd.mask.nxv4i64.nxv4i64(
  <vscale x 4 x i64>,
  <vscale x 4 x i64>,
  <vscale x 4 x i64>,
  <vscale x 4 x i1>,
  i32,
  i32);

define <vscale x 4 x i64> @intrinsic_vsadd_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv4i64_nxv4i64_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
; CHECK-NEXT:    vsadd.vv v8, v12, v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vsadd.mask.nxv4i64.nxv4i64(
    <vscale x 4 x i64> %0,
    <vscale x 4 x i64> %1,
    <vscale x 4 x i64> %2,
    <vscale x 4 x i1> %3,
    i32 %4, i32 1)

  ret <vscale x 4 x i64> %a
}

declare <vscale x 8 x i64> @llvm.riscv.vsadd.nxv8i64.nxv8i64(
  <vscale x 8 x i64>,
  <vscale x 8 x i64>,
  <vscale x 8 x i64>,
  i32);

define <vscale x 8 x i64> @intrinsic_vsadd_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vsadd_vv_nxv8i64_nxv8i64_nxv8i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vsadd.vv v8, v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vsadd.nxv8i64.nxv8i64(
    <vscale x 8 x i64> undef,
    <vscale x 8 x i64> %0,
    <vscale x 8 x i64> %1,
    i32 %2)

  ret <vscale x 8 x i64> %a
}

declare <vscale x 8 x i64> @llvm.riscv.vsadd.mask.nxv8i64.nxv8i64(
  <vscale x 8 x i64>,
  <vscale x 8 x i64>,
  <vscale x 8 x i64>,
  <vscale x 8 x i1>,
  i32,
  i32);

define <vscale x 8 x i64> @intrinsic_vsadd_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv8i64_nxv8i64_nxv8i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vl8re64.v v24, (a0)
; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
; CHECK-NEXT:    vsadd.vv v8, v16, v24, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vsadd.mask.nxv8i64.nxv8i64(
    <vscale x 8 x i64> %0,
    <vscale x 8 x i64> %1,
    <vscale x 8 x i64> %2,
    <vscale x 8 x i1> %3,
    i32 %4, i32 1)

  ret <vscale x 8 x i64> %a
}
1043 declare <vscale x 1 x i8> @llvm.riscv.vsadd.nxv1i8.i8(
1049 define <vscale x 1 x i8> @intrinsic_vsadd_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i32 %2) nounwind {
1050 ; CHECK-LABEL: intrinsic_vsadd_vx_nxv1i8_nxv1i8_i8:
1051 ; CHECK: # %bb.0: # %entry
1052 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
1053 ; CHECK-NEXT: vsadd.vx v8, v8, a0
1056 %a = call <vscale x 1 x i8> @llvm.riscv.vsadd.nxv1i8.i8(
1057 <vscale x 1 x i8> undef,
1058 <vscale x 1 x i8> %0,
1062 ret <vscale x 1 x i8> %a
1065 declare <vscale x 1 x i8> @llvm.riscv.vsadd.mask.nxv1i8.i8(
1073 define <vscale x 1 x i8> @intrinsic_vsadd_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
1074 ; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv1i8_nxv1i8_i8:
1075 ; CHECK: # %bb.0: # %entry
1076 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
1077 ; CHECK-NEXT: vsadd.vx v8, v9, a0, v0.t
1080 %a = call <vscale x 1 x i8> @llvm.riscv.vsadd.mask.nxv1i8.i8(
1081 <vscale x 1 x i8> %0,
1082 <vscale x 1 x i8> %1,
1084 <vscale x 1 x i1> %3,
1087 ret <vscale x 1 x i8> %a
1090 declare <vscale x 2 x i8> @llvm.riscv.vsadd.nxv2i8.i8(
1096 define <vscale x 2 x i8> @intrinsic_vsadd_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i32 %2) nounwind {
1097 ; CHECK-LABEL: intrinsic_vsadd_vx_nxv2i8_nxv2i8_i8:
1098 ; CHECK: # %bb.0: # %entry
1099 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
1100 ; CHECK-NEXT: vsadd.vx v8, v8, a0
1103 %a = call <vscale x 2 x i8> @llvm.riscv.vsadd.nxv2i8.i8(
1104 <vscale x 2 x i8> undef,
1105 <vscale x 2 x i8> %0,
1109 ret <vscale x 2 x i8> %a
1112 declare <vscale x 2 x i8> @llvm.riscv.vsadd.mask.nxv2i8.i8(
1120 define <vscale x 2 x i8> @intrinsic_vsadd_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
1121 ; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv2i8_nxv2i8_i8:
1122 ; CHECK: # %bb.0: # %entry
1123 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
1124 ; CHECK-NEXT: vsadd.vx v8, v9, a0, v0.t
1127 %a = call <vscale x 2 x i8> @llvm.riscv.vsadd.mask.nxv2i8.i8(
1128 <vscale x 2 x i8> %0,
1129 <vscale x 2 x i8> %1,
1131 <vscale x 2 x i1> %3,
1134 ret <vscale x 2 x i8> %a
1137 declare <vscale x 4 x i8> @llvm.riscv.vsadd.nxv4i8.i8(
1143 define <vscale x 4 x i8> @intrinsic_vsadd_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i32 %2) nounwind {
1144 ; CHECK-LABEL: intrinsic_vsadd_vx_nxv4i8_nxv4i8_i8:
1145 ; CHECK: # %bb.0: # %entry
1146 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
1147 ; CHECK-NEXT: vsadd.vx v8, v8, a0
1150 %a = call <vscale x 4 x i8> @llvm.riscv.vsadd.nxv4i8.i8(
1151 <vscale x 4 x i8> undef,
1152 <vscale x 4 x i8> %0,
1156 ret <vscale x 4 x i8> %a
1159 declare <vscale x 4 x i8> @llvm.riscv.vsadd.mask.nxv4i8.i8(
1167 define <vscale x 4 x i8> @intrinsic_vsadd_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
1168 ; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv4i8_nxv4i8_i8:
1169 ; CHECK: # %bb.0: # %entry
1170 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
1171 ; CHECK-NEXT: vsadd.vx v8, v9, a0, v0.t
1174 %a = call <vscale x 4 x i8> @llvm.riscv.vsadd.mask.nxv4i8.i8(
1175 <vscale x 4 x i8> %0,
1176 <vscale x 4 x i8> %1,
1178 <vscale x 4 x i1> %3,
1181 ret <vscale x 4 x i8> %a
1184 declare <vscale x 8 x i8> @llvm.riscv.vsadd.nxv8i8.i8(
1190 define <vscale x 8 x i8> @intrinsic_vsadd_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i32 %2) nounwind {
1191 ; CHECK-LABEL: intrinsic_vsadd_vx_nxv8i8_nxv8i8_i8:
1192 ; CHECK: # %bb.0: # %entry
1193 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
1194 ; CHECK-NEXT: vsadd.vx v8, v8, a0
1197 %a = call <vscale x 8 x i8> @llvm.riscv.vsadd.nxv8i8.i8(
1198 <vscale x 8 x i8> undef,
1199 <vscale x 8 x i8> %0,
1203 ret <vscale x 8 x i8> %a
1206 declare <vscale x 8 x i8> @llvm.riscv.vsadd.mask.nxv8i8.i8(
1214 define <vscale x 8 x i8> @intrinsic_vsadd_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
1215 ; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv8i8_nxv8i8_i8:
1216 ; CHECK: # %bb.0: # %entry
1217 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
1218 ; CHECK-NEXT: vsadd.vx v8, v9, a0, v0.t
1221 %a = call <vscale x 8 x i8> @llvm.riscv.vsadd.mask.nxv8i8.i8(
1222 <vscale x 8 x i8> %0,
1223 <vscale x 8 x i8> %1,
1225 <vscale x 8 x i1> %3,
1228 ret <vscale x 8 x i8> %a
declare <vscale x 16 x i8> @llvm.riscv.vsadd.nxv16i8.i8(
  <vscale x 16 x i8>,
  <vscale x 16 x i8>,
  i8,
  i32)

define <vscale x 16 x i8> @intrinsic_vsadd_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vsadd_vx_nxv16i8_nxv16i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT:    vsadd.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vsadd.nxv16i8.i8(
    <vscale x 16 x i8> undef,
    <vscale x 16 x i8> %0,
    i8 %1,
    i32 %2)

  ret <vscale x 16 x i8> %a
}

declare <vscale x 16 x i8> @llvm.riscv.vsadd.mask.nxv16i8.i8(
  <vscale x 16 x i8>,
  <vscale x 16 x i8>,
  i8,
  <vscale x 16 x i1>,
  i32,
  i32)

define <vscale x 16 x i8> @intrinsic_vsadd_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv16i8_nxv16i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT:    vsadd.vx v8, v10, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vsadd.mask.nxv16i8.i8(
    <vscale x 16 x i8> %0,
    <vscale x 16 x i8> %1,
    i8 %2,
    <vscale x 16 x i1> %3,
    i32 %4, i32 1)

  ret <vscale x 16 x i8> %a
}
declare <vscale x 32 x i8> @llvm.riscv.vsadd.nxv32i8.i8(
  <vscale x 32 x i8>,
  <vscale x 32 x i8>,
  i8,
  i32)

define <vscale x 32 x i8> @intrinsic_vsadd_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vsadd_vx_nxv32i8_nxv32i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
; CHECK-NEXT:    vsadd.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vsadd.nxv32i8.i8(
    <vscale x 32 x i8> undef,
    <vscale x 32 x i8> %0,
    i8 %1,
    i32 %2)

  ret <vscale x 32 x i8> %a
}

declare <vscale x 32 x i8> @llvm.riscv.vsadd.mask.nxv32i8.i8(
  <vscale x 32 x i8>,
  <vscale x 32 x i8>,
  i8,
  <vscale x 32 x i1>,
  i32,
  i32)

define <vscale x 32 x i8> @intrinsic_vsadd_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv32i8_nxv32i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT:    vsadd.vx v8, v12, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vsadd.mask.nxv32i8.i8(
    <vscale x 32 x i8> %0,
    <vscale x 32 x i8> %1,
    i8 %2,
    <vscale x 32 x i1> %3,
    i32 %4, i32 1)

  ret <vscale x 32 x i8> %a
}
declare <vscale x 64 x i8> @llvm.riscv.vsadd.nxv64i8.i8(
  <vscale x 64 x i8>,
  <vscale x 64 x i8>,
  i8,
  i32)

define <vscale x 64 x i8> @intrinsic_vsadd_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vsadd_vx_nxv64i8_nxv64i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT:    vsadd.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 64 x i8> @llvm.riscv.vsadd.nxv64i8.i8(
    <vscale x 64 x i8> undef,
    <vscale x 64 x i8> %0,
    i8 %1,
    i32 %2)

  ret <vscale x 64 x i8> %a
}

declare <vscale x 64 x i8> @llvm.riscv.vsadd.mask.nxv64i8.i8(
  <vscale x 64 x i8>,
  <vscale x 64 x i8>,
  i8,
  <vscale x 64 x i1>,
  i32,
  i32)

define <vscale x 64 x i8> @intrinsic_vsadd_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv64i8_nxv64i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
; CHECK-NEXT:    vsadd.vx v8, v16, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 64 x i8> @llvm.riscv.vsadd.mask.nxv64i8.i8(
    <vscale x 64 x i8> %0,
    <vscale x 64 x i8> %1,
    i8 %2,
    <vscale x 64 x i1> %3,
    i32 %4, i32 1)

  ret <vscale x 64 x i8> %a
}
declare <vscale x 1 x i16> @llvm.riscv.vsadd.nxv1i16.i16(
  <vscale x 1 x i16>,
  <vscale x 1 x i16>,
  i16,
  i32)

define <vscale x 1 x i16> @intrinsic_vsadd_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vsadd_vx_nxv1i16_nxv1i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vsadd.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vsadd.nxv1i16.i16(
    <vscale x 1 x i16> undef,
    <vscale x 1 x i16> %0,
    i16 %1,
    i32 %2)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vsadd.mask.nxv1i16.i16(
  <vscale x 1 x i16>,
  <vscale x 1 x i16>,
  i16,
  <vscale x 1 x i1>,
  i32,
  i32)

define <vscale x 1 x i16> @intrinsic_vsadd_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv1i16_nxv1i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT:    vsadd.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vsadd.mask.nxv1i16.i16(
    <vscale x 1 x i16> %0,
    <vscale x 1 x i16> %1,
    i16 %2,
    <vscale x 1 x i1> %3,
    i32 %4, i32 1)

  ret <vscale x 1 x i16> %a
}
declare <vscale x 2 x i16> @llvm.riscv.vsadd.nxv2i16.i16(
  <vscale x 2 x i16>,
  <vscale x 2 x i16>,
  i16,
  i32)

define <vscale x 2 x i16> @intrinsic_vsadd_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vsadd_vx_nxv2i16_nxv2i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT:    vsadd.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vsadd.nxv2i16.i16(
    <vscale x 2 x i16> undef,
    <vscale x 2 x i16> %0,
    i16 %1,
    i32 %2)

  ret <vscale x 2 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vsadd.mask.nxv2i16.i16(
  <vscale x 2 x i16>,
  <vscale x 2 x i16>,
  i16,
  <vscale x 2 x i1>,
  i32,
  i32)

define <vscale x 2 x i16> @intrinsic_vsadd_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv2i16_nxv2i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT:    vsadd.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vsadd.mask.nxv2i16.i16(
    <vscale x 2 x i16> %0,
    <vscale x 2 x i16> %1,
    i16 %2,
    <vscale x 2 x i1> %3,
    i32 %4, i32 1)

  ret <vscale x 2 x i16> %a
}
declare <vscale x 4 x i16> @llvm.riscv.vsadd.nxv4i16.i16(
  <vscale x 4 x i16>,
  <vscale x 4 x i16>,
  i16,
  i32)

define <vscale x 4 x i16> @intrinsic_vsadd_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vsadd_vx_nxv4i16_nxv4i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT:    vsadd.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vsadd.nxv4i16.i16(
    <vscale x 4 x i16> undef,
    <vscale x 4 x i16> %0,
    i16 %1,
    i32 %2)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vsadd.mask.nxv4i16.i16(
  <vscale x 4 x i16>,
  <vscale x 4 x i16>,
  i16,
  <vscale x 4 x i1>,
  i32,
  i32)

define <vscale x 4 x i16> @intrinsic_vsadd_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv4i16_nxv4i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT:    vsadd.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vsadd.mask.nxv4i16.i16(
    <vscale x 4 x i16> %0,
    <vscale x 4 x i16> %1,
    i16 %2,
    <vscale x 4 x i1> %3,
    i32 %4, i32 1)

  ret <vscale x 4 x i16> %a
}
declare <vscale x 8 x i16> @llvm.riscv.vsadd.nxv8i16.i16(
  <vscale x 8 x i16>,
  <vscale x 8 x i16>,
  i16,
  i32)

define <vscale x 8 x i16> @intrinsic_vsadd_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vsadd_vx_nxv8i16_nxv8i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT:    vsadd.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vsadd.nxv8i16.i16(
    <vscale x 8 x i16> undef,
    <vscale x 8 x i16> %0,
    i16 %1,
    i32 %2)

  ret <vscale x 8 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vsadd.mask.nxv8i16.i16(
  <vscale x 8 x i16>,
  <vscale x 8 x i16>,
  i16,
  <vscale x 8 x i1>,
  i32,
  i32)

define <vscale x 8 x i16> @intrinsic_vsadd_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv8i16_nxv8i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT:    vsadd.vx v8, v10, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vsadd.mask.nxv8i16.i16(
    <vscale x 8 x i16> %0,
    <vscale x 8 x i16> %1,
    i16 %2,
    <vscale x 8 x i1> %3,
    i32 %4, i32 1)

  ret <vscale x 8 x i16> %a
}
declare <vscale x 16 x i16> @llvm.riscv.vsadd.nxv16i16.i16(
  <vscale x 16 x i16>,
  <vscale x 16 x i16>,
  i16,
  i32)

define <vscale x 16 x i16> @intrinsic_vsadd_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vsadd_vx_nxv16i16_nxv16i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT:    vsadd.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vsadd.nxv16i16.i16(
    <vscale x 16 x i16> undef,
    <vscale x 16 x i16> %0,
    i16 %1,
    i32 %2)

  ret <vscale x 16 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vsadd.mask.nxv16i16.i16(
  <vscale x 16 x i16>,
  <vscale x 16 x i16>,
  i16,
  <vscale x 16 x i1>,
  i32,
  i32)

define <vscale x 16 x i16> @intrinsic_vsadd_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv16i16_nxv16i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT:    vsadd.vx v8, v12, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vsadd.mask.nxv16i16.i16(
    <vscale x 16 x i16> %0,
    <vscale x 16 x i16> %1,
    i16 %2,
    <vscale x 16 x i1> %3,
    i32 %4, i32 1)

  ret <vscale x 16 x i16> %a
}
declare <vscale x 32 x i16> @llvm.riscv.vsadd.nxv32i16.i16(
  <vscale x 32 x i16>,
  <vscale x 32 x i16>,
  i16,
  i32)

define <vscale x 32 x i16> @intrinsic_vsadd_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vsadd_vx_nxv32i16_nxv32i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
; CHECK-NEXT:    vsadd.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vsadd.nxv32i16.i16(
    <vscale x 32 x i16> undef,
    <vscale x 32 x i16> %0,
    i16 %1,
    i32 %2)

  ret <vscale x 32 x i16> %a
}

declare <vscale x 32 x i16> @llvm.riscv.vsadd.mask.nxv32i16.i16(
  <vscale x 32 x i16>,
  <vscale x 32 x i16>,
  i16,
  <vscale x 32 x i1>,
  i32,
  i32)

define <vscale x 32 x i16> @intrinsic_vsadd_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv32i16_nxv32i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
; CHECK-NEXT:    vsadd.vx v8, v16, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vsadd.mask.nxv32i16.i16(
    <vscale x 32 x i16> %0,
    <vscale x 32 x i16> %1,
    i16 %2,
    <vscale x 32 x i1> %3,
    i32 %4, i32 1)

  ret <vscale x 32 x i16> %a
}
declare <vscale x 1 x i32> @llvm.riscv.vsadd.nxv1i32.i32(
  <vscale x 1 x i32>,
  <vscale x 1 x i32>,
  i32,
  i32)

define <vscale x 1 x i32> @intrinsic_vsadd_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vsadd_vx_nxv1i32_nxv1i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT:    vsadd.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vsadd.nxv1i32.i32(
    <vscale x 1 x i32> undef,
    <vscale x 1 x i32> %0,
    i32 %1,
    i32 %2)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vsadd.mask.nxv1i32.i32(
  <vscale x 1 x i32>,
  <vscale x 1 x i32>,
  i32,
  <vscale x 1 x i1>,
  i32,
  i32)

define <vscale x 1 x i32> @intrinsic_vsadd_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv1i32_nxv1i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT:    vsadd.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vsadd.mask.nxv1i32.i32(
    <vscale x 1 x i32> %0,
    <vscale x 1 x i32> %1,
    i32 %2,
    <vscale x 1 x i1> %3,
    i32 %4, i32 1)

  ret <vscale x 1 x i32> %a
}
declare <vscale x 2 x i32> @llvm.riscv.vsadd.nxv2i32.i32(
  <vscale x 2 x i32>,
  <vscale x 2 x i32>,
  i32,
  i32)

define <vscale x 2 x i32> @intrinsic_vsadd_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vsadd_vx_nxv2i32_nxv2i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT:    vsadd.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vsadd.nxv2i32.i32(
    <vscale x 2 x i32> undef,
    <vscale x 2 x i32> %0,
    i32 %1,
    i32 %2)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vsadd.mask.nxv2i32.i32(
  <vscale x 2 x i32>,
  <vscale x 2 x i32>,
  i32,
  <vscale x 2 x i1>,
  i32,
  i32)

define <vscale x 2 x i32> @intrinsic_vsadd_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv2i32_nxv2i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT:    vsadd.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vsadd.mask.nxv2i32.i32(
    <vscale x 2 x i32> %0,
    <vscale x 2 x i32> %1,
    i32 %2,
    <vscale x 2 x i1> %3,
    i32 %4, i32 1)

  ret <vscale x 2 x i32> %a
}
declare <vscale x 4 x i32> @llvm.riscv.vsadd.nxv4i32.i32(
  <vscale x 4 x i32>,
  <vscale x 4 x i32>,
  i32,
  i32)

define <vscale x 4 x i32> @intrinsic_vsadd_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vsadd_vx_nxv4i32_nxv4i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT:    vsadd.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vsadd.nxv4i32.i32(
    <vscale x 4 x i32> undef,
    <vscale x 4 x i32> %0,
    i32 %1,
    i32 %2)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vsadd.mask.nxv4i32.i32(
  <vscale x 4 x i32>,
  <vscale x 4 x i32>,
  i32,
  <vscale x 4 x i1>,
  i32,
  i32)

define <vscale x 4 x i32> @intrinsic_vsadd_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv4i32_nxv4i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT:    vsadd.vx v8, v10, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vsadd.mask.nxv4i32.i32(
    <vscale x 4 x i32> %0,
    <vscale x 4 x i32> %1,
    i32 %2,
    <vscale x 4 x i1> %3,
    i32 %4, i32 1)

  ret <vscale x 4 x i32> %a
}
declare <vscale x 8 x i32> @llvm.riscv.vsadd.nxv8i32.i32(
  <vscale x 8 x i32>,
  <vscale x 8 x i32>,
  i32,
  i32)

define <vscale x 8 x i32> @intrinsic_vsadd_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vsadd_vx_nxv8i32_nxv8i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT:    vsadd.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vsadd.nxv8i32.i32(
    <vscale x 8 x i32> undef,
    <vscale x 8 x i32> %0,
    i32 %1,
    i32 %2)

  ret <vscale x 8 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vsadd.mask.nxv8i32.i32(
  <vscale x 8 x i32>,
  <vscale x 8 x i32>,
  i32,
  <vscale x 8 x i1>,
  i32,
  i32)

define <vscale x 8 x i32> @intrinsic_vsadd_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv8i32_nxv8i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT:    vsadd.vx v8, v12, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vsadd.mask.nxv8i32.i32(
    <vscale x 8 x i32> %0,
    <vscale x 8 x i32> %1,
    i32 %2,
    <vscale x 8 x i1> %3,
    i32 %4, i32 1)

  ret <vscale x 8 x i32> %a
}
declare <vscale x 16 x i32> @llvm.riscv.vsadd.nxv16i32.i32(
  <vscale x 16 x i32>,
  <vscale x 16 x i32>,
  i32,
  i32)

define <vscale x 16 x i32> @intrinsic_vsadd_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vsadd_vx_nxv16i32_nxv16i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT:    vsadd.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vsadd.nxv16i32.i32(
    <vscale x 16 x i32> undef,
    <vscale x 16 x i32> %0,
    i32 %1,
    i32 %2)

  ret <vscale x 16 x i32> %a
}

declare <vscale x 16 x i32> @llvm.riscv.vsadd.mask.nxv16i32.i32(
  <vscale x 16 x i32>,
  <vscale x 16 x i32>,
  i32,
  <vscale x 16 x i1>,
  i32,
  i32)

define <vscale x 16 x i32> @intrinsic_vsadd_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv16i32_nxv16i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
; CHECK-NEXT:    vsadd.vx v8, v16, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vsadd.mask.nxv16i32.i32(
    <vscale x 16 x i32> %0,
    <vscale x 16 x i32> %1,
    i32 %2,
    <vscale x 16 x i1> %3,
    i32 %4, i32 1)

  ret <vscale x 16 x i32> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vsadd.nxv1i64.i64(
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  i64,
  i32)

define <vscale x 1 x i64> @intrinsic_vsadd_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vsadd_vx_nxv1i64_nxv1i64_i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    addi sp, sp, -16
; CHECK-NEXT:    sw a1, 12(sp)
; CHECK-NEXT:    sw a0, 8(sp)
; CHECK-NEXT:    addi a0, sp, 8
; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
; CHECK-NEXT:    vlse64.v v9, (a0), zero
; CHECK-NEXT:    vsadd.vv v8, v8, v9
; CHECK-NEXT:    addi sp, sp, 16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vsadd.nxv1i64.i64(
    <vscale x 1 x i64> undef,
    <vscale x 1 x i64> %0,
    i64 %1,
    i32 %2)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vsadd.mask.nxv1i64.i64(
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  i64,
  <vscale x 1 x i1>,
  i32,
  i32)

define <vscale x 1 x i64> @intrinsic_vsadd_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv1i64_nxv1i64_i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    addi sp, sp, -16
; CHECK-NEXT:    sw a1, 12(sp)
; CHECK-NEXT:    sw a0, 8(sp)
; CHECK-NEXT:    addi a0, sp, 8
; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, mu
; CHECK-NEXT:    vlse64.v v10, (a0), zero
; CHECK-NEXT:    vsadd.vv v8, v9, v10, v0.t
; CHECK-NEXT:    addi sp, sp, 16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vsadd.mask.nxv1i64.i64(
    <vscale x 1 x i64> %0,
    <vscale x 1 x i64> %1,
    i64 %2,
    <vscale x 1 x i1> %3,
    i32 %4, i32 1)

  ret <vscale x 1 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vsadd.nxv2i64.i64(
  <vscale x 2 x i64>,
  <vscale x 2 x i64>,
  i64,
  i32)

define <vscale x 2 x i64> @intrinsic_vsadd_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vsadd_vx_nxv2i64_nxv2i64_i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    addi sp, sp, -16
; CHECK-NEXT:    sw a1, 12(sp)
; CHECK-NEXT:    sw a0, 8(sp)
; CHECK-NEXT:    addi a0, sp, 8
; CHECK-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
; CHECK-NEXT:    vlse64.v v10, (a0), zero
; CHECK-NEXT:    vsadd.vv v8, v8, v10
; CHECK-NEXT:    addi sp, sp, 16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vsadd.nxv2i64.i64(
    <vscale x 2 x i64> undef,
    <vscale x 2 x i64> %0,
    i64 %1,
    i32 %2)

  ret <vscale x 2 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.vsadd.mask.nxv2i64.i64(
  <vscale x 2 x i64>,
  <vscale x 2 x i64>,
  i64,
  <vscale x 2 x i1>,
  i32,
  i32)

define <vscale x 2 x i64> @intrinsic_vsadd_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv2i64_nxv2i64_i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    addi sp, sp, -16
; CHECK-NEXT:    sw a1, 12(sp)
; CHECK-NEXT:    sw a0, 8(sp)
; CHECK-NEXT:    addi a0, sp, 8
; CHECK-NEXT:    vsetvli zero, a2, e64, m2, ta, mu
; CHECK-NEXT:    vlse64.v v12, (a0), zero
; CHECK-NEXT:    vsadd.vv v8, v10, v12, v0.t
; CHECK-NEXT:    addi sp, sp, 16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vsadd.mask.nxv2i64.i64(
    <vscale x 2 x i64> %0,
    <vscale x 2 x i64> %1,
    i64 %2,
    <vscale x 2 x i1> %3,
    i32 %4, i32 1)

  ret <vscale x 2 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vsadd.nxv4i64.i64(
  <vscale x 4 x i64>,
  <vscale x 4 x i64>,
  i64,
  i32)

define <vscale x 4 x i64> @intrinsic_vsadd_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vsadd_vx_nxv4i64_nxv4i64_i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    addi sp, sp, -16
; CHECK-NEXT:    sw a1, 12(sp)
; CHECK-NEXT:    sw a0, 8(sp)
; CHECK-NEXT:    addi a0, sp, 8
; CHECK-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
; CHECK-NEXT:    vlse64.v v12, (a0), zero
; CHECK-NEXT:    vsadd.vv v8, v8, v12
; CHECK-NEXT:    addi sp, sp, 16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vsadd.nxv4i64.i64(
    <vscale x 4 x i64> undef,
    <vscale x 4 x i64> %0,
    i64 %1,
    i32 %2)

  ret <vscale x 4 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vsadd.mask.nxv4i64.i64(
  <vscale x 4 x i64>,
  <vscale x 4 x i64>,
  i64,
  <vscale x 4 x i1>,
  i32,
  i32)

define <vscale x 4 x i64> @intrinsic_vsadd_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv4i64_nxv4i64_i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    addi sp, sp, -16
; CHECK-NEXT:    sw a1, 12(sp)
; CHECK-NEXT:    sw a0, 8(sp)
; CHECK-NEXT:    addi a0, sp, 8
; CHECK-NEXT:    vsetvli zero, a2, e64, m4, ta, mu
; CHECK-NEXT:    vlse64.v v16, (a0), zero
; CHECK-NEXT:    vsadd.vv v8, v12, v16, v0.t
; CHECK-NEXT:    addi sp, sp, 16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vsadd.mask.nxv4i64.i64(
    <vscale x 4 x i64> %0,
    <vscale x 4 x i64> %1,
    i64 %2,
    <vscale x 4 x i1> %3,
    i32 %4, i32 1)

  ret <vscale x 4 x i64> %a
}
declare <vscale x 8 x i64> @llvm.riscv.vsadd.nxv8i64.i64(
  <vscale x 8 x i64>,
  <vscale x 8 x i64>,
  i64,
  i32)

define <vscale x 8 x i64> @intrinsic_vsadd_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vsadd_vx_nxv8i64_nxv8i64_i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    addi sp, sp, -16
; CHECK-NEXT:    sw a1, 12(sp)
; CHECK-NEXT:    sw a0, 8(sp)
; CHECK-NEXT:    addi a0, sp, 8
; CHECK-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
; CHECK-NEXT:    vlse64.v v16, (a0), zero
; CHECK-NEXT:    vsadd.vv v8, v8, v16
; CHECK-NEXT:    addi sp, sp, 16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vsadd.nxv8i64.i64(
    <vscale x 8 x i64> undef,
    <vscale x 8 x i64> %0,
    i64 %1,
    i32 %2)

  ret <vscale x 8 x i64> %a
}

declare <vscale x 8 x i64> @llvm.riscv.vsadd.mask.nxv8i64.i64(
  <vscale x 8 x i64>,
  <vscale x 8 x i64>,
  i64,
  <vscale x 8 x i1>,
  i32,
  i32)

define <vscale x 8 x i64> @intrinsic_vsadd_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv8i64_nxv8i64_i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    addi sp, sp, -16
; CHECK-NEXT:    sw a1, 12(sp)
; CHECK-NEXT:    sw a0, 8(sp)
; CHECK-NEXT:    addi a0, sp, 8
; CHECK-NEXT:    vsetvli zero, a2, e64, m8, ta, mu
; CHECK-NEXT:    vlse64.v v24, (a0), zero
; CHECK-NEXT:    vsadd.vv v8, v16, v24, v0.t
; CHECK-NEXT:    addi sp, sp, 16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vsadd.mask.nxv8i64.i64(
    <vscale x 8 x i64> %0,
    <vscale x 8 x i64> %1,
    i64 %2,
    <vscale x 8 x i1> %3,
    i32 %4, i32 1)

  ret <vscale x 8 x i64> %a
}
define <vscale x 1 x i8> @intrinsic_vsadd_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vsadd_vi_nxv1i8_nxv1i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vsadd.vi v8, v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vsadd.nxv1i8.i8(
    <vscale x 1 x i8> undef,
    <vscale x 1 x i8> %0,
    i8 9,
    i32 %1)

  ret <vscale x 1 x i8> %a
}

define <vscale x 1 x i8> @intrinsic_vsadd_mask_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv1i8_nxv1i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT:    vsadd.vi v8, v9, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vsadd.mask.nxv1i8.i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    i8 9,
    <vscale x 1 x i1> %2,
    i32 %3, i32 1)

  ret <vscale x 1 x i8> %a
}
define <vscale x 2 x i8> @intrinsic_vsadd_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vsadd_vi_nxv2i8_nxv2i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT:    vsadd.vi v8, v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vsadd.nxv2i8.i8(
    <vscale x 2 x i8> undef,
    <vscale x 2 x i8> %0,
    i8 9,
    i32 %1)

  ret <vscale x 2 x i8> %a
}

define <vscale x 2 x i8> @intrinsic_vsadd_mask_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv2i8_nxv2i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
; CHECK-NEXT:    vsadd.vi v8, v9, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vsadd.mask.nxv2i8.i8(
    <vscale x 2 x i8> %0,
    <vscale x 2 x i8> %1,
    i8 9,
    <vscale x 2 x i1> %2,
    i32 %3, i32 1)

  ret <vscale x 2 x i8> %a
}
define <vscale x 4 x i8> @intrinsic_vsadd_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vsadd_vi_nxv4i8_nxv4i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT:    vsadd.vi v8, v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vsadd.nxv4i8.i8(
    <vscale x 4 x i8> undef,
    <vscale x 4 x i8> %0,
    i8 9,
    i32 %1)

  ret <vscale x 4 x i8> %a
}

define <vscale x 4 x i8> @intrinsic_vsadd_mask_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv4i8_nxv4i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
; CHECK-NEXT:    vsadd.vi v8, v9, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vsadd.mask.nxv4i8.i8(
    <vscale x 4 x i8> %0,
    <vscale x 4 x i8> %1,
    i8 9,
    <vscale x 4 x i1> %2,
    i32 %3, i32 1)

  ret <vscale x 4 x i8> %a
}
define <vscale x 8 x i8> @intrinsic_vsadd_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vsadd_vi_nxv8i8_nxv8i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT:    vsadd.vi v8, v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vsadd.nxv8i8.i8(
    <vscale x 8 x i8> undef,
    <vscale x 8 x i8> %0,
    i8 9,
    i32 %1)

  ret <vscale x 8 x i8> %a
}

define <vscale x 8 x i8> @intrinsic_vsadd_mask_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv8i8_nxv8i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
; CHECK-NEXT:    vsadd.vi v8, v9, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vsadd.mask.nxv8i8.i8(
    <vscale x 8 x i8> %0,
    <vscale x 8 x i8> %1,
    i8 9,
    <vscale x 8 x i1> %2,
    i32 %3, i32 1)

  ret <vscale x 8 x i8> %a
}
define <vscale x 16 x i8> @intrinsic_vsadd_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vsadd_vi_nxv16i8_nxv16i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT:    vsadd.vi v8, v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vsadd.nxv16i8.i8(
    <vscale x 16 x i8> undef,
    <vscale x 16 x i8> %0,
    i8 9,
    i32 %1)

  ret <vscale x 16 x i8> %a
}

define <vscale x 16 x i8> @intrinsic_vsadd_mask_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv16i8_nxv16i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
; CHECK-NEXT:    vsadd.vi v8, v10, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vsadd.mask.nxv16i8.i8(
    <vscale x 16 x i8> %0,
    <vscale x 16 x i8> %1,
    i8 9,
    <vscale x 16 x i1> %2,
    i32 %3, i32 1)

  ret <vscale x 16 x i8> %a
}
define <vscale x 32 x i8> @intrinsic_vsadd_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vsadd_vi_nxv32i8_nxv32i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT:    vsadd.vi v8, v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vsadd.nxv32i8.i8(
    <vscale x 32 x i8> undef,
    <vscale x 32 x i8> %0,
    i8 9,
    i32 %1)

  ret <vscale x 32 x i8> %a
}

define <vscale x 32 x i8> @intrinsic_vsadd_mask_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv32i8_nxv32i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
; CHECK-NEXT:    vsadd.vi v8, v12, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vsadd.mask.nxv32i8.i8(
    <vscale x 32 x i8> %0,
    <vscale x 32 x i8> %1,
    i8 9,
    <vscale x 32 x i1> %2,
    i32 %3, i32 1)

  ret <vscale x 32 x i8> %a
}
define <vscale x 64 x i8> @intrinsic_vsadd_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vsadd_vi_nxv64i8_nxv64i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT:    vsadd.vi v8, v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 64 x i8> @llvm.riscv.vsadd.nxv64i8.i8(
    <vscale x 64 x i8> undef,
    <vscale x 64 x i8> %0,
    i8 9,
    i32 %1)

  ret <vscale x 64 x i8> %a
}

define <vscale x 64 x i8> @intrinsic_vsadd_mask_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv64i8_nxv64i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, mu
; CHECK-NEXT:    vsadd.vi v8, v16, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 64 x i8> @llvm.riscv.vsadd.mask.nxv64i8.i8(
    <vscale x 64 x i8> %0,
    <vscale x 64 x i8> %1,
    i8 9,
    <vscale x 64 x i1> %2,
    i32 %3, i32 1)

  ret <vscale x 64 x i8> %a
}
define <vscale x 1 x i16> @intrinsic_vsadd_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vsadd_vi_nxv1i16_nxv1i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vsadd.vi v8, v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vsadd.nxv1i16.i16(
    <vscale x 1 x i16> undef,
    <vscale x 1 x i16> %0,
    i16 9,
    i32 %1)

  ret <vscale x 1 x i16> %a
}

define <vscale x 1 x i16> @intrinsic_vsadd_mask_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv1i16_nxv1i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT:    vsadd.vi v8, v9, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vsadd.mask.nxv1i16.i16(
    <vscale x 1 x i16> %0,
    <vscale x 1 x i16> %1,
    i16 9,
    <vscale x 1 x i1> %2,
    i32 %3, i32 1)

  ret <vscale x 1 x i16> %a
}
define <vscale x 2 x i16> @intrinsic_vsadd_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vsadd_vi_nxv2i16_nxv2i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vsadd.vi v8, v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vsadd.nxv2i16.i16(
    <vscale x 2 x i16> undef,
    <vscale x 2 x i16> %0,
    i16 9,
    i32 %1)

  ret <vscale x 2 x i16> %a
}

define <vscale x 2 x i16> @intrinsic_vsadd_mask_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv2i16_nxv2i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT:    vsadd.vi v8, v9, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vsadd.mask.nxv2i16.i16(
    <vscale x 2 x i16> %0,
    <vscale x 2 x i16> %1,
    i16 9,
    <vscale x 2 x i1> %2,
    i32 %3, i32 1)

  ret <vscale x 2 x i16> %a
}
define <vscale x 4 x i16> @intrinsic_vsadd_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vsadd_vi_nxv4i16_nxv4i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vsadd.vi v8, v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vsadd.nxv4i16.i16(
    <vscale x 4 x i16> undef,
    <vscale x 4 x i16> %0,
    i16 9,
    i32 %1)

  ret <vscale x 4 x i16> %a
}

define <vscale x 4 x i16> @intrinsic_vsadd_mask_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv4i16_nxv4i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT:    vsadd.vi v8, v9, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vsadd.mask.nxv4i16.i16(
    <vscale x 4 x i16> %0,
    <vscale x 4 x i16> %1,
    i16 9,
    <vscale x 4 x i1> %2,
    i32 %3, i32 1)

  ret <vscale x 4 x i16> %a
}
2455 define <vscale x 8 x i16> @intrinsic_vsadd_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i32 %1) nounwind {
2456 ; CHECK-LABEL: intrinsic_vsadd_vi_nxv8i16_nxv8i16_i16:
2457 ; CHECK: # %bb.0: # %entry
2458 ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
2459 ; CHECK-NEXT: vsadd.vi v8, v8, 9
2462 %a = call <vscale x 8 x i16> @llvm.riscv.vsadd.nxv8i16.i16(
2463 <vscale x 8 x i16> undef,
2464 <vscale x 8 x i16> %0,
2468 ret <vscale x 8 x i16> %a
2471 define <vscale x 8 x i16> @intrinsic_vsadd_mask_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
2472 ; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv8i16_nxv8i16_i16:
2473 ; CHECK: # %bb.0: # %entry
2474 ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
2475 ; CHECK-NEXT: vsadd.vi v8, v10, 9, v0.t
2478 %a = call <vscale x 8 x i16> @llvm.riscv.vsadd.mask.nxv8i16.i16(
2479 <vscale x 8 x i16> %0,
2480 <vscale x 8 x i16> %1,
2482 <vscale x 8 x i1> %2,
2485 ret <vscale x 8 x i16> %a
2488 define <vscale x 16 x i16> @intrinsic_vsadd_vi_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i32 %1) nounwind {
2489 ; CHECK-LABEL: intrinsic_vsadd_vi_nxv16i16_nxv16i16_i16:
2490 ; CHECK: # %bb.0: # %entry
2491 ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
2492 ; CHECK-NEXT: vsadd.vi v8, v8, 9
2495 %a = call <vscale x 16 x i16> @llvm.riscv.vsadd.nxv16i16.i16(
2496 <vscale x 16 x i16> undef,
2497 <vscale x 16 x i16> %0,
2501 ret <vscale x 16 x i16> %a
2504 define <vscale x 16 x i16> @intrinsic_vsadd_mask_vi_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
2505 ; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv16i16_nxv16i16_i16:
2506 ; CHECK: # %bb.0: # %entry
2507 ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
2508 ; CHECK-NEXT: vsadd.vi v8, v12, 9, v0.t
2511 %a = call <vscale x 16 x i16> @llvm.riscv.vsadd.mask.nxv16i16.i16(
2512 <vscale x 16 x i16> %0,
2513 <vscale x 16 x i16> %1,
2515 <vscale x 16 x i1> %2,
2518 ret <vscale x 16 x i16> %a
2521 define <vscale x 32 x i16> @intrinsic_vsadd_vi_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i32 %1) nounwind {
2522 ; CHECK-LABEL: intrinsic_vsadd_vi_nxv32i16_nxv32i16_i16:
2523 ; CHECK: # %bb.0: # %entry
2524 ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
2525 ; CHECK-NEXT: vsadd.vi v8, v8, 9
2528 %a = call <vscale x 32 x i16> @llvm.riscv.vsadd.nxv32i16.i16(
2529 <vscale x 32 x i16> undef,
2530 <vscale x 32 x i16> %0,
2534 ret <vscale x 32 x i16> %a
2537 define <vscale x 32 x i16> @intrinsic_vsadd_mask_vi_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
2538 ; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv32i16_nxv32i16_i16:
2539 ; CHECK: # %bb.0: # %entry
2540 ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu
2541 ; CHECK-NEXT: vsadd.vi v8, v16, 9, v0.t
2544 %a = call <vscale x 32 x i16> @llvm.riscv.vsadd.mask.nxv32i16.i16(
2545 <vscale x 32 x i16> %0,
2546 <vscale x 32 x i16> %1,
2548 <vscale x 32 x i1> %2,
2551 ret <vscale x 32 x i16> %a
; --- vsadd.vi tests, SEW=32 (nxv1i32 .. nxv16i32, LMUL mf2 .. m8) ---
; Same pattern as the e16 section: unmasked ("ta, ma", passthru undef) and
; masked ("ta, mu", v0.t, tied passthru %0) vector-immediate saturating add.
; NOTE(review): stray numeric prefixes and missing IR lines (immediate/vl
; operands, "entry:" labels, closing braces) indicate this chunk is damaged;
; this is an update_llc_test_checks.py-generated file -- regenerate, do not
; hand-patch the CHECK assertions.
; nxv1i32, unmasked: e32/mf2.
2554 define <vscale x 1 x i32> @intrinsic_vsadd_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1) nounwind {
2555 ; CHECK-LABEL: intrinsic_vsadd_vi_nxv1i32_nxv1i32_i32:
2556 ; CHECK: # %bb.0: # %entry
2557 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
2558 ; CHECK-NEXT: vsadd.vi v8, v8, 9
2561 %a = call <vscale x 1 x i32> @llvm.riscv.vsadd.nxv1i32.i32(
2562 <vscale x 1 x i32> undef,
2563 <vscale x 1 x i32> %0,
2567 ret <vscale x 1 x i32> %a
; nxv1i32, masked: e32/mf2.
2570 define <vscale x 1 x i32> @intrinsic_vsadd_mask_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
2571 ; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv1i32_nxv1i32_i32:
2572 ; CHECK: # %bb.0: # %entry
2573 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
2574 ; CHECK-NEXT: vsadd.vi v8, v9, 9, v0.t
2577 %a = call <vscale x 1 x i32> @llvm.riscv.vsadd.mask.nxv1i32.i32(
2578 <vscale x 1 x i32> %0,
2579 <vscale x 1 x i32> %1,
2581 <vscale x 1 x i1> %2,
2584 ret <vscale x 1 x i32> %a
; nxv2i32, unmasked: e32/m1.
2587 define <vscale x 2 x i32> @intrinsic_vsadd_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1) nounwind {
2588 ; CHECK-LABEL: intrinsic_vsadd_vi_nxv2i32_nxv2i32_i32:
2589 ; CHECK: # %bb.0: # %entry
2590 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
2591 ; CHECK-NEXT: vsadd.vi v8, v8, 9
2594 %a = call <vscale x 2 x i32> @llvm.riscv.vsadd.nxv2i32.i32(
2595 <vscale x 2 x i32> undef,
2596 <vscale x 2 x i32> %0,
2600 ret <vscale x 2 x i32> %a
; nxv2i32, masked: e32/m1.
2603 define <vscale x 2 x i32> @intrinsic_vsadd_mask_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
2604 ; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv2i32_nxv2i32_i32:
2605 ; CHECK: # %bb.0: # %entry
2606 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
2607 ; CHECK-NEXT: vsadd.vi v8, v9, 9, v0.t
2610 %a = call <vscale x 2 x i32> @llvm.riscv.vsadd.mask.nxv2i32.i32(
2611 <vscale x 2 x i32> %0,
2612 <vscale x 2 x i32> %1,
2614 <vscale x 2 x i1> %2,
2617 ret <vscale x 2 x i32> %a
; nxv4i32, unmasked: e32/m2.
2620 define <vscale x 4 x i32> @intrinsic_vsadd_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1) nounwind {
2621 ; CHECK-LABEL: intrinsic_vsadd_vi_nxv4i32_nxv4i32_i32:
2622 ; CHECK: # %bb.0: # %entry
2623 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
2624 ; CHECK-NEXT: vsadd.vi v8, v8, 9
2627 %a = call <vscale x 4 x i32> @llvm.riscv.vsadd.nxv4i32.i32(
2628 <vscale x 4 x i32> undef,
2629 <vscale x 4 x i32> %0,
2633 ret <vscale x 4 x i32> %a
; nxv4i32, masked: e32/m2, source in v10.
2636 define <vscale x 4 x i32> @intrinsic_vsadd_mask_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
2637 ; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv4i32_nxv4i32_i32:
2638 ; CHECK: # %bb.0: # %entry
2639 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
2640 ; CHECK-NEXT: vsadd.vi v8, v10, 9, v0.t
2643 %a = call <vscale x 4 x i32> @llvm.riscv.vsadd.mask.nxv4i32.i32(
2644 <vscale x 4 x i32> %0,
2645 <vscale x 4 x i32> %1,
2647 <vscale x 4 x i1> %2,
2650 ret <vscale x 4 x i32> %a
; nxv8i32, unmasked: e32/m4.
2653 define <vscale x 8 x i32> @intrinsic_vsadd_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1) nounwind {
2654 ; CHECK-LABEL: intrinsic_vsadd_vi_nxv8i32_nxv8i32_i32:
2655 ; CHECK: # %bb.0: # %entry
2656 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
2657 ; CHECK-NEXT: vsadd.vi v8, v8, 9
2660 %a = call <vscale x 8 x i32> @llvm.riscv.vsadd.nxv8i32.i32(
2661 <vscale x 8 x i32> undef,
2662 <vscale x 8 x i32> %0,
2666 ret <vscale x 8 x i32> %a
; nxv8i32, masked: e32/m4, source in v12.
2669 define <vscale x 8 x i32> @intrinsic_vsadd_mask_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
2670 ; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv8i32_nxv8i32_i32:
2671 ; CHECK: # %bb.0: # %entry
2672 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
2673 ; CHECK-NEXT: vsadd.vi v8, v12, 9, v0.t
2676 %a = call <vscale x 8 x i32> @llvm.riscv.vsadd.mask.nxv8i32.i32(
2677 <vscale x 8 x i32> %0,
2678 <vscale x 8 x i32> %1,
2680 <vscale x 8 x i1> %2,
2683 ret <vscale x 8 x i32> %a
; nxv16i32, unmasked: e32/m8.
2686 define <vscale x 16 x i32> @intrinsic_vsadd_vi_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1) nounwind {
2687 ; CHECK-LABEL: intrinsic_vsadd_vi_nxv16i32_nxv16i32_i32:
2688 ; CHECK: # %bb.0: # %entry
2689 ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
2690 ; CHECK-NEXT: vsadd.vi v8, v8, 9
2693 %a = call <vscale x 16 x i32> @llvm.riscv.vsadd.nxv16i32.i32(
2694 <vscale x 16 x i32> undef,
2695 <vscale x 16 x i32> %0,
2699 ret <vscale x 16 x i32> %a
; nxv16i32, masked: e32/m8, source in v16.
2702 define <vscale x 16 x i32> @intrinsic_vsadd_mask_vi_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
2703 ; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv16i32_nxv16i32_i32:
2704 ; CHECK: # %bb.0: # %entry
2705 ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu
2706 ; CHECK-NEXT: vsadd.vi v8, v16, 9, v0.t
2709 %a = call <vscale x 16 x i32> @llvm.riscv.vsadd.mask.nxv16i32.i32(
2710 <vscale x 16 x i32> %0,
2711 <vscale x 16 x i32> %1,
2713 <vscale x 16 x i1> %2,
2716 ret <vscale x 16 x i32> %a
; --- vsadd.vi tests, SEW=64 (nxv1i64 .. nxv8i64, LMUL m1 .. m8) ---
; Same pattern as the smaller SEWs. NOTE(review): per the RUN line this file
; targets riscv32, so the i64 element type exercises the RV32 lowering; the
; immediate 9 fits vsadd.vi's simm5 field, so no scalar materialization is
; needed even without 64-bit GPRs -- confirm against the original test intent.
; NOTE(review): stray numeric prefixes and missing lines (operands, "entry:",
; closing braces) mean this chunk is extraction-damaged; regenerate with
; utils/update_llc_test_checks.py rather than hand-editing.
; nxv1i64, unmasked: e64/m1.
2719 define <vscale x 1 x i64> @intrinsic_vsadd_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i32 %1) nounwind {
2720 ; CHECK-LABEL: intrinsic_vsadd_vi_nxv1i64_nxv1i64_i64:
2721 ; CHECK: # %bb.0: # %entry
2722 ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
2723 ; CHECK-NEXT: vsadd.vi v8, v8, 9
2726 %a = call <vscale x 1 x i64> @llvm.riscv.vsadd.nxv1i64.i64(
2727 <vscale x 1 x i64> undef,
2728 <vscale x 1 x i64> %0,
2732 ret <vscale x 1 x i64> %a
; nxv1i64, masked: e64/m1.
2735 define <vscale x 1 x i64> @intrinsic_vsadd_mask_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
2736 ; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv1i64_nxv1i64_i64:
2737 ; CHECK: # %bb.0: # %entry
2738 ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
2739 ; CHECK-NEXT: vsadd.vi v8, v9, 9, v0.t
2742 %a = call <vscale x 1 x i64> @llvm.riscv.vsadd.mask.nxv1i64.i64(
2743 <vscale x 1 x i64> %0,
2744 <vscale x 1 x i64> %1,
2746 <vscale x 1 x i1> %2,
2749 ret <vscale x 1 x i64> %a
; nxv2i64, unmasked: e64/m2.
2752 define <vscale x 2 x i64> @intrinsic_vsadd_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i32 %1) nounwind {
2753 ; CHECK-LABEL: intrinsic_vsadd_vi_nxv2i64_nxv2i64_i64:
2754 ; CHECK: # %bb.0: # %entry
2755 ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
2756 ; CHECK-NEXT: vsadd.vi v8, v8, 9
2759 %a = call <vscale x 2 x i64> @llvm.riscv.vsadd.nxv2i64.i64(
2760 <vscale x 2 x i64> undef,
2761 <vscale x 2 x i64> %0,
2765 ret <vscale x 2 x i64> %a
; nxv2i64, masked: e64/m2, source in v10.
2768 define <vscale x 2 x i64> @intrinsic_vsadd_mask_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
2769 ; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv2i64_nxv2i64_i64:
2770 ; CHECK: # %bb.0: # %entry
2771 ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
2772 ; CHECK-NEXT: vsadd.vi v8, v10, 9, v0.t
2775 %a = call <vscale x 2 x i64> @llvm.riscv.vsadd.mask.nxv2i64.i64(
2776 <vscale x 2 x i64> %0,
2777 <vscale x 2 x i64> %1,
2779 <vscale x 2 x i1> %2,
2782 ret <vscale x 2 x i64> %a
; nxv4i64, unmasked: e64/m4.
2785 define <vscale x 4 x i64> @intrinsic_vsadd_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i32 %1) nounwind {
2786 ; CHECK-LABEL: intrinsic_vsadd_vi_nxv4i64_nxv4i64_i64:
2787 ; CHECK: # %bb.0: # %entry
2788 ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
2789 ; CHECK-NEXT: vsadd.vi v8, v8, 9
2792 %a = call <vscale x 4 x i64> @llvm.riscv.vsadd.nxv4i64.i64(
2793 <vscale x 4 x i64> undef,
2794 <vscale x 4 x i64> %0,
2798 ret <vscale x 4 x i64> %a
; nxv4i64, masked: e64/m4, source in v12.
2801 define <vscale x 4 x i64> @intrinsic_vsadd_mask_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
2802 ; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv4i64_nxv4i64_i64:
2803 ; CHECK: # %bb.0: # %entry
2804 ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
2805 ; CHECK-NEXT: vsadd.vi v8, v12, 9, v0.t
2808 %a = call <vscale x 4 x i64> @llvm.riscv.vsadd.mask.nxv4i64.i64(
2809 <vscale x 4 x i64> %0,
2810 <vscale x 4 x i64> %1,
2812 <vscale x 4 x i1> %2,
2815 ret <vscale x 4 x i64> %a
; nxv8i64, unmasked: e64/m8.
2818 define <vscale x 8 x i64> @intrinsic_vsadd_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i32 %1) nounwind {
2819 ; CHECK-LABEL: intrinsic_vsadd_vi_nxv8i64_nxv8i64_i64:
2820 ; CHECK: # %bb.0: # %entry
2821 ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
2822 ; CHECK-NEXT: vsadd.vi v8, v8, 9
2825 %a = call <vscale x 8 x i64> @llvm.riscv.vsadd.nxv8i64.i64(
2826 <vscale x 8 x i64> undef,
2827 <vscale x 8 x i64> %0,
2831 ret <vscale x 8 x i64> %a
; nxv8i64, masked: e64/m8, source in v16.
2834 define <vscale x 8 x i64> @intrinsic_vsadd_mask_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
2835 ; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv8i64_nxv8i64_i64:
2836 ; CHECK: # %bb.0: # %entry
2837 ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
2838 ; CHECK-NEXT: vsadd.vi v8, v16, 9, v0.t
2841 %a = call <vscale x 8 x i64> @llvm.riscv.vsadd.mask.nxv8i64.i64(
2842 <vscale x 8 x i64> %0,
2843 <vscale x 8 x i64> %1,
2845 <vscale x 8 x i1> %2,
2848 ret <vscale x 8 x i64> %a