1 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2 ; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
3 ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32
4 ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
5 ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64
7 declare <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8(
13 define <vscale x 1 x i8> @intrinsic_vadd_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
14 ; CHECK-LABEL: intrinsic_vadd_vv_nxv1i8_nxv1i8_nxv1i8:
15 ; CHECK: # %bb.0: # %entry
16 ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
17 ; CHECK-NEXT: vadd.vv v8, v8, v9
20 %a = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8(
21 <vscale x 1 x i8> undef,
26 ret <vscale x 1 x i8> %a
29 declare <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.nxv1i8(
36 define <vscale x 1 x i8> @intrinsic_vadd_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
37 ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv1i8_nxv1i8_nxv1i8:
38 ; CHECK: # %bb.0: # %entry
39 ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
40 ; CHECK-NEXT: vadd.vv v8, v9, v10, v0.t
43 %a = call <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.nxv1i8(
50 ret <vscale x 1 x i8> %a
53 declare <vscale x 2 x i8> @llvm.riscv.vadd.nxv2i8.nxv2i8(
59 define <vscale x 2 x i8> @intrinsic_vadd_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
60 ; CHECK-LABEL: intrinsic_vadd_vv_nxv2i8_nxv2i8_nxv2i8:
61 ; CHECK: # %bb.0: # %entry
62 ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
63 ; CHECK-NEXT: vadd.vv v8, v8, v9
66 %a = call <vscale x 2 x i8> @llvm.riscv.vadd.nxv2i8.nxv2i8(
67 <vscale x 2 x i8> undef,
72 ret <vscale x 2 x i8> %a
75 declare <vscale x 2 x i8> @llvm.riscv.vadd.mask.nxv2i8.nxv2i8(
82 define <vscale x 2 x i8> @intrinsic_vadd_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
83 ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv2i8_nxv2i8_nxv2i8:
84 ; CHECK: # %bb.0: # %entry
85 ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
86 ; CHECK-NEXT: vadd.vv v8, v9, v10, v0.t
89 %a = call <vscale x 2 x i8> @llvm.riscv.vadd.mask.nxv2i8.nxv2i8(
96 ret <vscale x 2 x i8> %a
99 declare <vscale x 4 x i8> @llvm.riscv.vadd.nxv4i8.nxv4i8(
105 define <vscale x 4 x i8> @intrinsic_vadd_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
106 ; CHECK-LABEL: intrinsic_vadd_vv_nxv4i8_nxv4i8_nxv4i8:
107 ; CHECK: # %bb.0: # %entry
108 ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
109 ; CHECK-NEXT: vadd.vv v8, v8, v9
112 %a = call <vscale x 4 x i8> @llvm.riscv.vadd.nxv4i8.nxv4i8(
113 <vscale x 4 x i8> undef,
114 <vscale x 4 x i8> %0,
115 <vscale x 4 x i8> %1,
118 ret <vscale x 4 x i8> %a
121 declare <vscale x 4 x i8> @llvm.riscv.vadd.mask.nxv4i8.nxv4i8(
128 define <vscale x 4 x i8> @intrinsic_vadd_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
129 ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv4i8_nxv4i8_nxv4i8:
130 ; CHECK: # %bb.0: # %entry
131 ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
132 ; CHECK-NEXT: vadd.vv v8, v9, v10, v0.t
135 %a = call <vscale x 4 x i8> @llvm.riscv.vadd.mask.nxv4i8.nxv4i8(
136 <vscale x 4 x i8> %0,
137 <vscale x 4 x i8> %1,
138 <vscale x 4 x i8> %2,
139 <vscale x 4 x i1> %3,
142 ret <vscale x 4 x i8> %a
145 declare <vscale x 8 x i8> @llvm.riscv.vadd.nxv8i8.nxv8i8(
151 define <vscale x 8 x i8> @intrinsic_vadd_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
152 ; CHECK-LABEL: intrinsic_vadd_vv_nxv8i8_nxv8i8_nxv8i8:
153 ; CHECK: # %bb.0: # %entry
154 ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
155 ; CHECK-NEXT: vadd.vv v8, v8, v9
158 %a = call <vscale x 8 x i8> @llvm.riscv.vadd.nxv8i8.nxv8i8(
159 <vscale x 8 x i8> undef,
160 <vscale x 8 x i8> %0,
161 <vscale x 8 x i8> %1,
164 ret <vscale x 8 x i8> %a
167 declare <vscale x 8 x i8> @llvm.riscv.vadd.mask.nxv8i8.nxv8i8(
174 define <vscale x 8 x i8> @intrinsic_vadd_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
175 ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv8i8_nxv8i8_nxv8i8:
176 ; CHECK: # %bb.0: # %entry
177 ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
178 ; CHECK-NEXT: vadd.vv v8, v9, v10, v0.t
181 %a = call <vscale x 8 x i8> @llvm.riscv.vadd.mask.nxv8i8.nxv8i8(
182 <vscale x 8 x i8> %0,
183 <vscale x 8 x i8> %1,
184 <vscale x 8 x i8> %2,
185 <vscale x 8 x i1> %3,
188 ret <vscale x 8 x i8> %a
191 declare <vscale x 16 x i8> @llvm.riscv.vadd.nxv16i8.nxv16i8(
197 define <vscale x 16 x i8> @intrinsic_vadd_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, iXLen %2) nounwind {
198 ; CHECK-LABEL: intrinsic_vadd_vv_nxv16i8_nxv16i8_nxv16i8:
199 ; CHECK: # %bb.0: # %entry
200 ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
201 ; CHECK-NEXT: vadd.vv v8, v8, v10
204 %a = call <vscale x 16 x i8> @llvm.riscv.vadd.nxv16i8.nxv16i8(
205 <vscale x 16 x i8> undef,
206 <vscale x 16 x i8> %0,
207 <vscale x 16 x i8> %1,
210 ret <vscale x 16 x i8> %a
213 declare <vscale x 16 x i8> @llvm.riscv.vadd.mask.nxv16i8.nxv16i8(
220 define <vscale x 16 x i8> @intrinsic_vadd_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
221 ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv16i8_nxv16i8_nxv16i8:
222 ; CHECK: # %bb.0: # %entry
223 ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
224 ; CHECK-NEXT: vadd.vv v8, v10, v12, v0.t
227 %a = call <vscale x 16 x i8> @llvm.riscv.vadd.mask.nxv16i8.nxv16i8(
228 <vscale x 16 x i8> %0,
229 <vscale x 16 x i8> %1,
230 <vscale x 16 x i8> %2,
231 <vscale x 16 x i1> %3,
234 ret <vscale x 16 x i8> %a
237 declare <vscale x 32 x i8> @llvm.riscv.vadd.nxv32i8.nxv32i8(
243 define <vscale x 32 x i8> @intrinsic_vadd_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, iXLen %2) nounwind {
244 ; CHECK-LABEL: intrinsic_vadd_vv_nxv32i8_nxv32i8_nxv32i8:
245 ; CHECK: # %bb.0: # %entry
246 ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
247 ; CHECK-NEXT: vadd.vv v8, v8, v12
250 %a = call <vscale x 32 x i8> @llvm.riscv.vadd.nxv32i8.nxv32i8(
251 <vscale x 32 x i8> undef,
252 <vscale x 32 x i8> %0,
253 <vscale x 32 x i8> %1,
256 ret <vscale x 32 x i8> %a
259 declare <vscale x 32 x i8> @llvm.riscv.vadd.mask.nxv32i8.nxv32i8(
266 define <vscale x 32 x i8> @intrinsic_vadd_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
267 ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv32i8_nxv32i8_nxv32i8:
268 ; CHECK: # %bb.0: # %entry
269 ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
270 ; CHECK-NEXT: vadd.vv v8, v12, v16, v0.t
273 %a = call <vscale x 32 x i8> @llvm.riscv.vadd.mask.nxv32i8.nxv32i8(
274 <vscale x 32 x i8> %0,
275 <vscale x 32 x i8> %1,
276 <vscale x 32 x i8> %2,
277 <vscale x 32 x i1> %3,
280 ret <vscale x 32 x i8> %a
283 declare <vscale x 64 x i8> @llvm.riscv.vadd.nxv64i8.nxv64i8(
289 define <vscale x 64 x i8> @intrinsic_vadd_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, iXLen %2) nounwind {
290 ; CHECK-LABEL: intrinsic_vadd_vv_nxv64i8_nxv64i8_nxv64i8:
291 ; CHECK: # %bb.0: # %entry
292 ; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
293 ; CHECK-NEXT: vadd.vv v8, v8, v16
296 %a = call <vscale x 64 x i8> @llvm.riscv.vadd.nxv64i8.nxv64i8(
297 <vscale x 64 x i8> undef,
298 <vscale x 64 x i8> %0,
299 <vscale x 64 x i8> %1,
302 ret <vscale x 64 x i8> %a
305 declare <vscale x 64 x i8> @llvm.riscv.vadd.mask.nxv64i8.nxv64i8(
312 define <vscale x 64 x i8> @intrinsic_vadd_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, iXLen %4) nounwind {
313 ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv64i8_nxv64i8_nxv64i8:
314 ; CHECK: # %bb.0: # %entry
315 ; CHECK-NEXT: vl8r.v v24, (a0)
316 ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu
317 ; CHECK-NEXT: vadd.vv v8, v16, v24, v0.t
320 %a = call <vscale x 64 x i8> @llvm.riscv.vadd.mask.nxv64i8.nxv64i8(
321 <vscale x 64 x i8> %0,
322 <vscale x 64 x i8> %1,
323 <vscale x 64 x i8> %2,
324 <vscale x 64 x i1> %3,
327 ret <vscale x 64 x i8> %a
330 declare <vscale x 1 x i16> @llvm.riscv.vadd.nxv1i16.nxv1i16(
336 define <vscale x 1 x i16> @intrinsic_vadd_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
337 ; CHECK-LABEL: intrinsic_vadd_vv_nxv1i16_nxv1i16_nxv1i16:
338 ; CHECK: # %bb.0: # %entry
339 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
340 ; CHECK-NEXT: vadd.vv v8, v8, v9
343 %a = call <vscale x 1 x i16> @llvm.riscv.vadd.nxv1i16.nxv1i16(
344 <vscale x 1 x i16> undef,
345 <vscale x 1 x i16> %0,
346 <vscale x 1 x i16> %1,
349 ret <vscale x 1 x i16> %a
352 declare <vscale x 1 x i16> @llvm.riscv.vadd.mask.nxv1i16.nxv1i16(
359 define <vscale x 1 x i16> @intrinsic_vadd_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
360 ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv1i16_nxv1i16_nxv1i16:
361 ; CHECK: # %bb.0: # %entry
362 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
363 ; CHECK-NEXT: vadd.vv v8, v9, v10, v0.t
366 %a = call <vscale x 1 x i16> @llvm.riscv.vadd.mask.nxv1i16.nxv1i16(
367 <vscale x 1 x i16> %0,
368 <vscale x 1 x i16> %1,
369 <vscale x 1 x i16> %2,
370 <vscale x 1 x i1> %3,
373 ret <vscale x 1 x i16> %a
376 declare <vscale x 2 x i16> @llvm.riscv.vadd.nxv2i16.nxv2i16(
382 define <vscale x 2 x i16> @intrinsic_vadd_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
383 ; CHECK-LABEL: intrinsic_vadd_vv_nxv2i16_nxv2i16_nxv2i16:
384 ; CHECK: # %bb.0: # %entry
385 ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
386 ; CHECK-NEXT: vadd.vv v8, v8, v9
389 %a = call <vscale x 2 x i16> @llvm.riscv.vadd.nxv2i16.nxv2i16(
390 <vscale x 2 x i16> undef,
391 <vscale x 2 x i16> %0,
392 <vscale x 2 x i16> %1,
395 ret <vscale x 2 x i16> %a
398 declare <vscale x 2 x i16> @llvm.riscv.vadd.mask.nxv2i16.nxv2i16(
405 define <vscale x 2 x i16> @intrinsic_vadd_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
406 ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv2i16_nxv2i16_nxv2i16:
407 ; CHECK: # %bb.0: # %entry
408 ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
409 ; CHECK-NEXT: vadd.vv v8, v9, v10, v0.t
412 %a = call <vscale x 2 x i16> @llvm.riscv.vadd.mask.nxv2i16.nxv2i16(
413 <vscale x 2 x i16> %0,
414 <vscale x 2 x i16> %1,
415 <vscale x 2 x i16> %2,
416 <vscale x 2 x i1> %3,
419 ret <vscale x 2 x i16> %a
422 declare <vscale x 4 x i16> @llvm.riscv.vadd.nxv4i16.nxv4i16(
428 define <vscale x 4 x i16> @intrinsic_vadd_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
429 ; CHECK-LABEL: intrinsic_vadd_vv_nxv4i16_nxv4i16_nxv4i16:
430 ; CHECK: # %bb.0: # %entry
431 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
432 ; CHECK-NEXT: vadd.vv v8, v8, v9
435 %a = call <vscale x 4 x i16> @llvm.riscv.vadd.nxv4i16.nxv4i16(
436 <vscale x 4 x i16> undef,
437 <vscale x 4 x i16> %0,
438 <vscale x 4 x i16> %1,
441 ret <vscale x 4 x i16> %a
444 declare <vscale x 4 x i16> @llvm.riscv.vadd.mask.nxv4i16.nxv4i16(
451 define <vscale x 4 x i16> @intrinsic_vadd_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
452 ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv4i16_nxv4i16_nxv4i16:
453 ; CHECK: # %bb.0: # %entry
454 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
455 ; CHECK-NEXT: vadd.vv v8, v9, v10, v0.t
458 %a = call <vscale x 4 x i16> @llvm.riscv.vadd.mask.nxv4i16.nxv4i16(
459 <vscale x 4 x i16> %0,
460 <vscale x 4 x i16> %1,
461 <vscale x 4 x i16> %2,
462 <vscale x 4 x i1> %3,
465 ret <vscale x 4 x i16> %a
468 declare <vscale x 8 x i16> @llvm.riscv.vadd.nxv8i16.nxv8i16(
474 define <vscale x 8 x i16> @intrinsic_vadd_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
475 ; CHECK-LABEL: intrinsic_vadd_vv_nxv8i16_nxv8i16_nxv8i16:
476 ; CHECK: # %bb.0: # %entry
477 ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
478 ; CHECK-NEXT: vadd.vv v8, v8, v10
481 %a = call <vscale x 8 x i16> @llvm.riscv.vadd.nxv8i16.nxv8i16(
482 <vscale x 8 x i16> undef,
483 <vscale x 8 x i16> %0,
484 <vscale x 8 x i16> %1,
487 ret <vscale x 8 x i16> %a
490 declare <vscale x 8 x i16> @llvm.riscv.vadd.mask.nxv8i16.nxv8i16(
497 define <vscale x 8 x i16> @intrinsic_vadd_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
498 ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv8i16_nxv8i16_nxv8i16:
499 ; CHECK: # %bb.0: # %entry
500 ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
501 ; CHECK-NEXT: vadd.vv v8, v10, v12, v0.t
504 %a = call <vscale x 8 x i16> @llvm.riscv.vadd.mask.nxv8i16.nxv8i16(
505 <vscale x 8 x i16> %0,
506 <vscale x 8 x i16> %1,
507 <vscale x 8 x i16> %2,
508 <vscale x 8 x i1> %3,
511 ret <vscale x 8 x i16> %a
514 declare <vscale x 16 x i16> @llvm.riscv.vadd.nxv16i16.nxv16i16(
520 define <vscale x 16 x i16> @intrinsic_vadd_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
521 ; CHECK-LABEL: intrinsic_vadd_vv_nxv16i16_nxv16i16_nxv16i16:
522 ; CHECK: # %bb.0: # %entry
523 ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
524 ; CHECK-NEXT: vadd.vv v8, v8, v12
527 %a = call <vscale x 16 x i16> @llvm.riscv.vadd.nxv16i16.nxv16i16(
528 <vscale x 16 x i16> undef,
529 <vscale x 16 x i16> %0,
530 <vscale x 16 x i16> %1,
533 ret <vscale x 16 x i16> %a
536 declare <vscale x 16 x i16> @llvm.riscv.vadd.mask.nxv16i16.nxv16i16(
543 define <vscale x 16 x i16> @intrinsic_vadd_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
544 ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv16i16_nxv16i16_nxv16i16:
545 ; CHECK: # %bb.0: # %entry
546 ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
547 ; CHECK-NEXT: vadd.vv v8, v12, v16, v0.t
550 %a = call <vscale x 16 x i16> @llvm.riscv.vadd.mask.nxv16i16.nxv16i16(
551 <vscale x 16 x i16> %0,
552 <vscale x 16 x i16> %1,
553 <vscale x 16 x i16> %2,
554 <vscale x 16 x i1> %3,
557 ret <vscale x 16 x i16> %a
560 declare <vscale x 32 x i16> @llvm.riscv.vadd.nxv32i16.nxv32i16(
566 define <vscale x 32 x i16> @intrinsic_vadd_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, iXLen %2) nounwind {
567 ; CHECK-LABEL: intrinsic_vadd_vv_nxv32i16_nxv32i16_nxv32i16:
568 ; CHECK: # %bb.0: # %entry
569 ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
570 ; CHECK-NEXT: vadd.vv v8, v8, v16
573 %a = call <vscale x 32 x i16> @llvm.riscv.vadd.nxv32i16.nxv32i16(
574 <vscale x 32 x i16> undef,
575 <vscale x 32 x i16> %0,
576 <vscale x 32 x i16> %1,
579 ret <vscale x 32 x i16> %a
582 declare <vscale x 32 x i16> @llvm.riscv.vadd.mask.nxv32i16.nxv32i16(
589 define <vscale x 32 x i16> @intrinsic_vadd_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
590 ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv32i16_nxv32i16_nxv32i16:
591 ; CHECK: # %bb.0: # %entry
592 ; CHECK-NEXT: vl8re16.v v24, (a0)
593 ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu
594 ; CHECK-NEXT: vadd.vv v8, v16, v24, v0.t
597 %a = call <vscale x 32 x i16> @llvm.riscv.vadd.mask.nxv32i16.nxv32i16(
598 <vscale x 32 x i16> %0,
599 <vscale x 32 x i16> %1,
600 <vscale x 32 x i16> %2,
601 <vscale x 32 x i1> %3,
604 ret <vscale x 32 x i16> %a
607 declare <vscale x 1 x i32> @llvm.riscv.vadd.nxv1i32.nxv1i32(
613 define <vscale x 1 x i32> @intrinsic_vadd_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
614 ; CHECK-LABEL: intrinsic_vadd_vv_nxv1i32_nxv1i32_nxv1i32:
615 ; CHECK: # %bb.0: # %entry
616 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
617 ; CHECK-NEXT: vadd.vv v8, v8, v9
620 %a = call <vscale x 1 x i32> @llvm.riscv.vadd.nxv1i32.nxv1i32(
621 <vscale x 1 x i32> undef,
622 <vscale x 1 x i32> %0,
623 <vscale x 1 x i32> %1,
626 ret <vscale x 1 x i32> %a
629 declare <vscale x 1 x i32> @llvm.riscv.vadd.mask.nxv1i32.nxv1i32(
636 define <vscale x 1 x i32> @intrinsic_vadd_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
637 ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv1i32_nxv1i32_nxv1i32:
638 ; CHECK: # %bb.0: # %entry
639 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
640 ; CHECK-NEXT: vadd.vv v8, v9, v10, v0.t
643 %a = call <vscale x 1 x i32> @llvm.riscv.vadd.mask.nxv1i32.nxv1i32(
644 <vscale x 1 x i32> %0,
645 <vscale x 1 x i32> %1,
646 <vscale x 1 x i32> %2,
647 <vscale x 1 x i1> %3,
650 ret <vscale x 1 x i32> %a
653 declare <vscale x 2 x i32> @llvm.riscv.vadd.nxv2i32.nxv2i32(
659 define <vscale x 2 x i32> @intrinsic_vadd_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
660 ; CHECK-LABEL: intrinsic_vadd_vv_nxv2i32_nxv2i32_nxv2i32:
661 ; CHECK: # %bb.0: # %entry
662 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
663 ; CHECK-NEXT: vadd.vv v8, v8, v9
666 %a = call <vscale x 2 x i32> @llvm.riscv.vadd.nxv2i32.nxv2i32(
667 <vscale x 2 x i32> undef,
668 <vscale x 2 x i32> %0,
669 <vscale x 2 x i32> %1,
672 ret <vscale x 2 x i32> %a
675 declare <vscale x 2 x i32> @llvm.riscv.vadd.mask.nxv2i32.nxv2i32(
682 define <vscale x 2 x i32> @intrinsic_vadd_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
683 ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv2i32_nxv2i32_nxv2i32:
684 ; CHECK: # %bb.0: # %entry
685 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
686 ; CHECK-NEXT: vadd.vv v8, v9, v10, v0.t
689 %a = call <vscale x 2 x i32> @llvm.riscv.vadd.mask.nxv2i32.nxv2i32(
690 <vscale x 2 x i32> %0,
691 <vscale x 2 x i32> %1,
692 <vscale x 2 x i32> %2,
693 <vscale x 2 x i1> %3,
696 ret <vscale x 2 x i32> %a
699 declare <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(
705 define <vscale x 4 x i32> @intrinsic_vadd_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
706 ; CHECK-LABEL: intrinsic_vadd_vv_nxv4i32_nxv4i32_nxv4i32:
707 ; CHECK: # %bb.0: # %entry
708 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
709 ; CHECK-NEXT: vadd.vv v8, v8, v10
712 %a = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(
713 <vscale x 4 x i32> undef,
714 <vscale x 4 x i32> %0,
715 <vscale x 4 x i32> %1,
718 ret <vscale x 4 x i32> %a
721 declare <vscale x 4 x i32> @llvm.riscv.vadd.mask.nxv4i32.nxv4i32(
728 define <vscale x 4 x i32> @intrinsic_vadd_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
729 ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv4i32_nxv4i32_nxv4i32:
730 ; CHECK: # %bb.0: # %entry
731 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
732 ; CHECK-NEXT: vadd.vv v8, v10, v12, v0.t
735 %a = call <vscale x 4 x i32> @llvm.riscv.vadd.mask.nxv4i32.nxv4i32(
736 <vscale x 4 x i32> %0,
737 <vscale x 4 x i32> %1,
738 <vscale x 4 x i32> %2,
739 <vscale x 4 x i1> %3,
742 ret <vscale x 4 x i32> %a
745 declare <vscale x 8 x i32> @llvm.riscv.vadd.nxv8i32.nxv8i32(
751 define <vscale x 8 x i32> @intrinsic_vadd_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
752 ; CHECK-LABEL: intrinsic_vadd_vv_nxv8i32_nxv8i32_nxv8i32:
753 ; CHECK: # %bb.0: # %entry
754 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
755 ; CHECK-NEXT: vadd.vv v8, v8, v12
758 %a = call <vscale x 8 x i32> @llvm.riscv.vadd.nxv8i32.nxv8i32(
759 <vscale x 8 x i32> undef,
760 <vscale x 8 x i32> %0,
761 <vscale x 8 x i32> %1,
764 ret <vscale x 8 x i32> %a
767 declare <vscale x 8 x i32> @llvm.riscv.vadd.mask.nxv8i32.nxv8i32(
774 define <vscale x 8 x i32> @intrinsic_vadd_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
775 ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv8i32_nxv8i32_nxv8i32:
776 ; CHECK: # %bb.0: # %entry
777 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
778 ; CHECK-NEXT: vadd.vv v8, v12, v16, v0.t
781 %a = call <vscale x 8 x i32> @llvm.riscv.vadd.mask.nxv8i32.nxv8i32(
782 <vscale x 8 x i32> %0,
783 <vscale x 8 x i32> %1,
784 <vscale x 8 x i32> %2,
785 <vscale x 8 x i1> %3,
788 ret <vscale x 8 x i32> %a
791 declare <vscale x 16 x i32> @llvm.riscv.vadd.nxv16i32.nxv16i32(
797 define <vscale x 16 x i32> @intrinsic_vadd_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, iXLen %2) nounwind {
798 ; CHECK-LABEL: intrinsic_vadd_vv_nxv16i32_nxv16i32_nxv16i32:
799 ; CHECK: # %bb.0: # %entry
800 ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
801 ; CHECK-NEXT: vadd.vv v8, v8, v16
804 %a = call <vscale x 16 x i32> @llvm.riscv.vadd.nxv16i32.nxv16i32(
805 <vscale x 16 x i32> undef,
806 <vscale x 16 x i32> %0,
807 <vscale x 16 x i32> %1,
810 ret <vscale x 16 x i32> %a
813 declare <vscale x 16 x i32> @llvm.riscv.vadd.mask.nxv16i32.nxv16i32(
820 define <vscale x 16 x i32> @intrinsic_vadd_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
821 ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv16i32_nxv16i32_nxv16i32:
822 ; CHECK: # %bb.0: # %entry
823 ; CHECK-NEXT: vl8re32.v v24, (a0)
824 ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
825 ; CHECK-NEXT: vadd.vv v8, v16, v24, v0.t
828 %a = call <vscale x 16 x i32> @llvm.riscv.vadd.mask.nxv16i32.nxv16i32(
829 <vscale x 16 x i32> %0,
830 <vscale x 16 x i32> %1,
831 <vscale x 16 x i32> %2,
832 <vscale x 16 x i1> %3,
835 ret <vscale x 16 x i32> %a
838 declare <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.nxv1i64(
844 define <vscale x 1 x i64> @intrinsic_vadd_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind {
845 ; CHECK-LABEL: intrinsic_vadd_vv_nxv1i64_nxv1i64_nxv1i64:
846 ; CHECK: # %bb.0: # %entry
847 ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
848 ; CHECK-NEXT: vadd.vv v8, v8, v9
851 %a = call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.nxv1i64(
852 <vscale x 1 x i64> undef,
853 <vscale x 1 x i64> %0,
854 <vscale x 1 x i64> %1,
857 ret <vscale x 1 x i64> %a
860 declare <vscale x 1 x i64> @llvm.riscv.vadd.mask.nxv1i64.nxv1i64(
867 define <vscale x 1 x i64> @intrinsic_vadd_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
868 ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv1i64_nxv1i64_nxv1i64:
869 ; CHECK: # %bb.0: # %entry
870 ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
871 ; CHECK-NEXT: vadd.vv v8, v9, v10, v0.t
874 %a = call <vscale x 1 x i64> @llvm.riscv.vadd.mask.nxv1i64.nxv1i64(
875 <vscale x 1 x i64> %0,
876 <vscale x 1 x i64> %1,
877 <vscale x 1 x i64> %2,
878 <vscale x 1 x i1> %3,
881 ret <vscale x 1 x i64> %a
884 declare <vscale x 2 x i64> @llvm.riscv.vadd.nxv2i64.nxv2i64(
890 define <vscale x 2 x i64> @intrinsic_vadd_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, iXLen %2) nounwind {
891 ; CHECK-LABEL: intrinsic_vadd_vv_nxv2i64_nxv2i64_nxv2i64:
892 ; CHECK: # %bb.0: # %entry
893 ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
894 ; CHECK-NEXT: vadd.vv v8, v8, v10
897 %a = call <vscale x 2 x i64> @llvm.riscv.vadd.nxv2i64.nxv2i64(
898 <vscale x 2 x i64> undef,
899 <vscale x 2 x i64> %0,
900 <vscale x 2 x i64> %1,
903 ret <vscale x 2 x i64> %a
906 declare <vscale x 2 x i64> @llvm.riscv.vadd.mask.nxv2i64.nxv2i64(
913 define <vscale x 2 x i64> @intrinsic_vadd_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
914 ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv2i64_nxv2i64_nxv2i64:
915 ; CHECK: # %bb.0: # %entry
916 ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
917 ; CHECK-NEXT: vadd.vv v8, v10, v12, v0.t
920 %a = call <vscale x 2 x i64> @llvm.riscv.vadd.mask.nxv2i64.nxv2i64(
921 <vscale x 2 x i64> %0,
922 <vscale x 2 x i64> %1,
923 <vscale x 2 x i64> %2,
924 <vscale x 2 x i1> %3,
927 ret <vscale x 2 x i64> %a
930 declare <vscale x 4 x i64> @llvm.riscv.vadd.nxv4i64.nxv4i64(
936 define <vscale x 4 x i64> @intrinsic_vadd_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, iXLen %2) nounwind {
937 ; CHECK-LABEL: intrinsic_vadd_vv_nxv4i64_nxv4i64_nxv4i64:
938 ; CHECK: # %bb.0: # %entry
939 ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
940 ; CHECK-NEXT: vadd.vv v8, v8, v12
943 %a = call <vscale x 4 x i64> @llvm.riscv.vadd.nxv4i64.nxv4i64(
944 <vscale x 4 x i64> undef,
945 <vscale x 4 x i64> %0,
946 <vscale x 4 x i64> %1,
949 ret <vscale x 4 x i64> %a
952 declare <vscale x 4 x i64> @llvm.riscv.vadd.mask.nxv4i64.nxv4i64(
959 define <vscale x 4 x i64> @intrinsic_vadd_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
960 ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv4i64_nxv4i64_nxv4i64:
961 ; CHECK: # %bb.0: # %entry
962 ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
963 ; CHECK-NEXT: vadd.vv v8, v12, v16, v0.t
966 %a = call <vscale x 4 x i64> @llvm.riscv.vadd.mask.nxv4i64.nxv4i64(
967 <vscale x 4 x i64> %0,
968 <vscale x 4 x i64> %1,
969 <vscale x 4 x i64> %2,
970 <vscale x 4 x i1> %3,
973 ret <vscale x 4 x i64> %a
976 declare <vscale x 8 x i64> @llvm.riscv.vadd.nxv8i64.nxv8i64(
982 define <vscale x 8 x i64> @intrinsic_vadd_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, iXLen %2) nounwind {
983 ; CHECK-LABEL: intrinsic_vadd_vv_nxv8i64_nxv8i64_nxv8i64:
984 ; CHECK: # %bb.0: # %entry
985 ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
986 ; CHECK-NEXT: vadd.vv v8, v8, v16
989 %a = call <vscale x 8 x i64> @llvm.riscv.vadd.nxv8i64.nxv8i64(
990 <vscale x 8 x i64> undef,
991 <vscale x 8 x i64> %0,
992 <vscale x 8 x i64> %1,
995 ret <vscale x 8 x i64> %a
998 declare <vscale x 8 x i64> @llvm.riscv.vadd.mask.nxv8i64.nxv8i64(
1005 define <vscale x 8 x i64> @intrinsic_vadd_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
1006 ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv8i64_nxv8i64_nxv8i64:
1007 ; CHECK: # %bb.0: # %entry
1008 ; CHECK-NEXT: vl8re64.v v24, (a0)
1009 ; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu
1010 ; CHECK-NEXT: vadd.vv v8, v16, v24, v0.t
1013 %a = call <vscale x 8 x i64> @llvm.riscv.vadd.mask.nxv8i64.nxv8i64(
1014 <vscale x 8 x i64> %0,
1015 <vscale x 8 x i64> %1,
1016 <vscale x 8 x i64> %2,
1017 <vscale x 8 x i1> %3,
1020 ret <vscale x 8 x i64> %a
1023 declare <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.i8(
1029 define <vscale x 1 x i8> @intrinsic_vadd_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, iXLen %2) nounwind {
1030 ; CHECK-LABEL: intrinsic_vadd_vx_nxv1i8_nxv1i8_i8:
1031 ; CHECK: # %bb.0: # %entry
1032 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
1033 ; CHECK-NEXT: vadd.vx v8, v8, a0
1036 %a = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.i8(
1037 <vscale x 1 x i8> undef,
1038 <vscale x 1 x i8> %0,
1042 ret <vscale x 1 x i8> %a
1045 declare <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.i8(
1052 define <vscale x 1 x i8> @intrinsic_vadd_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
1053 ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv1i8_nxv1i8_i8:
1054 ; CHECK: # %bb.0: # %entry
1055 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
1056 ; CHECK-NEXT: vadd.vx v8, v9, a0, v0.t
1059 %a = call <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.i8(
1060 <vscale x 1 x i8> %0,
1061 <vscale x 1 x i8> %1,
1063 <vscale x 1 x i1> %3,
1066 ret <vscale x 1 x i8> %a
1069 declare <vscale x 2 x i8> @llvm.riscv.vadd.nxv2i8.i8(
1075 define <vscale x 2 x i8> @intrinsic_vadd_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, iXLen %2) nounwind {
1076 ; CHECK-LABEL: intrinsic_vadd_vx_nxv2i8_nxv2i8_i8:
1077 ; CHECK: # %bb.0: # %entry
1078 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
1079 ; CHECK-NEXT: vadd.vx v8, v8, a0
1082 %a = call <vscale x 2 x i8> @llvm.riscv.vadd.nxv2i8.i8(
1083 <vscale x 2 x i8> undef,
1084 <vscale x 2 x i8> %0,
1088 ret <vscale x 2 x i8> %a
1091 declare <vscale x 2 x i8> @llvm.riscv.vadd.mask.nxv2i8.i8(
1098 define <vscale x 2 x i8> @intrinsic_vadd_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
1099 ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv2i8_nxv2i8_i8:
1100 ; CHECK: # %bb.0: # %entry
1101 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
1102 ; CHECK-NEXT: vadd.vx v8, v9, a0, v0.t
1105 %a = call <vscale x 2 x i8> @llvm.riscv.vadd.mask.nxv2i8.i8(
1106 <vscale x 2 x i8> %0,
1107 <vscale x 2 x i8> %1,
1109 <vscale x 2 x i1> %3,
1112 ret <vscale x 2 x i8> %a
1115 declare <vscale x 4 x i8> @llvm.riscv.vadd.nxv4i8.i8(
1121 define <vscale x 4 x i8> @intrinsic_vadd_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, iXLen %2) nounwind {
1122 ; CHECK-LABEL: intrinsic_vadd_vx_nxv4i8_nxv4i8_i8:
1123 ; CHECK: # %bb.0: # %entry
1124 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
1125 ; CHECK-NEXT: vadd.vx v8, v8, a0
1128 %a = call <vscale x 4 x i8> @llvm.riscv.vadd.nxv4i8.i8(
1129 <vscale x 4 x i8> undef,
1130 <vscale x 4 x i8> %0,
1134 ret <vscale x 4 x i8> %a
1137 declare <vscale x 4 x i8> @llvm.riscv.vadd.mask.nxv4i8.i8(
1144 define <vscale x 4 x i8> @intrinsic_vadd_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
1145 ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv4i8_nxv4i8_i8:
1146 ; CHECK: # %bb.0: # %entry
1147 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
1148 ; CHECK-NEXT: vadd.vx v8, v9, a0, v0.t
1151 %a = call <vscale x 4 x i8> @llvm.riscv.vadd.mask.nxv4i8.i8(
1152 <vscale x 4 x i8> %0,
1153 <vscale x 4 x i8> %1,
1155 <vscale x 4 x i1> %3,
1158 ret <vscale x 4 x i8> %a
1161 declare <vscale x 8 x i8> @llvm.riscv.vadd.nxv8i8.i8(
1167 define <vscale x 8 x i8> @intrinsic_vadd_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, iXLen %2) nounwind {
1168 ; CHECK-LABEL: intrinsic_vadd_vx_nxv8i8_nxv8i8_i8:
1169 ; CHECK: # %bb.0: # %entry
1170 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
1171 ; CHECK-NEXT: vadd.vx v8, v8, a0
1174 %a = call <vscale x 8 x i8> @llvm.riscv.vadd.nxv8i8.i8(
1175 <vscale x 8 x i8> undef,
1176 <vscale x 8 x i8> %0,
1180 ret <vscale x 8 x i8> %a
1183 declare <vscale x 8 x i8> @llvm.riscv.vadd.mask.nxv8i8.i8(
1190 define <vscale x 8 x i8> @intrinsic_vadd_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
1191 ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv8i8_nxv8i8_i8:
1192 ; CHECK: # %bb.0: # %entry
1193 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
1194 ; CHECK-NEXT: vadd.vx v8, v9, a0, v0.t
1197 %a = call <vscale x 8 x i8> @llvm.riscv.vadd.mask.nxv8i8.i8(
1198 <vscale x 8 x i8> %0,
1199 <vscale x 8 x i8> %1,
1201 <vscale x 8 x i1> %3,
1204 ret <vscale x 8 x i8> %a
1207 declare <vscale x 16 x i8> @llvm.riscv.vadd.nxv16i8.i8(
1213 define <vscale x 16 x i8> @intrinsic_vadd_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, iXLen %2) nounwind {
1214 ; CHECK-LABEL: intrinsic_vadd_vx_nxv16i8_nxv16i8_i8:
1215 ; CHECK: # %bb.0: # %entry
1216 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
1217 ; CHECK-NEXT: vadd.vx v8, v8, a0
1220 %a = call <vscale x 16 x i8> @llvm.riscv.vadd.nxv16i8.i8(
1221 <vscale x 16 x i8> undef,
1222 <vscale x 16 x i8> %0,
1226 ret <vscale x 16 x i8> %a
1229 declare <vscale x 16 x i8> @llvm.riscv.vadd.mask.nxv16i8.i8(
1236 define <vscale x 16 x i8> @intrinsic_vadd_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
1237 ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv16i8_nxv16i8_i8:
1238 ; CHECK: # %bb.0: # %entry
1239 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
1240 ; CHECK-NEXT: vadd.vx v8, v10, a0, v0.t
1243 %a = call <vscale x 16 x i8> @llvm.riscv.vadd.mask.nxv16i8.i8(
1244 <vscale x 16 x i8> %0,
1245 <vscale x 16 x i8> %1,
1247 <vscale x 16 x i1> %3,
1250 ret <vscale x 16 x i8> %a
1253 declare <vscale x 32 x i8> @llvm.riscv.vadd.nxv32i8.i8(
1259 define <vscale x 32 x i8> @intrinsic_vadd_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, iXLen %2) nounwind {
1260 ; CHECK-LABEL: intrinsic_vadd_vx_nxv32i8_nxv32i8_i8:
1261 ; CHECK: # %bb.0: # %entry
1262 ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
1263 ; CHECK-NEXT: vadd.vx v8, v8, a0
1266 %a = call <vscale x 32 x i8> @llvm.riscv.vadd.nxv32i8.i8(
1267 <vscale x 32 x i8> undef,
1268 <vscale x 32 x i8> %0,
1272 ret <vscale x 32 x i8> %a
1275 declare <vscale x 32 x i8> @llvm.riscv.vadd.mask.nxv32i8.i8(
1282 define <vscale x 32 x i8> @intrinsic_vadd_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
1283 ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv32i8_nxv32i8_i8:
1284 ; CHECK: # %bb.0: # %entry
1285 ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
1286 ; CHECK-NEXT: vadd.vx v8, v12, a0, v0.t
1289 %a = call <vscale x 32 x i8> @llvm.riscv.vadd.mask.nxv32i8.i8(
1290 <vscale x 32 x i8> %0,
1291 <vscale x 32 x i8> %1,
1293 <vscale x 32 x i1> %3,
1296 ret <vscale x 32 x i8> %a
1299 declare <vscale x 64 x i8> @llvm.riscv.vadd.nxv64i8.i8(
1305 define <vscale x 64 x i8> @intrinsic_vadd_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, iXLen %2) nounwind {
1306 ; CHECK-LABEL: intrinsic_vadd_vx_nxv64i8_nxv64i8_i8:
1307 ; CHECK: # %bb.0: # %entry
1308 ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
1309 ; CHECK-NEXT: vadd.vx v8, v8, a0
1312 %a = call <vscale x 64 x i8> @llvm.riscv.vadd.nxv64i8.i8(
1313 <vscale x 64 x i8> undef,
1314 <vscale x 64 x i8> %0,
1318 ret <vscale x 64 x i8> %a
1321 declare <vscale x 64 x i8> @llvm.riscv.vadd.mask.nxv64i8.i8(
1328 define <vscale x 64 x i8> @intrinsic_vadd_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, iXLen %4) nounwind {
1329 ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv64i8_nxv64i8_i8:
1330 ; CHECK: # %bb.0: # %entry
1331 ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu
1332 ; CHECK-NEXT: vadd.vx v8, v16, a0, v0.t
1335 %a = call <vscale x 64 x i8> @llvm.riscv.vadd.mask.nxv64i8.i8(
1336 <vscale x 64 x i8> %0,
1337 <vscale x 64 x i8> %1,
1339 <vscale x 64 x i1> %3,
1342 ret <vscale x 64 x i8> %a
1345 declare <vscale x 1 x i16> @llvm.riscv.vadd.nxv1i16.i16(
1351 define <vscale x 1 x i16> @intrinsic_vadd_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, iXLen %2) nounwind {
1352 ; CHECK-LABEL: intrinsic_vadd_vx_nxv1i16_nxv1i16_i16:
1353 ; CHECK: # %bb.0: # %entry
1354 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
1355 ; CHECK-NEXT: vadd.vx v8, v8, a0
1358 %a = call <vscale x 1 x i16> @llvm.riscv.vadd.nxv1i16.i16(
1359 <vscale x 1 x i16> undef,
1360 <vscale x 1 x i16> %0,
1364 ret <vscale x 1 x i16> %a
1367 declare <vscale x 1 x i16> @llvm.riscv.vadd.mask.nxv1i16.i16(
1374 define <vscale x 1 x i16> @intrinsic_vadd_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
1375 ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv1i16_nxv1i16_i16:
1376 ; CHECK: # %bb.0: # %entry
1377 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
1378 ; CHECK-NEXT: vadd.vx v8, v9, a0, v0.t
1381 %a = call <vscale x 1 x i16> @llvm.riscv.vadd.mask.nxv1i16.i16(
1382 <vscale x 1 x i16> %0,
1383 <vscale x 1 x i16> %1,
1385 <vscale x 1 x i1> %3,
1388 ret <vscale x 1 x i16> %a
1391 declare <vscale x 2 x i16> @llvm.riscv.vadd.nxv2i16.i16(
1397 define <vscale x 2 x i16> @intrinsic_vadd_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, iXLen %2) nounwind {
1398 ; CHECK-LABEL: intrinsic_vadd_vx_nxv2i16_nxv2i16_i16:
1399 ; CHECK: # %bb.0: # %entry
1400 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
1401 ; CHECK-NEXT: vadd.vx v8, v8, a0
1404 %a = call <vscale x 2 x i16> @llvm.riscv.vadd.nxv2i16.i16(
1405 <vscale x 2 x i16> undef,
1406 <vscale x 2 x i16> %0,
1410 ret <vscale x 2 x i16> %a
1413 declare <vscale x 2 x i16> @llvm.riscv.vadd.mask.nxv2i16.i16(
1420 define <vscale x 2 x i16> @intrinsic_vadd_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
1421 ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv2i16_nxv2i16_i16:
1422 ; CHECK: # %bb.0: # %entry
1423 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
1424 ; CHECK-NEXT: vadd.vx v8, v9, a0, v0.t
1427 %a = call <vscale x 2 x i16> @llvm.riscv.vadd.mask.nxv2i16.i16(
1428 <vscale x 2 x i16> %0,
1429 <vscale x 2 x i16> %1,
1431 <vscale x 2 x i1> %3,
1434 ret <vscale x 2 x i16> %a
1437 declare <vscale x 4 x i16> @llvm.riscv.vadd.nxv4i16.i16(
1443 define <vscale x 4 x i16> @intrinsic_vadd_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, iXLen %2) nounwind {
1444 ; CHECK-LABEL: intrinsic_vadd_vx_nxv4i16_nxv4i16_i16:
1445 ; CHECK: # %bb.0: # %entry
1446 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
1447 ; CHECK-NEXT: vadd.vx v8, v8, a0
1450 %a = call <vscale x 4 x i16> @llvm.riscv.vadd.nxv4i16.i16(
1451 <vscale x 4 x i16> undef,
1452 <vscale x 4 x i16> %0,
1456 ret <vscale x 4 x i16> %a
1459 declare <vscale x 4 x i16> @llvm.riscv.vadd.mask.nxv4i16.i16(
1466 define <vscale x 4 x i16> @intrinsic_vadd_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
1467 ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv4i16_nxv4i16_i16:
1468 ; CHECK: # %bb.0: # %entry
1469 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
1470 ; CHECK-NEXT: vadd.vx v8, v9, a0, v0.t
1473 %a = call <vscale x 4 x i16> @llvm.riscv.vadd.mask.nxv4i16.i16(
1474 <vscale x 4 x i16> %0,
1475 <vscale x 4 x i16> %1,
1477 <vscale x 4 x i1> %3,
1480 ret <vscale x 4 x i16> %a
1483 declare <vscale x 8 x i16> @llvm.riscv.vadd.nxv8i16.i16(
1489 define <vscale x 8 x i16> @intrinsic_vadd_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, iXLen %2) nounwind {
1490 ; CHECK-LABEL: intrinsic_vadd_vx_nxv8i16_nxv8i16_i16:
1491 ; CHECK: # %bb.0: # %entry
1492 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
1493 ; CHECK-NEXT: vadd.vx v8, v8, a0
1496 %a = call <vscale x 8 x i16> @llvm.riscv.vadd.nxv8i16.i16(
1497 <vscale x 8 x i16> undef,
1498 <vscale x 8 x i16> %0,
1502 ret <vscale x 8 x i16> %a
1505 declare <vscale x 8 x i16> @llvm.riscv.vadd.mask.nxv8i16.i16(
1512 define <vscale x 8 x i16> @intrinsic_vadd_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
1513 ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv8i16_nxv8i16_i16:
1514 ; CHECK: # %bb.0: # %entry
1515 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
1516 ; CHECK-NEXT: vadd.vx v8, v10, a0, v0.t
1519 %a = call <vscale x 8 x i16> @llvm.riscv.vadd.mask.nxv8i16.i16(
1520 <vscale x 8 x i16> %0,
1521 <vscale x 8 x i16> %1,
1523 <vscale x 8 x i1> %3,
1526 ret <vscale x 8 x i16> %a
1529 declare <vscale x 16 x i16> @llvm.riscv.vadd.nxv16i16.i16(
1530 <vscale x 16 x i16>,
1531 <vscale x 16 x i16>,
1535 define <vscale x 16 x i16> @intrinsic_vadd_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, iXLen %2) nounwind {
1536 ; CHECK-LABEL: intrinsic_vadd_vx_nxv16i16_nxv16i16_i16:
1537 ; CHECK: # %bb.0: # %entry
1538 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
1539 ; CHECK-NEXT: vadd.vx v8, v8, a0
1542 %a = call <vscale x 16 x i16> @llvm.riscv.vadd.nxv16i16.i16(
1543 <vscale x 16 x i16> undef,
1544 <vscale x 16 x i16> %0,
1548 ret <vscale x 16 x i16> %a
1551 declare <vscale x 16 x i16> @llvm.riscv.vadd.mask.nxv16i16.i16(
1552 <vscale x 16 x i16>,
1553 <vscale x 16 x i16>,
1558 define <vscale x 16 x i16> @intrinsic_vadd_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
1559 ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv16i16_nxv16i16_i16:
1560 ; CHECK: # %bb.0: # %entry
1561 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
1562 ; CHECK-NEXT: vadd.vx v8, v12, a0, v0.t
1565 %a = call <vscale x 16 x i16> @llvm.riscv.vadd.mask.nxv16i16.i16(
1566 <vscale x 16 x i16> %0,
1567 <vscale x 16 x i16> %1,
1569 <vscale x 16 x i1> %3,
1572 ret <vscale x 16 x i16> %a
1575 declare <vscale x 32 x i16> @llvm.riscv.vadd.nxv32i16.i16(
1576 <vscale x 32 x i16>,
1577 <vscale x 32 x i16>,
1581 define <vscale x 32 x i16> @intrinsic_vadd_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, iXLen %2) nounwind {
1582 ; CHECK-LABEL: intrinsic_vadd_vx_nxv32i16_nxv32i16_i16:
1583 ; CHECK: # %bb.0: # %entry
1584 ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
1585 ; CHECK-NEXT: vadd.vx v8, v8, a0
1588 %a = call <vscale x 32 x i16> @llvm.riscv.vadd.nxv32i16.i16(
1589 <vscale x 32 x i16> undef,
1590 <vscale x 32 x i16> %0,
1594 ret <vscale x 32 x i16> %a
1597 declare <vscale x 32 x i16> @llvm.riscv.vadd.mask.nxv32i16.i16(
1598 <vscale x 32 x i16>,
1599 <vscale x 32 x i16>,
1604 define <vscale x 32 x i16> @intrinsic_vadd_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
1605 ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv32i16_nxv32i16_i16:
1606 ; CHECK: # %bb.0: # %entry
1607 ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu
1608 ; CHECK-NEXT: vadd.vx v8, v16, a0, v0.t
1611 %a = call <vscale x 32 x i16> @llvm.riscv.vadd.mask.nxv32i16.i16(
1612 <vscale x 32 x i16> %0,
1613 <vscale x 32 x i16> %1,
1615 <vscale x 32 x i1> %3,
1618 ret <vscale x 32 x i16> %a
1621 declare <vscale x 1 x i32> @llvm.riscv.vadd.nxv1i32.i32(
1627 define <vscale x 1 x i32> @intrinsic_vadd_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, iXLen %2) nounwind {
1628 ; CHECK-LABEL: intrinsic_vadd_vx_nxv1i32_nxv1i32_i32:
1629 ; CHECK: # %bb.0: # %entry
1630 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
1631 ; CHECK-NEXT: vadd.vx v8, v8, a0
1634 %a = call <vscale x 1 x i32> @llvm.riscv.vadd.nxv1i32.i32(
1635 <vscale x 1 x i32> undef,
1636 <vscale x 1 x i32> %0,
1640 ret <vscale x 1 x i32> %a
1643 declare <vscale x 1 x i32> @llvm.riscv.vadd.mask.nxv1i32.i32(
1650 define <vscale x 1 x i32> @intrinsic_vadd_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
1651 ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv1i32_nxv1i32_i32:
1652 ; CHECK: # %bb.0: # %entry
1653 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
1654 ; CHECK-NEXT: vadd.vx v8, v9, a0, v0.t
1657 %a = call <vscale x 1 x i32> @llvm.riscv.vadd.mask.nxv1i32.i32(
1658 <vscale x 1 x i32> %0,
1659 <vscale x 1 x i32> %1,
1661 <vscale x 1 x i1> %3,
1664 ret <vscale x 1 x i32> %a
1667 declare <vscale x 2 x i32> @llvm.riscv.vadd.nxv2i32.i32(
1673 define <vscale x 2 x i32> @intrinsic_vadd_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, iXLen %2) nounwind {
1674 ; CHECK-LABEL: intrinsic_vadd_vx_nxv2i32_nxv2i32_i32:
1675 ; CHECK: # %bb.0: # %entry
1676 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
1677 ; CHECK-NEXT: vadd.vx v8, v8, a0
1680 %a = call <vscale x 2 x i32> @llvm.riscv.vadd.nxv2i32.i32(
1681 <vscale x 2 x i32> undef,
1682 <vscale x 2 x i32> %0,
1686 ret <vscale x 2 x i32> %a
1689 declare <vscale x 2 x i32> @llvm.riscv.vadd.mask.nxv2i32.i32(
1696 define <vscale x 2 x i32> @intrinsic_vadd_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
1697 ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv2i32_nxv2i32_i32:
1698 ; CHECK: # %bb.0: # %entry
1699 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
1700 ; CHECK-NEXT: vadd.vx v8, v9, a0, v0.t
1703 %a = call <vscale x 2 x i32> @llvm.riscv.vadd.mask.nxv2i32.i32(
1704 <vscale x 2 x i32> %0,
1705 <vscale x 2 x i32> %1,
1707 <vscale x 2 x i1> %3,
1710 ret <vscale x 2 x i32> %a
1713 declare <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.i32(
1719 define <vscale x 4 x i32> @intrinsic_vadd_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, iXLen %2) nounwind {
1720 ; CHECK-LABEL: intrinsic_vadd_vx_nxv4i32_nxv4i32_i32:
1721 ; CHECK: # %bb.0: # %entry
1722 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
1723 ; CHECK-NEXT: vadd.vx v8, v8, a0
1726 %a = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.i32(
1727 <vscale x 4 x i32> undef,
1728 <vscale x 4 x i32> %0,
1732 ret <vscale x 4 x i32> %a
1735 declare <vscale x 4 x i32> @llvm.riscv.vadd.mask.nxv4i32.i32(
1742 define <vscale x 4 x i32> @intrinsic_vadd_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
1743 ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv4i32_nxv4i32_i32:
1744 ; CHECK: # %bb.0: # %entry
1745 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
1746 ; CHECK-NEXT: vadd.vx v8, v10, a0, v0.t
1749 %a = call <vscale x 4 x i32> @llvm.riscv.vadd.mask.nxv4i32.i32(
1750 <vscale x 4 x i32> %0,
1751 <vscale x 4 x i32> %1,
1753 <vscale x 4 x i1> %3,
1756 ret <vscale x 4 x i32> %a
1759 declare <vscale x 8 x i32> @llvm.riscv.vadd.nxv8i32.i32(
1765 define <vscale x 8 x i32> @intrinsic_vadd_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, iXLen %2) nounwind {
1766 ; CHECK-LABEL: intrinsic_vadd_vx_nxv8i32_nxv8i32_i32:
1767 ; CHECK: # %bb.0: # %entry
1768 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
1769 ; CHECK-NEXT: vadd.vx v8, v8, a0
1772 %a = call <vscale x 8 x i32> @llvm.riscv.vadd.nxv8i32.i32(
1773 <vscale x 8 x i32> undef,
1774 <vscale x 8 x i32> %0,
1778 ret <vscale x 8 x i32> %a
1781 declare <vscale x 8 x i32> @llvm.riscv.vadd.mask.nxv8i32.i32(
1788 define <vscale x 8 x i32> @intrinsic_vadd_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
1789 ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv8i32_nxv8i32_i32:
1790 ; CHECK: # %bb.0: # %entry
1791 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
1792 ; CHECK-NEXT: vadd.vx v8, v12, a0, v0.t
1795 %a = call <vscale x 8 x i32> @llvm.riscv.vadd.mask.nxv8i32.i32(
1796 <vscale x 8 x i32> %0,
1797 <vscale x 8 x i32> %1,
1799 <vscale x 8 x i1> %3,
1802 ret <vscale x 8 x i32> %a
1805 declare <vscale x 16 x i32> @llvm.riscv.vadd.nxv16i32.i32(
1806 <vscale x 16 x i32>,
1807 <vscale x 16 x i32>,
1811 define <vscale x 16 x i32> @intrinsic_vadd_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, iXLen %2) nounwind {
1812 ; CHECK-LABEL: intrinsic_vadd_vx_nxv16i32_nxv16i32_i32:
1813 ; CHECK: # %bb.0: # %entry
1814 ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
1815 ; CHECK-NEXT: vadd.vx v8, v8, a0
1818 %a = call <vscale x 16 x i32> @llvm.riscv.vadd.nxv16i32.i32(
1819 <vscale x 16 x i32> undef,
1820 <vscale x 16 x i32> %0,
1824 ret <vscale x 16 x i32> %a
1827 declare <vscale x 16 x i32> @llvm.riscv.vadd.mask.nxv16i32.i32(
1828 <vscale x 16 x i32>,
1829 <vscale x 16 x i32>,
1834 define <vscale x 16 x i32> @intrinsic_vadd_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
1835 ; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv16i32_nxv16i32_i32:
1836 ; CHECK: # %bb.0: # %entry
1837 ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
1838 ; CHECK-NEXT: vadd.vx v8, v16, a0, v0.t
1841 %a = call <vscale x 16 x i32> @llvm.riscv.vadd.mask.nxv16i32.i32(
1842 <vscale x 16 x i32> %0,
1843 <vscale x 16 x i32> %1,
1845 <vscale x 16 x i1> %3,
1848 ret <vscale x 16 x i32> %a
1851 declare <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.i64(
1857 define <vscale x 1 x i64> @intrinsic_vadd_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, iXLen %2) nounwind {
1858 ; RV32-LABEL: intrinsic_vadd_vx_nxv1i64_nxv1i64_i64:
1859 ; RV32: # %bb.0: # %entry
1860 ; RV32-NEXT: addi sp, sp, -16
1861 ; RV32-NEXT: sw a0, 8(sp)
1862 ; RV32-NEXT: sw a1, 12(sp)
1863 ; RV32-NEXT: addi a0, sp, 8
1864 ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
1865 ; RV32-NEXT: vlse64.v v9, (a0), zero
1866 ; RV32-NEXT: vadd.vv v8, v8, v9
1867 ; RV32-NEXT: addi sp, sp, 16
1870 ; RV64-LABEL: intrinsic_vadd_vx_nxv1i64_nxv1i64_i64:
1871 ; RV64: # %bb.0: # %entry
1872 ; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma
1873 ; RV64-NEXT: vadd.vx v8, v8, a0
1876 %a = call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.i64(
1877 <vscale x 1 x i64> undef,
1878 <vscale x 1 x i64> %0,
1882 ret <vscale x 1 x i64> %a
1885 define <vscale x 1 x i64> @intrinsic_vadd_vx_sext_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i32 %1, iXLen %2) nounwind {
1886 ; RV32-LABEL: intrinsic_vadd_vx_sext_nxv1i64_nxv1i64_i64:
1887 ; RV32: # %bb.0: # %entry
1888 ; RV32-NEXT: vsetvli zero, a1, e64, m1, ta, ma
1889 ; RV32-NEXT: vadd.vx v8, v8, a0
1892 ; RV64-LABEL: intrinsic_vadd_vx_sext_nxv1i64_nxv1i64_i64:
1893 ; RV64: # %bb.0: # %entry
1894 ; RV64-NEXT: sext.w a0, a0
1895 ; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma
1896 ; RV64-NEXT: vadd.vx v8, v8, a0
1899 %ext = sext i32 %1 to i64
1900 %a = call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.i64(
1901 <vscale x 1 x i64> undef,
1902 <vscale x 1 x i64> %0,
1906 ret <vscale x 1 x i64> %a
1909 define <vscale x 1 x i64> @intrinsic_vadd_vx_sextload_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, ptr %1, iXLen %2) nounwind {
1910 ; CHECK-LABEL: intrinsic_vadd_vx_sextload_nxv1i64_nxv1i64_i64:
1911 ; CHECK: # %bb.0: # %entry
1912 ; CHECK-NEXT: lw a0, 0(a0)
1913 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
1914 ; CHECK-NEXT: vadd.vx v8, v8, a0
1917 %load = load i32, ptr %1
1918 %ext = sext i32 %load to i64
1919 %a = call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.i64(
1920 <vscale x 1 x i64> undef,
1921 <vscale x 1 x i64> %0,
1925 ret <vscale x 1 x i64> %a
1928 declare <vscale x 1 x i64> @llvm.riscv.vadd.mask.nxv1i64.i64(
1935 define <vscale x 1 x i64> @intrinsic_vadd_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
1936 ; RV32-LABEL: intrinsic_vadd_mask_vx_nxv1i64_nxv1i64_i64:
1937 ; RV32: # %bb.0: # %entry
1938 ; RV32-NEXT: addi sp, sp, -16
1939 ; RV32-NEXT: sw a0, 8(sp)
1940 ; RV32-NEXT: sw a1, 12(sp)
1941 ; RV32-NEXT: addi a0, sp, 8
1942 ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu
1943 ; RV32-NEXT: vlse64.v v10, (a0), zero
1944 ; RV32-NEXT: vadd.vv v8, v9, v10, v0.t
1945 ; RV32-NEXT: addi sp, sp, 16
1948 ; RV64-LABEL: intrinsic_vadd_mask_vx_nxv1i64_nxv1i64_i64:
1949 ; RV64: # %bb.0: # %entry
1950 ; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu
1951 ; RV64-NEXT: vadd.vx v8, v9, a0, v0.t
1954 %a = call <vscale x 1 x i64> @llvm.riscv.vadd.mask.nxv1i64.i64(
1955 <vscale x 1 x i64> %0,
1956 <vscale x 1 x i64> %1,
1958 <vscale x 1 x i1> %3,
1961 ret <vscale x 1 x i64> %a
1964 declare <vscale x 2 x i64> @llvm.riscv.vadd.nxv2i64.i64(
1970 define <vscale x 2 x i64> @intrinsic_vadd_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, iXLen %2) nounwind {
1971 ; RV32-LABEL: intrinsic_vadd_vx_nxv2i64_nxv2i64_i64:
1972 ; RV32: # %bb.0: # %entry
1973 ; RV32-NEXT: addi sp, sp, -16
1974 ; RV32-NEXT: sw a0, 8(sp)
1975 ; RV32-NEXT: sw a1, 12(sp)
1976 ; RV32-NEXT: addi a0, sp, 8
1977 ; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
1978 ; RV32-NEXT: vlse64.v v10, (a0), zero
1979 ; RV32-NEXT: vadd.vv v8, v8, v10
1980 ; RV32-NEXT: addi sp, sp, 16
1983 ; RV64-LABEL: intrinsic_vadd_vx_nxv2i64_nxv2i64_i64:
1984 ; RV64: # %bb.0: # %entry
1985 ; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma
1986 ; RV64-NEXT: vadd.vx v8, v8, a0
1989 %a = call <vscale x 2 x i64> @llvm.riscv.vadd.nxv2i64.i64(
1990 <vscale x 2 x i64> undef,
1991 <vscale x 2 x i64> %0,
1995 ret <vscale x 2 x i64> %a
1998 declare <vscale x 2 x i64> @llvm.riscv.vadd.mask.nxv2i64.i64(
2005 define <vscale x 2 x i64> @intrinsic_vadd_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
2006 ; RV32-LABEL: intrinsic_vadd_mask_vx_nxv2i64_nxv2i64_i64:
2007 ; RV32: # %bb.0: # %entry
2008 ; RV32-NEXT: addi sp, sp, -16
2009 ; RV32-NEXT: sw a0, 8(sp)
2010 ; RV32-NEXT: sw a1, 12(sp)
2011 ; RV32-NEXT: addi a0, sp, 8
2012 ; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu
2013 ; RV32-NEXT: vlse64.v v12, (a0), zero
2014 ; RV32-NEXT: vadd.vv v8, v10, v12, v0.t
2015 ; RV32-NEXT: addi sp, sp, 16
2018 ; RV64-LABEL: intrinsic_vadd_mask_vx_nxv2i64_nxv2i64_i64:
2019 ; RV64: # %bb.0: # %entry
2020 ; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu
2021 ; RV64-NEXT: vadd.vx v8, v10, a0, v0.t
2024 %a = call <vscale x 2 x i64> @llvm.riscv.vadd.mask.nxv2i64.i64(
2025 <vscale x 2 x i64> %0,
2026 <vscale x 2 x i64> %1,
2028 <vscale x 2 x i1> %3,
2031 ret <vscale x 2 x i64> %a
2034 declare <vscale x 4 x i64> @llvm.riscv.vadd.nxv4i64.i64(
2040 define <vscale x 4 x i64> @intrinsic_vadd_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, iXLen %2) nounwind {
2041 ; RV32-LABEL: intrinsic_vadd_vx_nxv4i64_nxv4i64_i64:
2042 ; RV32: # %bb.0: # %entry
2043 ; RV32-NEXT: addi sp, sp, -16
2044 ; RV32-NEXT: sw a0, 8(sp)
2045 ; RV32-NEXT: sw a1, 12(sp)
2046 ; RV32-NEXT: addi a0, sp, 8
2047 ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
2048 ; RV32-NEXT: vlse64.v v12, (a0), zero
2049 ; RV32-NEXT: vadd.vv v8, v8, v12
2050 ; RV32-NEXT: addi sp, sp, 16
2053 ; RV64-LABEL: intrinsic_vadd_vx_nxv4i64_nxv4i64_i64:
2054 ; RV64: # %bb.0: # %entry
2055 ; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma
2056 ; RV64-NEXT: vadd.vx v8, v8, a0
2059 %a = call <vscale x 4 x i64> @llvm.riscv.vadd.nxv4i64.i64(
2060 <vscale x 4 x i64> undef,
2061 <vscale x 4 x i64> %0,
2065 ret <vscale x 4 x i64> %a
2068 declare <vscale x 4 x i64> @llvm.riscv.vadd.mask.nxv4i64.i64(
2075 define <vscale x 4 x i64> @intrinsic_vadd_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
2076 ; RV32-LABEL: intrinsic_vadd_mask_vx_nxv4i64_nxv4i64_i64:
2077 ; RV32: # %bb.0: # %entry
2078 ; RV32-NEXT: addi sp, sp, -16
2079 ; RV32-NEXT: sw a0, 8(sp)
2080 ; RV32-NEXT: sw a1, 12(sp)
2081 ; RV32-NEXT: addi a0, sp, 8
2082 ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu
2083 ; RV32-NEXT: vlse64.v v16, (a0), zero
2084 ; RV32-NEXT: vadd.vv v8, v12, v16, v0.t
2085 ; RV32-NEXT: addi sp, sp, 16
2088 ; RV64-LABEL: intrinsic_vadd_mask_vx_nxv4i64_nxv4i64_i64:
2089 ; RV64: # %bb.0: # %entry
2090 ; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
2091 ; RV64-NEXT: vadd.vx v8, v12, a0, v0.t
2094 %a = call <vscale x 4 x i64> @llvm.riscv.vadd.mask.nxv4i64.i64(
2095 <vscale x 4 x i64> %0,
2096 <vscale x 4 x i64> %1,
2098 <vscale x 4 x i1> %3,
2101 ret <vscale x 4 x i64> %a
2104 declare <vscale x 8 x i64> @llvm.riscv.vadd.nxv8i64.i64(
2110 define <vscale x 8 x i64> @intrinsic_vadd_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, iXLen %2) nounwind {
2111 ; RV32-LABEL: intrinsic_vadd_vx_nxv8i64_nxv8i64_i64:
2112 ; RV32: # %bb.0: # %entry
2113 ; RV32-NEXT: addi sp, sp, -16
2114 ; RV32-NEXT: sw a0, 8(sp)
2115 ; RV32-NEXT: sw a1, 12(sp)
2116 ; RV32-NEXT: addi a0, sp, 8
2117 ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
2118 ; RV32-NEXT: vlse64.v v16, (a0), zero
2119 ; RV32-NEXT: vadd.vv v8, v8, v16
2120 ; RV32-NEXT: addi sp, sp, 16
2123 ; RV64-LABEL: intrinsic_vadd_vx_nxv8i64_nxv8i64_i64:
2124 ; RV64: # %bb.0: # %entry
2125 ; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
2126 ; RV64-NEXT: vadd.vx v8, v8, a0
2129 %a = call <vscale x 8 x i64> @llvm.riscv.vadd.nxv8i64.i64(
2130 <vscale x 8 x i64> undef,
2131 <vscale x 8 x i64> %0,
2135 ret <vscale x 8 x i64> %a
2138 declare <vscale x 8 x i64> @llvm.riscv.vadd.mask.nxv8i64.i64(
2145 define <vscale x 8 x i64> @intrinsic_vadd_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
2146 ; RV32-LABEL: intrinsic_vadd_mask_vx_nxv8i64_nxv8i64_i64:
2147 ; RV32: # %bb.0: # %entry
2148 ; RV32-NEXT: addi sp, sp, -16
2149 ; RV32-NEXT: sw a0, 8(sp)
2150 ; RV32-NEXT: sw a1, 12(sp)
2151 ; RV32-NEXT: addi a0, sp, 8
2152 ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu
2153 ; RV32-NEXT: vlse64.v v24, (a0), zero
2154 ; RV32-NEXT: vadd.vv v8, v16, v24, v0.t
2155 ; RV32-NEXT: addi sp, sp, 16
2158 ; RV64-LABEL: intrinsic_vadd_mask_vx_nxv8i64_nxv8i64_i64:
2159 ; RV64: # %bb.0: # %entry
2160 ; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu
2161 ; RV64-NEXT: vadd.vx v8, v16, a0, v0.t
2164 %a = call <vscale x 8 x i64> @llvm.riscv.vadd.mask.nxv8i64.i64(
2165 <vscale x 8 x i64> %0,
2166 <vscale x 8 x i64> %1,
2168 <vscale x 8 x i1> %3,
2171 ret <vscale x 8 x i64> %a
2174 define <vscale x 1 x i8> @intrinsic_vadd_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, iXLen %1) nounwind {
2175 ; CHECK-LABEL: intrinsic_vadd_vi_nxv1i8_nxv1i8_i8:
2176 ; CHECK: # %bb.0: # %entry
2177 ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
2178 ; CHECK-NEXT: vadd.vi v8, v8, 9
2181 %a = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.i8(
2182 <vscale x 1 x i8> undef,
2183 <vscale x 1 x i8> %0,
2187 ret <vscale x 1 x i8> %a
2190 define <vscale x 1 x i8> @intrinsic_vadd_mask_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
2191 ; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv1i8_nxv1i8_i8:
2192 ; CHECK: # %bb.0: # %entry
2193 ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
2194 ; CHECK-NEXT: vadd.vi v8, v9, -9, v0.t
2197 %a = call <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.i8(
2198 <vscale x 1 x i8> %0,
2199 <vscale x 1 x i8> %1,
2201 <vscale x 1 x i1> %2,
2204 ret <vscale x 1 x i8> %a
2207 define <vscale x 2 x i8> @intrinsic_vadd_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, iXLen %1) nounwind {
2208 ; CHECK-LABEL: intrinsic_vadd_vi_nxv2i8_nxv2i8_i8:
2209 ; CHECK: # %bb.0: # %entry
2210 ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
2211 ; CHECK-NEXT: vadd.vi v8, v8, 9
2214 %a = call <vscale x 2 x i8> @llvm.riscv.vadd.nxv2i8.i8(
2215 <vscale x 2 x i8> undef,
2216 <vscale x 2 x i8> %0,
2220 ret <vscale x 2 x i8> %a
2223 define <vscale x 2 x i8> @intrinsic_vadd_mask_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
2224 ; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv2i8_nxv2i8_i8:
2225 ; CHECK: # %bb.0: # %entry
2226 ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
2227 ; CHECK-NEXT: vadd.vi v8, v9, -9, v0.t
2230 %a = call <vscale x 2 x i8> @llvm.riscv.vadd.mask.nxv2i8.i8(
2231 <vscale x 2 x i8> %0,
2232 <vscale x 2 x i8> %1,
2234 <vscale x 2 x i1> %2,
2237 ret <vscale x 2 x i8> %a
2240 define <vscale x 4 x i8> @intrinsic_vadd_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, iXLen %1) nounwind {
2241 ; CHECK-LABEL: intrinsic_vadd_vi_nxv4i8_nxv4i8_i8:
2242 ; CHECK: # %bb.0: # %entry
2243 ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
2244 ; CHECK-NEXT: vadd.vi v8, v8, 9
2247 %a = call <vscale x 4 x i8> @llvm.riscv.vadd.nxv4i8.i8(
2248 <vscale x 4 x i8> undef,
2249 <vscale x 4 x i8> %0,
2253 ret <vscale x 4 x i8> %a
2256 define <vscale x 4 x i8> @intrinsic_vadd_mask_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
2257 ; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv4i8_nxv4i8_i8:
2258 ; CHECK: # %bb.0: # %entry
2259 ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
2260 ; CHECK-NEXT: vadd.vi v8, v9, -9, v0.t
2263 %a = call <vscale x 4 x i8> @llvm.riscv.vadd.mask.nxv4i8.i8(
2264 <vscale x 4 x i8> %0,
2265 <vscale x 4 x i8> %1,
2267 <vscale x 4 x i1> %2,
2270 ret <vscale x 4 x i8> %a
2273 define <vscale x 8 x i8> @intrinsic_vadd_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, iXLen %1) nounwind {
2274 ; CHECK-LABEL: intrinsic_vadd_vi_nxv8i8_nxv8i8_i8:
2275 ; CHECK: # %bb.0: # %entry
2276 ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
2277 ; CHECK-NEXT: vadd.vi v8, v8, 9
2280 %a = call <vscale x 8 x i8> @llvm.riscv.vadd.nxv8i8.i8(
2281 <vscale x 8 x i8> undef,
2282 <vscale x 8 x i8> %0,
2286 ret <vscale x 8 x i8> %a
2289 define <vscale x 8 x i8> @intrinsic_vadd_mask_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
2290 ; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv8i8_nxv8i8_i8:
2291 ; CHECK: # %bb.0: # %entry
2292 ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
2293 ; CHECK-NEXT: vadd.vi v8, v9, -9, v0.t
2296 %a = call <vscale x 8 x i8> @llvm.riscv.vadd.mask.nxv8i8.i8(
2297 <vscale x 8 x i8> %0,
2298 <vscale x 8 x i8> %1,
2300 <vscale x 8 x i1> %2,
2303 ret <vscale x 8 x i8> %a
2306 define <vscale x 16 x i8> @intrinsic_vadd_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, iXLen %1) nounwind {
2307 ; CHECK-LABEL: intrinsic_vadd_vi_nxv16i8_nxv16i8_i8:
2308 ; CHECK: # %bb.0: # %entry
2309 ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
2310 ; CHECK-NEXT: vadd.vi v8, v8, 9
2313 %a = call <vscale x 16 x i8> @llvm.riscv.vadd.nxv16i8.i8(
2314 <vscale x 16 x i8> undef,
2315 <vscale x 16 x i8> %0,
2319 ret <vscale x 16 x i8> %a
2322 define <vscale x 16 x i8> @intrinsic_vadd_mask_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
2323 ; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv16i8_nxv16i8_i8:
2324 ; CHECK: # %bb.0: # %entry
2325 ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
2326 ; CHECK-NEXT: vadd.vi v8, v10, -9, v0.t
2329 %a = call <vscale x 16 x i8> @llvm.riscv.vadd.mask.nxv16i8.i8(
2330 <vscale x 16 x i8> %0,
2331 <vscale x 16 x i8> %1,
2333 <vscale x 16 x i1> %2,
2336 ret <vscale x 16 x i8> %a
2339 define <vscale x 32 x i8> @intrinsic_vadd_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, iXLen %1) nounwind {
2340 ; CHECK-LABEL: intrinsic_vadd_vi_nxv32i8_nxv32i8_i8:
2341 ; CHECK: # %bb.0: # %entry
2342 ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
2343 ; CHECK-NEXT: vadd.vi v8, v8, 9
2346 %a = call <vscale x 32 x i8> @llvm.riscv.vadd.nxv32i8.i8(
2347 <vscale x 32 x i8> undef,
2348 <vscale x 32 x i8> %0,
2352 ret <vscale x 32 x i8> %a
2355 define <vscale x 32 x i8> @intrinsic_vadd_mask_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
2356 ; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv32i8_nxv32i8_i8:
2357 ; CHECK: # %bb.0: # %entry
2358 ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
2359 ; CHECK-NEXT: vadd.vi v8, v12, -9, v0.t
2362 %a = call <vscale x 32 x i8> @llvm.riscv.vadd.mask.nxv32i8.i8(
2363 <vscale x 32 x i8> %0,
2364 <vscale x 32 x i8> %1,
2366 <vscale x 32 x i1> %2,
2369 ret <vscale x 32 x i8> %a
2372 define <vscale x 64 x i8> @intrinsic_vadd_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, iXLen %1) nounwind {
2373 ; CHECK-LABEL: intrinsic_vadd_vi_nxv64i8_nxv64i8_i8:
2374 ; CHECK: # %bb.0: # %entry
2375 ; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
2376 ; CHECK-NEXT: vadd.vi v8, v8, -9
2379 %a = call <vscale x 64 x i8> @llvm.riscv.vadd.nxv64i8.i8(
2380 <vscale x 64 x i8> undef,
2381 <vscale x 64 x i8> %0,
2385 ret <vscale x 64 x i8> %a
2388 define <vscale x 64 x i8> @intrinsic_vadd_mask_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i1> %2, iXLen %3) nounwind {
2389 ; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv64i8_nxv64i8_i8:
2390 ; CHECK: # %bb.0: # %entry
2391 ; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu
2392 ; CHECK-NEXT: vadd.vi v8, v16, -9, v0.t
2395 %a = call <vscale x 64 x i8> @llvm.riscv.vadd.mask.nxv64i8.i8(
2396 <vscale x 64 x i8> %0,
2397 <vscale x 64 x i8> %1,
2399 <vscale x 64 x i1> %2,
2402 ret <vscale x 64 x i8> %a
; vadd.vi (unmasked): passthru undef, vl in a0, immediate 9 added to each e16 element.
define <vscale x 1 x i16> @intrinsic_vadd_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vadd_vi_nxv1i16_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vadd.vi v8, v8, 9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vadd.nxv1i16.i16(
    <vscale x 1 x i16> undef,
    <vscale x 1 x i16> %0,
    i16 9,
    iXLen %1)

  ret <vscale x 1 x i16> %a
}
; vadd.vi (masked, mu): merge %0, source %1 (v9), mask %2 in v0, immediate -9, policy 1 (TA).
define <vscale x 1 x i16> @intrinsic_vadd_mask_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv1i16_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT: vadd.vi v8, v9, -9, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vadd.mask.nxv1i16.i16(
    <vscale x 1 x i16> %0,
    <vscale x 1 x i16> %1,
    i16 -9,
    <vscale x 1 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 1 x i16> %a
}
; vadd.vi (unmasked): passthru undef, vl in a0, immediate 9 added to each e16 element.
define <vscale x 2 x i16> @intrinsic_vadd_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vadd_vi_nxv2i16_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT: vadd.vi v8, v8, 9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vadd.nxv2i16.i16(
    <vscale x 2 x i16> undef,
    <vscale x 2 x i16> %0,
    i16 9,
    iXLen %1)

  ret <vscale x 2 x i16> %a
}
; vadd.vi (masked, mu): merge %0, source %1 (v9), mask %2 in v0, immediate -9, policy 1 (TA).
define <vscale x 2 x i16> @intrinsic_vadd_mask_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv2i16_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT: vadd.vi v8, v9, -9, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vadd.mask.nxv2i16.i16(
    <vscale x 2 x i16> %0,
    <vscale x 2 x i16> %1,
    i16 -9,
    <vscale x 2 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 2 x i16> %a
}
; vadd.vi (unmasked): passthru undef, vl in a0, immediate 9 added to each e16 element.
define <vscale x 4 x i16> @intrinsic_vadd_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vadd_vi_nxv4i16_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT: vadd.vi v8, v8, 9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vadd.nxv4i16.i16(
    <vscale x 4 x i16> undef,
    <vscale x 4 x i16> %0,
    i16 9,
    iXLen %1)

  ret <vscale x 4 x i16> %a
}
; vadd.vi (masked, mu): merge %0, source %1 (v9), mask %2 in v0, immediate -9, policy 1 (TA).
define <vscale x 4 x i16> @intrinsic_vadd_mask_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv4i16_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT: vadd.vi v8, v9, -9, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vadd.mask.nxv4i16.i16(
    <vscale x 4 x i16> %0,
    <vscale x 4 x i16> %1,
    i16 -9,
    <vscale x 4 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 4 x i16> %a
}
; vadd.vi (unmasked): passthru undef, vl in a0, immediate 9 added to each e16 element.
define <vscale x 8 x i16> @intrinsic_vadd_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vadd_vi_nxv8i16_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vadd.vi v8, v8, 9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vadd.nxv8i16.i16(
    <vscale x 8 x i16> undef,
    <vscale x 8 x i16> %0,
    i16 9,
    iXLen %1)

  ret <vscale x 8 x i16> %a
}
; vadd.vi (masked, mu): merge %0, source %1 (v10), mask %2 in v0, immediate -9, policy 1 (TA).
define <vscale x 8 x i16> @intrinsic_vadd_mask_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv8i16_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT: vadd.vi v8, v10, -9, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vadd.mask.nxv8i16.i16(
    <vscale x 8 x i16> %0,
    <vscale x 8 x i16> %1,
    i16 -9,
    <vscale x 8 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 8 x i16> %a
}
; vadd.vi (unmasked): passthru undef, vl in a0, immediate 9 added to each e16 element.
define <vscale x 16 x i16> @intrinsic_vadd_vi_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vadd_vi_nxv16i16_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT: vadd.vi v8, v8, 9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vadd.nxv16i16.i16(
    <vscale x 16 x i16> undef,
    <vscale x 16 x i16> %0,
    i16 9,
    iXLen %1)

  ret <vscale x 16 x i16> %a
}
; vadd.vi (masked, mu): merge %0, source %1 (v12), mask %2 in v0, immediate -9, policy 1 (TA).
define <vscale x 16 x i16> @intrinsic_vadd_mask_vi_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv16i16_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT: vadd.vi v8, v12, -9, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vadd.mask.nxv16i16.i16(
    <vscale x 16 x i16> %0,
    <vscale x 16 x i16> %1,
    i16 -9,
    <vscale x 16 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 16 x i16> %a
}
; vadd.vi (unmasked): passthru undef, vl in a0, immediate 9 added to each e16 element.
define <vscale x 32 x i16> @intrinsic_vadd_vi_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vadd_vi_nxv32i16_nxv32i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT: vadd.vi v8, v8, 9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vadd.nxv32i16.i16(
    <vscale x 32 x i16> undef,
    <vscale x 32 x i16> %0,
    i16 9,
    iXLen %1)

  ret <vscale x 32 x i16> %a
}
; vadd.vi (masked, mu): merge %0, source %1 (v16), mask %2 in v0, immediate -9, policy 1 (TA).
define <vscale x 32 x i16> @intrinsic_vadd_mask_vi_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv32i16_nxv32i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu
; CHECK-NEXT: vadd.vi v8, v16, -9, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vadd.mask.nxv32i16.i16(
    <vscale x 32 x i16> %0,
    <vscale x 32 x i16> %1,
    i16 -9,
    <vscale x 32 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 32 x i16> %a
}
; vadd.vi (unmasked): passthru undef, vl in a0, immediate 9 added to each e32 element.
define <vscale x 1 x i32> @intrinsic_vadd_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vadd_vi_nxv1i32_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vadd.vi v8, v8, 9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vadd.nxv1i32.i32(
    <vscale x 1 x i32> undef,
    <vscale x 1 x i32> %0,
    i32 9,
    iXLen %1)

  ret <vscale x 1 x i32> %a
}
; vadd.vi (masked, mu): merge %0, source %1 (v9), mask %2 in v0, immediate -9, policy 1 (TA).
define <vscale x 1 x i32> @intrinsic_vadd_mask_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv1i32_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT: vadd.vi v8, v9, -9, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vadd.mask.nxv1i32.i32(
    <vscale x 1 x i32> %0,
    <vscale x 1 x i32> %1,
    i32 -9,
    <vscale x 1 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 1 x i32> %a
}
; vadd.vi (unmasked): passthru undef, vl in a0, immediate 9 added to each e32 element.
define <vscale x 2 x i32> @intrinsic_vadd_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vadd_vi_nxv2i32_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vadd.vi v8, v8, 9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vadd.nxv2i32.i32(
    <vscale x 2 x i32> undef,
    <vscale x 2 x i32> %0,
    i32 9,
    iXLen %1)

  ret <vscale x 2 x i32> %a
}
; vadd.vi (masked, mu): merge %0, source %1 (v9), mask %2 in v0, immediate -9, policy 1 (TA).
define <vscale x 2 x i32> @intrinsic_vadd_mask_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv2i32_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT: vadd.vi v8, v9, -9, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vadd.mask.nxv2i32.i32(
    <vscale x 2 x i32> %0,
    <vscale x 2 x i32> %1,
    i32 -9,
    <vscale x 2 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 2 x i32> %a
}
; vadd.vi (unmasked): passthru undef, vl in a0, immediate 9 added to each e32 element.
define <vscale x 4 x i32> @intrinsic_vadd_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vadd_vi_nxv4i32_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vadd.vi v8, v8, 9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.i32(
    <vscale x 4 x i32> undef,
    <vscale x 4 x i32> %0,
    i32 9,
    iXLen %1)

  ret <vscale x 4 x i32> %a
}
; vadd.vi (masked, mu): merge %0, source %1 (v10), mask %2 in v0, immediate -9, policy 1 (TA).
define <vscale x 4 x i32> @intrinsic_vadd_mask_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv4i32_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT: vadd.vi v8, v10, -9, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vadd.mask.nxv4i32.i32(
    <vscale x 4 x i32> %0,
    <vscale x 4 x i32> %1,
    i32 -9,
    <vscale x 4 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 4 x i32> %a
}
; vadd.vi (unmasked): passthru undef, vl in a0, immediate 9 added to each e32 element.
define <vscale x 8 x i32> @intrinsic_vadd_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vadd_vi_nxv8i32_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vadd.vi v8, v8, 9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vadd.nxv8i32.i32(
    <vscale x 8 x i32> undef,
    <vscale x 8 x i32> %0,
    i32 9,
    iXLen %1)

  ret <vscale x 8 x i32> %a
}
; vadd.vi (masked, mu): merge %0, source %1 (v12), mask %2 in v0, immediate -9, policy 1 (TA).
define <vscale x 8 x i32> @intrinsic_vadd_mask_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv8i32_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT: vadd.vi v8, v12, -9, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vadd.mask.nxv8i32.i32(
    <vscale x 8 x i32> %0,
    <vscale x 8 x i32> %1,
    i32 -9,
    <vscale x 8 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 8 x i32> %a
}
; vadd.vi (unmasked): passthru undef, vl in a0, immediate 9 added to each e32 element.
define <vscale x 16 x i32> @intrinsic_vadd_vi_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vadd_vi_nxv16i32_nxv16i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vadd.vi v8, v8, 9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vadd.nxv16i32.i32(
    <vscale x 16 x i32> undef,
    <vscale x 16 x i32> %0,
    i32 9,
    iXLen %1)

  ret <vscale x 16 x i32> %a
}
; vadd.vi (masked, mu): merge %0, source %1 (v16), mask %2 in v0, immediate -9, policy 1 (TA).
define <vscale x 16 x i32> @intrinsic_vadd_mask_vi_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv16i32_nxv16i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu
; CHECK-NEXT: vadd.vi v8, v16, -9, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vadd.mask.nxv16i32.i32(
    <vscale x 16 x i32> %0,
    <vscale x 16 x i32> %1,
    i32 -9,
    <vscale x 16 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 16 x i32> %a
}
; vadd.vi (unmasked): passthru undef, vl in a0, immediate 9 added to each e64 element.
define <vscale x 1 x i64> @intrinsic_vadd_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vadd_vi_nxv1i64_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vadd.vi v8, v8, 9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.i64(
    <vscale x 1 x i64> undef,
    <vscale x 1 x i64> %0,
    i64 9,
    iXLen %1)

  ret <vscale x 1 x i64> %a
}
; vadd.vi (masked, mu): merge %0, source %1 (v9), mask %2 in v0, immediate 9, policy 1 (TA).
define <vscale x 1 x i64> @intrinsic_vadd_mask_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv1i64_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT: vadd.vi v8, v9, 9, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vadd.mask.nxv1i64.i64(
    <vscale x 1 x i64> %0,
    <vscale x 1 x i64> %1,
    i64 9,
    <vscale x 1 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 1 x i64> %a
}
; vadd.vi (unmasked): passthru undef, vl in a0, immediate 9 added to each e64 element.
define <vscale x 2 x i64> @intrinsic_vadd_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vadd_vi_nxv2i64_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vadd.vi v8, v8, 9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vadd.nxv2i64.i64(
    <vscale x 2 x i64> undef,
    <vscale x 2 x i64> %0,
    i64 9,
    iXLen %1)

  ret <vscale x 2 x i64> %a
}
; vadd.vi (masked, mu): merge %0, source %1 (v10), mask %2 in v0, immediate 9, policy 1 (TA).
define <vscale x 2 x i64> @intrinsic_vadd_mask_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv2i64_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
; CHECK-NEXT: vadd.vi v8, v10, 9, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vadd.mask.nxv2i64.i64(
    <vscale x 2 x i64> %0,
    <vscale x 2 x i64> %1,
    i64 9,
    <vscale x 2 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 2 x i64> %a
}
; vadd.vi (unmasked): passthru undef, vl in a0, immediate 9 added to each e64 element.
define <vscale x 4 x i64> @intrinsic_vadd_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vadd_vi_nxv4i64_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vadd.vi v8, v8, 9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vadd.nxv4i64.i64(
    <vscale x 4 x i64> undef,
    <vscale x 4 x i64> %0,
    i64 9,
    iXLen %1)

  ret <vscale x 4 x i64> %a
}
; vadd.vi (masked, mu): merge %0, source %1 (v12), mask %2 in v0, immediate 9, policy 1 (TA).
define <vscale x 4 x i64> @intrinsic_vadd_mask_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv4i64_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
; CHECK-NEXT: vadd.vi v8, v12, 9, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vadd.mask.nxv4i64.i64(
    <vscale x 4 x i64> %0,
    <vscale x 4 x i64> %1,
    i64 9,
    <vscale x 4 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 4 x i64> %a
}
; vadd.vi (unmasked): passthru undef, vl in a0, immediate 9 added to each e64 element.
define <vscale x 8 x i64> @intrinsic_vadd_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vadd_vi_nxv8i64_nxv8i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vadd.vi v8, v8, 9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vadd.nxv8i64.i64(
    <vscale x 8 x i64> undef,
    <vscale x 8 x i64> %0,
    i64 9,
    iXLen %1)

  ret <vscale x 8 x i64> %a
}
; vadd.vi (masked, mu): merge %0, source %1 (v16), mask %2 in v0, immediate 9, policy 1 (TA).
define <vscale x 8 x i64> @intrinsic_vadd_mask_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv8i64_nxv8i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
; CHECK-NEXT: vadd.vi v8, v16, 9, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vadd.mask.nxv8i64.i64(
    <vscale x 8 x i64> %0,
    <vscale x 8 x i64> %1,
    i64 9,
    <vscale x 8 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 8 x i64> %a
}