1 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2 ; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
3 ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32
4 ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
5 ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64
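; This file tests selection of the llvm.riscv.vaadd (signed averaging add)
; intrinsics in their unmasked and masked forms, for the vector-vector (.vv)
; and vector-scalar (.vx) variants, across SEW 8/16/32/64 and all LMULs.
; Every test expects the fixed-point rounding mode to be programmed with
; csrwi vxrm, 0 before the vaadd, and the unmasked tests pass undef as the
; passthru operand.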
7 declare <vscale x 1 x i8> @llvm.riscv.vaadd.nxv1i8.nxv1i8(
13 define <vscale x 1 x i8> @intrinsic_vaadd_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
14 ; CHECK-LABEL: intrinsic_vaadd_vv_nxv1i8_nxv1i8_nxv1i8:
15 ; CHECK: # %bb.0: # %entry
16 ; CHECK-NEXT: csrwi vxrm, 0
17 ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
18 ; CHECK-NEXT: vaadd.vv v8, v8, v9
21 %a = call <vscale x 1 x i8> @llvm.riscv.vaadd.nxv1i8.nxv1i8(
22 <vscale x 1 x i8> undef,
27 ret <vscale x 1 x i8> %a
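; The masked intrinsics take the passthru, the two sources, the mask, and
; three trailing iXLen operands: the rounding mode (0, matching the
; csrwi vxrm, 0 above), the vector length, and the policy (1, which shows up
; as the tail-agnostic, mask-undisturbed "ta, mu" setting in the vsetvli).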
30 declare <vscale x 1 x i8> @llvm.riscv.vaadd.mask.nxv1i8.nxv1i8(
37 define <vscale x 1 x i8> @intrinsic_vaadd_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
38 ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv1i8_nxv1i8_nxv1i8:
39 ; CHECK: # %bb.0: # %entry
40 ; CHECK-NEXT: csrwi vxrm, 0
41 ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
42 ; CHECK-NEXT: vaadd.vv v8, v9, v10, v0.t
45 %a = call <vscale x 1 x i8> @llvm.riscv.vaadd.mask.nxv1i8.nxv1i8(
50 iXLen 0, iXLen %4, iXLen 1)
52 ret <vscale x 1 x i8> %a
55 declare <vscale x 2 x i8> @llvm.riscv.vaadd.nxv2i8.nxv2i8(
61 define <vscale x 2 x i8> @intrinsic_vaadd_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
62 ; CHECK-LABEL: intrinsic_vaadd_vv_nxv2i8_nxv2i8_nxv2i8:
63 ; CHECK: # %bb.0: # %entry
64 ; CHECK-NEXT: csrwi vxrm, 0
65 ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
66 ; CHECK-NEXT: vaadd.vv v8, v8, v9
69 %a = call <vscale x 2 x i8> @llvm.riscv.vaadd.nxv2i8.nxv2i8(
70 <vscale x 2 x i8> undef,
75 ret <vscale x 2 x i8> %a
78 declare <vscale x 2 x i8> @llvm.riscv.vaadd.mask.nxv2i8.nxv2i8(
85 define <vscale x 2 x i8> @intrinsic_vaadd_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
86 ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv2i8_nxv2i8_nxv2i8:
87 ; CHECK: # %bb.0: # %entry
88 ; CHECK-NEXT: csrwi vxrm, 0
89 ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
90 ; CHECK-NEXT: vaadd.vv v8, v9, v10, v0.t
93 %a = call <vscale x 2 x i8> @llvm.riscv.vaadd.mask.nxv2i8.nxv2i8(
98 iXLen 0, iXLen %4, iXLen 1)
100 ret <vscale x 2 x i8> %a
103 declare <vscale x 4 x i8> @llvm.riscv.vaadd.nxv4i8.nxv4i8(
109 define <vscale x 4 x i8> @intrinsic_vaadd_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
110 ; CHECK-LABEL: intrinsic_vaadd_vv_nxv4i8_nxv4i8_nxv4i8:
111 ; CHECK: # %bb.0: # %entry
112 ; CHECK-NEXT: csrwi vxrm, 0
113 ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
114 ; CHECK-NEXT: vaadd.vv v8, v8, v9
117 %a = call <vscale x 4 x i8> @llvm.riscv.vaadd.nxv4i8.nxv4i8(
118 <vscale x 4 x i8> undef,
119 <vscale x 4 x i8> %0,
120 <vscale x 4 x i8> %1,
123 ret <vscale x 4 x i8> %a
126 declare <vscale x 4 x i8> @llvm.riscv.vaadd.mask.nxv4i8.nxv4i8(
131 iXLen, iXLen, iXLen);
133 define <vscale x 4 x i8> @intrinsic_vaadd_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
134 ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv4i8_nxv4i8_nxv4i8:
135 ; CHECK: # %bb.0: # %entry
136 ; CHECK-NEXT: csrwi vxrm, 0
137 ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
138 ; CHECK-NEXT: vaadd.vv v8, v9, v10, v0.t
141 %a = call <vscale x 4 x i8> @llvm.riscv.vaadd.mask.nxv4i8.nxv4i8(
142 <vscale x 4 x i8> %0,
143 <vscale x 4 x i8> %1,
144 <vscale x 4 x i8> %2,
145 <vscale x 4 x i1> %3,
146 iXLen 0, iXLen %4, iXLen 1)
148 ret <vscale x 4 x i8> %a
151 declare <vscale x 8 x i8> @llvm.riscv.vaadd.nxv8i8.nxv8i8(
157 define <vscale x 8 x i8> @intrinsic_vaadd_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
158 ; CHECK-LABEL: intrinsic_vaadd_vv_nxv8i8_nxv8i8_nxv8i8:
159 ; CHECK: # %bb.0: # %entry
160 ; CHECK-NEXT: csrwi vxrm, 0
161 ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
162 ; CHECK-NEXT: vaadd.vv v8, v8, v9
165 %a = call <vscale x 8 x i8> @llvm.riscv.vaadd.nxv8i8.nxv8i8(
166 <vscale x 8 x i8> undef,
167 <vscale x 8 x i8> %0,
168 <vscale x 8 x i8> %1,
171 ret <vscale x 8 x i8> %a
174 declare <vscale x 8 x i8> @llvm.riscv.vaadd.mask.nxv8i8.nxv8i8(
179 iXLen, iXLen, iXLen);
181 define <vscale x 8 x i8> @intrinsic_vaadd_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
182 ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv8i8_nxv8i8_nxv8i8:
183 ; CHECK: # %bb.0: # %entry
184 ; CHECK-NEXT: csrwi vxrm, 0
185 ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
186 ; CHECK-NEXT: vaadd.vv v8, v9, v10, v0.t
189 %a = call <vscale x 8 x i8> @llvm.riscv.vaadd.mask.nxv8i8.nxv8i8(
190 <vscale x 8 x i8> %0,
191 <vscale x 8 x i8> %1,
192 <vscale x 8 x i8> %2,
193 <vscale x 8 x i1> %3,
194 iXLen 0, iXLen %4, iXLen 1)
196 ret <vscale x 8 x i8> %a
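; From LMUL=2 upward the operands occupy register groups, so the check lines
; move from v9/v10 to the even-aligned groups v10, v12, v16 and v24 as LMUL
; grows to 2, 4 and 8.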
199 declare <vscale x 16 x i8> @llvm.riscv.vaadd.nxv16i8.nxv16i8(
205 define <vscale x 16 x i8> @intrinsic_vaadd_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, iXLen %2) nounwind {
206 ; CHECK-LABEL: intrinsic_vaadd_vv_nxv16i8_nxv16i8_nxv16i8:
207 ; CHECK: # %bb.0: # %entry
208 ; CHECK-NEXT: csrwi vxrm, 0
209 ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
210 ; CHECK-NEXT: vaadd.vv v8, v8, v10
213 %a = call <vscale x 16 x i8> @llvm.riscv.vaadd.nxv16i8.nxv16i8(
214 <vscale x 16 x i8> undef,
215 <vscale x 16 x i8> %0,
216 <vscale x 16 x i8> %1,
219 ret <vscale x 16 x i8> %a
222 declare <vscale x 16 x i8> @llvm.riscv.vaadd.mask.nxv16i8.nxv16i8(
227 iXLen, iXLen, iXLen);
229 define <vscale x 16 x i8> @intrinsic_vaadd_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
230 ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv16i8_nxv16i8_nxv16i8:
231 ; CHECK: # %bb.0: # %entry
232 ; CHECK-NEXT: csrwi vxrm, 0
233 ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
234 ; CHECK-NEXT: vaadd.vv v8, v10, v12, v0.t
237 %a = call <vscale x 16 x i8> @llvm.riscv.vaadd.mask.nxv16i8.nxv16i8(
238 <vscale x 16 x i8> %0,
239 <vscale x 16 x i8> %1,
240 <vscale x 16 x i8> %2,
241 <vscale x 16 x i1> %3,
242 iXLen 0, iXLen %4, iXLen 1)
244 ret <vscale x 16 x i8> %a
247 declare <vscale x 32 x i8> @llvm.riscv.vaadd.nxv32i8.nxv32i8(
253 define <vscale x 32 x i8> @intrinsic_vaadd_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, iXLen %2) nounwind {
254 ; CHECK-LABEL: intrinsic_vaadd_vv_nxv32i8_nxv32i8_nxv32i8:
255 ; CHECK: # %bb.0: # %entry
256 ; CHECK-NEXT: csrwi vxrm, 0
257 ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
258 ; CHECK-NEXT: vaadd.vv v8, v8, v12
261 %a = call <vscale x 32 x i8> @llvm.riscv.vaadd.nxv32i8.nxv32i8(
262 <vscale x 32 x i8> undef,
263 <vscale x 32 x i8> %0,
264 <vscale x 32 x i8> %1,
267 ret <vscale x 32 x i8> %a
270 declare <vscale x 32 x i8> @llvm.riscv.vaadd.mask.nxv32i8.nxv32i8(
275 iXLen, iXLen, iXLen);
277 define <vscale x 32 x i8> @intrinsic_vaadd_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
278 ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv32i8_nxv32i8_nxv32i8:
279 ; CHECK: # %bb.0: # %entry
280 ; CHECK-NEXT: csrwi vxrm, 0
281 ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
282 ; CHECK-NEXT: vaadd.vv v8, v12, v16, v0.t
285 %a = call <vscale x 32 x i8> @llvm.riscv.vaadd.mask.nxv32i8.nxv32i8(
286 <vscale x 32 x i8> %0,
287 <vscale x 32 x i8> %1,
288 <vscale x 32 x i8> %2,
289 <vscale x 32 x i1> %3,
290 iXLen 0, iXLen %4, iXLen 1)
292 ret <vscale x 32 x i8> %a
295 declare <vscale x 64 x i8> @llvm.riscv.vaadd.nxv64i8.nxv64i8(
301 define <vscale x 64 x i8> @intrinsic_vaadd_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, iXLen %2) nounwind {
302 ; CHECK-LABEL: intrinsic_vaadd_vv_nxv64i8_nxv64i8_nxv64i8:
303 ; CHECK: # %bb.0: # %entry
304 ; CHECK-NEXT: csrwi vxrm, 0
305 ; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
306 ; CHECK-NEXT: vaadd.vv v8, v8, v16
309 %a = call <vscale x 64 x i8> @llvm.riscv.vaadd.nxv64i8.nxv64i8(
310 <vscale x 64 x i8> undef,
311 <vscale x 64 x i8> %0,
312 <vscale x 64 x i8> %1,
315 ret <vscale x 64 x i8> %a
318 declare <vscale x 64 x i8> @llvm.riscv.vaadd.mask.nxv64i8.nxv64i8(
323 iXLen, iXLen, iXLen);
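; At LMUL=8 the three m8 vector operands no longer all fit in the vector
; argument registers, so the second source is passed indirectly and reloaded
; with vl8r.v, and the vector length arrives in a1 rather than a0.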
325 define <vscale x 64 x i8> @intrinsic_vaadd_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, iXLen %4) nounwind {
326 ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv64i8_nxv64i8_nxv64i8:
327 ; CHECK: # %bb.0: # %entry
328 ; CHECK-NEXT: vl8r.v v24, (a0)
329 ; CHECK-NEXT: csrwi vxrm, 0
330 ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu
331 ; CHECK-NEXT: vaadd.vv v8, v16, v24, v0.t
334 %a = call <vscale x 64 x i8> @llvm.riscv.vaadd.mask.nxv64i8.nxv64i8(
335 <vscale x 64 x i8> %0,
336 <vscale x 64 x i8> %1,
337 <vscale x 64 x i8> %2,
338 <vscale x 64 x i1> %3,
339 iXLen 0, iXLen %4, iXLen 1)
341 ret <vscale x 64 x i8> %a
344 declare <vscale x 1 x i16> @llvm.riscv.vaadd.nxv1i16.nxv1i16(
350 define <vscale x 1 x i16> @intrinsic_vaadd_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
351 ; CHECK-LABEL: intrinsic_vaadd_vv_nxv1i16_nxv1i16_nxv1i16:
352 ; CHECK: # %bb.0: # %entry
353 ; CHECK-NEXT: csrwi vxrm, 0
354 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
355 ; CHECK-NEXT: vaadd.vv v8, v8, v9
358 %a = call <vscale x 1 x i16> @llvm.riscv.vaadd.nxv1i16.nxv1i16(
359 <vscale x 1 x i16> undef,
360 <vscale x 1 x i16> %0,
361 <vscale x 1 x i16> %1,
364 ret <vscale x 1 x i16> %a
367 declare <vscale x 1 x i16> @llvm.riscv.vaadd.mask.nxv1i16.nxv1i16(
372 iXLen, iXLen, iXLen);
374 define <vscale x 1 x i16> @intrinsic_vaadd_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
375 ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv1i16_nxv1i16_nxv1i16:
376 ; CHECK: # %bb.0: # %entry
377 ; CHECK-NEXT: csrwi vxrm, 0
378 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
379 ; CHECK-NEXT: vaadd.vv v8, v9, v10, v0.t
382 %a = call <vscale x 1 x i16> @llvm.riscv.vaadd.mask.nxv1i16.nxv1i16(
383 <vscale x 1 x i16> %0,
384 <vscale x 1 x i16> %1,
385 <vscale x 1 x i16> %2,
386 <vscale x 1 x i1> %3,
387 iXLen 0, iXLen %4, iXLen 1)
389 ret <vscale x 1 x i16> %a
392 declare <vscale x 2 x i16> @llvm.riscv.vaadd.nxv2i16.nxv2i16(
398 define <vscale x 2 x i16> @intrinsic_vaadd_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
399 ; CHECK-LABEL: intrinsic_vaadd_vv_nxv2i16_nxv2i16_nxv2i16:
400 ; CHECK: # %bb.0: # %entry
401 ; CHECK-NEXT: csrwi vxrm, 0
402 ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
403 ; CHECK-NEXT: vaadd.vv v8, v8, v9
406 %a = call <vscale x 2 x i16> @llvm.riscv.vaadd.nxv2i16.nxv2i16(
407 <vscale x 2 x i16> undef,
408 <vscale x 2 x i16> %0,
409 <vscale x 2 x i16> %1,
412 ret <vscale x 2 x i16> %a
415 declare <vscale x 2 x i16> @llvm.riscv.vaadd.mask.nxv2i16.nxv2i16(
420 iXLen, iXLen, iXLen);
422 define <vscale x 2 x i16> @intrinsic_vaadd_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
423 ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv2i16_nxv2i16_nxv2i16:
424 ; CHECK: # %bb.0: # %entry
425 ; CHECK-NEXT: csrwi vxrm, 0
426 ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
427 ; CHECK-NEXT: vaadd.vv v8, v9, v10, v0.t
430 %a = call <vscale x 2 x i16> @llvm.riscv.vaadd.mask.nxv2i16.nxv2i16(
431 <vscale x 2 x i16> %0,
432 <vscale x 2 x i16> %1,
433 <vscale x 2 x i16> %2,
434 <vscale x 2 x i1> %3,
435 iXLen 0, iXLen %4, iXLen 1)
437 ret <vscale x 2 x i16> %a
440 declare <vscale x 4 x i16> @llvm.riscv.vaadd.nxv4i16.nxv4i16(
446 define <vscale x 4 x i16> @intrinsic_vaadd_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
447 ; CHECK-LABEL: intrinsic_vaadd_vv_nxv4i16_nxv4i16_nxv4i16:
448 ; CHECK: # %bb.0: # %entry
449 ; CHECK-NEXT: csrwi vxrm, 0
450 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
451 ; CHECK-NEXT: vaadd.vv v8, v8, v9
454 %a = call <vscale x 4 x i16> @llvm.riscv.vaadd.nxv4i16.nxv4i16(
455 <vscale x 4 x i16> undef,
456 <vscale x 4 x i16> %0,
457 <vscale x 4 x i16> %1,
460 ret <vscale x 4 x i16> %a
463 declare <vscale x 4 x i16> @llvm.riscv.vaadd.mask.nxv4i16.nxv4i16(
468 iXLen, iXLen, iXLen);
470 define <vscale x 4 x i16> @intrinsic_vaadd_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
471 ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv4i16_nxv4i16_nxv4i16:
472 ; CHECK: # %bb.0: # %entry
473 ; CHECK-NEXT: csrwi vxrm, 0
474 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
475 ; CHECK-NEXT: vaadd.vv v8, v9, v10, v0.t
478 %a = call <vscale x 4 x i16> @llvm.riscv.vaadd.mask.nxv4i16.nxv4i16(
479 <vscale x 4 x i16> %0,
480 <vscale x 4 x i16> %1,
481 <vscale x 4 x i16> %2,
482 <vscale x 4 x i1> %3,
483 iXLen 0, iXLen %4, iXLen 1)
485 ret <vscale x 4 x i16> %a
488 declare <vscale x 8 x i16> @llvm.riscv.vaadd.nxv8i16.nxv8i16(
494 define <vscale x 8 x i16> @intrinsic_vaadd_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
495 ; CHECK-LABEL: intrinsic_vaadd_vv_nxv8i16_nxv8i16_nxv8i16:
496 ; CHECK: # %bb.0: # %entry
497 ; CHECK-NEXT: csrwi vxrm, 0
498 ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
499 ; CHECK-NEXT: vaadd.vv v8, v8, v10
502 %a = call <vscale x 8 x i16> @llvm.riscv.vaadd.nxv8i16.nxv8i16(
503 <vscale x 8 x i16> undef,
504 <vscale x 8 x i16> %0,
505 <vscale x 8 x i16> %1,
508 ret <vscale x 8 x i16> %a
511 declare <vscale x 8 x i16> @llvm.riscv.vaadd.mask.nxv8i16.nxv8i16(
516 iXLen, iXLen, iXLen);
518 define <vscale x 8 x i16> @intrinsic_vaadd_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
519 ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv8i16_nxv8i16_nxv8i16:
520 ; CHECK: # %bb.0: # %entry
521 ; CHECK-NEXT: csrwi vxrm, 0
522 ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
523 ; CHECK-NEXT: vaadd.vv v8, v10, v12, v0.t
526 %a = call <vscale x 8 x i16> @llvm.riscv.vaadd.mask.nxv8i16.nxv8i16(
527 <vscale x 8 x i16> %0,
528 <vscale x 8 x i16> %1,
529 <vscale x 8 x i16> %2,
530 <vscale x 8 x i1> %3,
531 iXLen 0, iXLen %4, iXLen 1)
533 ret <vscale x 8 x i16> %a
536 declare <vscale x 16 x i16> @llvm.riscv.vaadd.nxv16i16.nxv16i16(
542 define <vscale x 16 x i16> @intrinsic_vaadd_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
543 ; CHECK-LABEL: intrinsic_vaadd_vv_nxv16i16_nxv16i16_nxv16i16:
544 ; CHECK: # %bb.0: # %entry
545 ; CHECK-NEXT: csrwi vxrm, 0
546 ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
547 ; CHECK-NEXT: vaadd.vv v8, v8, v12
550 %a = call <vscale x 16 x i16> @llvm.riscv.vaadd.nxv16i16.nxv16i16(
551 <vscale x 16 x i16> undef,
552 <vscale x 16 x i16> %0,
553 <vscale x 16 x i16> %1,
556 ret <vscale x 16 x i16> %a
559 declare <vscale x 16 x i16> @llvm.riscv.vaadd.mask.nxv16i16.nxv16i16(
564 iXLen, iXLen, iXLen);
566 define <vscale x 16 x i16> @intrinsic_vaadd_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
567 ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv16i16_nxv16i16_nxv16i16:
568 ; CHECK: # %bb.0: # %entry
569 ; CHECK-NEXT: csrwi vxrm, 0
570 ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
571 ; CHECK-NEXT: vaadd.vv v8, v12, v16, v0.t
574 %a = call <vscale x 16 x i16> @llvm.riscv.vaadd.mask.nxv16i16.nxv16i16(
575 <vscale x 16 x i16> %0,
576 <vscale x 16 x i16> %1,
577 <vscale x 16 x i16> %2,
578 <vscale x 16 x i1> %3,
579 iXLen 0, iXLen %4, iXLen 1)
581 ret <vscale x 16 x i16> %a
584 declare <vscale x 32 x i16> @llvm.riscv.vaadd.nxv32i16.nxv32i16(
590 define <vscale x 32 x i16> @intrinsic_vaadd_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, iXLen %2) nounwind {
591 ; CHECK-LABEL: intrinsic_vaadd_vv_nxv32i16_nxv32i16_nxv32i16:
592 ; CHECK: # %bb.0: # %entry
593 ; CHECK-NEXT: csrwi vxrm, 0
594 ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
595 ; CHECK-NEXT: vaadd.vv v8, v8, v16
598 %a = call <vscale x 32 x i16> @llvm.riscv.vaadd.nxv32i16.nxv32i16(
599 <vscale x 32 x i16> undef,
600 <vscale x 32 x i16> %0,
601 <vscale x 32 x i16> %1,
604 ret <vscale x 32 x i16> %a
607 declare <vscale x 32 x i16> @llvm.riscv.vaadd.mask.nxv32i16.nxv32i16(
612 iXLen, iXLen, iXLen);
614 define <vscale x 32 x i16> @intrinsic_vaadd_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
615 ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv32i16_nxv32i16_nxv32i16:
616 ; CHECK: # %bb.0: # %entry
617 ; CHECK-NEXT: vl8re16.v v24, (a0)
618 ; CHECK-NEXT: csrwi vxrm, 0
619 ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu
620 ; CHECK-NEXT: vaadd.vv v8, v16, v24, v0.t
623 %a = call <vscale x 32 x i16> @llvm.riscv.vaadd.mask.nxv32i16.nxv32i16(
624 <vscale x 32 x i16> %0,
625 <vscale x 32 x i16> %1,
626 <vscale x 32 x i16> %2,
627 <vscale x 32 x i1> %3,
628 iXLen 0, iXLen %4, iXLen 1)
630 ret <vscale x 32 x i16> %a
633 declare <vscale x 1 x i32> @llvm.riscv.vaadd.nxv1i32.nxv1i32(
639 define <vscale x 1 x i32> @intrinsic_vaadd_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
640 ; CHECK-LABEL: intrinsic_vaadd_vv_nxv1i32_nxv1i32_nxv1i32:
641 ; CHECK: # %bb.0: # %entry
642 ; CHECK-NEXT: csrwi vxrm, 0
643 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
644 ; CHECK-NEXT: vaadd.vv v8, v8, v9
647 %a = call <vscale x 1 x i32> @llvm.riscv.vaadd.nxv1i32.nxv1i32(
648 <vscale x 1 x i32> undef,
649 <vscale x 1 x i32> %0,
650 <vscale x 1 x i32> %1,
653 ret <vscale x 1 x i32> %a
656 declare <vscale x 1 x i32> @llvm.riscv.vaadd.mask.nxv1i32.nxv1i32(
661 iXLen, iXLen, iXLen);
663 define <vscale x 1 x i32> @intrinsic_vaadd_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
664 ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv1i32_nxv1i32_nxv1i32:
665 ; CHECK: # %bb.0: # %entry
666 ; CHECK-NEXT: csrwi vxrm, 0
667 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
668 ; CHECK-NEXT: vaadd.vv v8, v9, v10, v0.t
671 %a = call <vscale x 1 x i32> @llvm.riscv.vaadd.mask.nxv1i32.nxv1i32(
672 <vscale x 1 x i32> %0,
673 <vscale x 1 x i32> %1,
674 <vscale x 1 x i32> %2,
675 <vscale x 1 x i1> %3,
676 iXLen 0, iXLen %4, iXLen 1)
678 ret <vscale x 1 x i32> %a
681 declare <vscale x 2 x i32> @llvm.riscv.vaadd.nxv2i32.nxv2i32(
687 define <vscale x 2 x i32> @intrinsic_vaadd_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
688 ; CHECK-LABEL: intrinsic_vaadd_vv_nxv2i32_nxv2i32_nxv2i32:
689 ; CHECK: # %bb.0: # %entry
690 ; CHECK-NEXT: csrwi vxrm, 0
691 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
692 ; CHECK-NEXT: vaadd.vv v8, v8, v9
695 %a = call <vscale x 2 x i32> @llvm.riscv.vaadd.nxv2i32.nxv2i32(
696 <vscale x 2 x i32> undef,
697 <vscale x 2 x i32> %0,
698 <vscale x 2 x i32> %1,
701 ret <vscale x 2 x i32> %a
704 declare <vscale x 2 x i32> @llvm.riscv.vaadd.mask.nxv2i32.nxv2i32(
709 iXLen, iXLen, iXLen);
711 define <vscale x 2 x i32> @intrinsic_vaadd_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
712 ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv2i32_nxv2i32_nxv2i32:
713 ; CHECK: # %bb.0: # %entry
714 ; CHECK-NEXT: csrwi vxrm, 0
715 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
716 ; CHECK-NEXT: vaadd.vv v8, v9, v10, v0.t
719 %a = call <vscale x 2 x i32> @llvm.riscv.vaadd.mask.nxv2i32.nxv2i32(
720 <vscale x 2 x i32> %0,
721 <vscale x 2 x i32> %1,
722 <vscale x 2 x i32> %2,
723 <vscale x 2 x i1> %3,
724 iXLen 0, iXLen %4, iXLen 1)
726 ret <vscale x 2 x i32> %a
729 declare <vscale x 4 x i32> @llvm.riscv.vaadd.nxv4i32.nxv4i32(
735 define <vscale x 4 x i32> @intrinsic_vaadd_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
736 ; CHECK-LABEL: intrinsic_vaadd_vv_nxv4i32_nxv4i32_nxv4i32:
737 ; CHECK: # %bb.0: # %entry
738 ; CHECK-NEXT: csrwi vxrm, 0
739 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
740 ; CHECK-NEXT: vaadd.vv v8, v8, v10
743 %a = call <vscale x 4 x i32> @llvm.riscv.vaadd.nxv4i32.nxv4i32(
744 <vscale x 4 x i32> undef,
745 <vscale x 4 x i32> %0,
746 <vscale x 4 x i32> %1,
749 ret <vscale x 4 x i32> %a
752 declare <vscale x 4 x i32> @llvm.riscv.vaadd.mask.nxv4i32.nxv4i32(
757 iXLen, iXLen, iXLen);
759 define <vscale x 4 x i32> @intrinsic_vaadd_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
760 ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv4i32_nxv4i32_nxv4i32:
761 ; CHECK: # %bb.0: # %entry
762 ; CHECK-NEXT: csrwi vxrm, 0
763 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
764 ; CHECK-NEXT: vaadd.vv v8, v10, v12, v0.t
767 %a = call <vscale x 4 x i32> @llvm.riscv.vaadd.mask.nxv4i32.nxv4i32(
768 <vscale x 4 x i32> %0,
769 <vscale x 4 x i32> %1,
770 <vscale x 4 x i32> %2,
771 <vscale x 4 x i1> %3,
772 iXLen 0, iXLen %4, iXLen 1)
774 ret <vscale x 4 x i32> %a
777 declare <vscale x 8 x i32> @llvm.riscv.vaadd.nxv8i32.nxv8i32(
783 define <vscale x 8 x i32> @intrinsic_vaadd_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
784 ; CHECK-LABEL: intrinsic_vaadd_vv_nxv8i32_nxv8i32_nxv8i32:
785 ; CHECK: # %bb.0: # %entry
786 ; CHECK-NEXT: csrwi vxrm, 0
787 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
788 ; CHECK-NEXT: vaadd.vv v8, v8, v12
791 %a = call <vscale x 8 x i32> @llvm.riscv.vaadd.nxv8i32.nxv8i32(
792 <vscale x 8 x i32> undef,
793 <vscale x 8 x i32> %0,
794 <vscale x 8 x i32> %1,
797 ret <vscale x 8 x i32> %a
800 declare <vscale x 8 x i32> @llvm.riscv.vaadd.mask.nxv8i32.nxv8i32(
805 iXLen, iXLen, iXLen);
807 define <vscale x 8 x i32> @intrinsic_vaadd_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
808 ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv8i32_nxv8i32_nxv8i32:
809 ; CHECK: # %bb.0: # %entry
810 ; CHECK-NEXT: csrwi vxrm, 0
811 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
812 ; CHECK-NEXT: vaadd.vv v8, v12, v16, v0.t
815 %a = call <vscale x 8 x i32> @llvm.riscv.vaadd.mask.nxv8i32.nxv8i32(
816 <vscale x 8 x i32> %0,
817 <vscale x 8 x i32> %1,
818 <vscale x 8 x i32> %2,
819 <vscale x 8 x i1> %3,
820 iXLen 0, iXLen %4, iXLen 1)
822 ret <vscale x 8 x i32> %a
825 declare <vscale x 16 x i32> @llvm.riscv.vaadd.nxv16i32.nxv16i32(
831 define <vscale x 16 x i32> @intrinsic_vaadd_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, iXLen %2) nounwind {
832 ; CHECK-LABEL: intrinsic_vaadd_vv_nxv16i32_nxv16i32_nxv16i32:
833 ; CHECK: # %bb.0: # %entry
834 ; CHECK-NEXT: csrwi vxrm, 0
835 ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
836 ; CHECK-NEXT: vaadd.vv v8, v8, v16
839 %a = call <vscale x 16 x i32> @llvm.riscv.vaadd.nxv16i32.nxv16i32(
840 <vscale x 16 x i32> undef,
841 <vscale x 16 x i32> %0,
842 <vscale x 16 x i32> %1,
845 ret <vscale x 16 x i32> %a
848 declare <vscale x 16 x i32> @llvm.riscv.vaadd.mask.nxv16i32.nxv16i32(
853 iXLen, iXLen, iXLen);
855 define <vscale x 16 x i32> @intrinsic_vaadd_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
856 ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv16i32_nxv16i32_nxv16i32:
857 ; CHECK: # %bb.0: # %entry
858 ; CHECK-NEXT: vl8re32.v v24, (a0)
859 ; CHECK-NEXT: csrwi vxrm, 0
860 ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
861 ; CHECK-NEXT: vaadd.vv v8, v16, v24, v0.t
864 %a = call <vscale x 16 x i32> @llvm.riscv.vaadd.mask.nxv16i32.nxv16i32(
865 <vscale x 16 x i32> %0,
866 <vscale x 16 x i32> %1,
867 <vscale x 16 x i32> %2,
868 <vscale x 16 x i1> %3,
869 iXLen 0, iXLen %4, iXLen 1)
871 ret <vscale x 16 x i32> %a
874 declare <vscale x 1 x i64> @llvm.riscv.vaadd.nxv1i64.nxv1i64(
880 define <vscale x 1 x i64> @intrinsic_vaadd_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind {
881 ; CHECK-LABEL: intrinsic_vaadd_vv_nxv1i64_nxv1i64_nxv1i64:
882 ; CHECK: # %bb.0: # %entry
883 ; CHECK-NEXT: csrwi vxrm, 0
884 ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
885 ; CHECK-NEXT: vaadd.vv v8, v8, v9
888 %a = call <vscale x 1 x i64> @llvm.riscv.vaadd.nxv1i64.nxv1i64(
889 <vscale x 1 x i64> undef,
890 <vscale x 1 x i64> %0,
891 <vscale x 1 x i64> %1,
894 ret <vscale x 1 x i64> %a
897 declare <vscale x 1 x i64> @llvm.riscv.vaadd.mask.nxv1i64.nxv1i64(
902 iXLen, iXLen, iXLen);
904 define <vscale x 1 x i64> @intrinsic_vaadd_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
905 ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv1i64_nxv1i64_nxv1i64:
906 ; CHECK: # %bb.0: # %entry
907 ; CHECK-NEXT: csrwi vxrm, 0
908 ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
909 ; CHECK-NEXT: vaadd.vv v8, v9, v10, v0.t
912 %a = call <vscale x 1 x i64> @llvm.riscv.vaadd.mask.nxv1i64.nxv1i64(
913 <vscale x 1 x i64> %0,
914 <vscale x 1 x i64> %1,
915 <vscale x 1 x i64> %2,
916 <vscale x 1 x i1> %3,
917 iXLen 0, iXLen %4, iXLen 1)
919 ret <vscale x 1 x i64> %a
922 declare <vscale x 2 x i64> @llvm.riscv.vaadd.nxv2i64.nxv2i64(
928 define <vscale x 2 x i64> @intrinsic_vaadd_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, iXLen %2) nounwind {
929 ; CHECK-LABEL: intrinsic_vaadd_vv_nxv2i64_nxv2i64_nxv2i64:
930 ; CHECK: # %bb.0: # %entry
931 ; CHECK-NEXT: csrwi vxrm, 0
932 ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
933 ; CHECK-NEXT: vaadd.vv v8, v8, v10
936 %a = call <vscale x 2 x i64> @llvm.riscv.vaadd.nxv2i64.nxv2i64(
937 <vscale x 2 x i64> undef,
938 <vscale x 2 x i64> %0,
939 <vscale x 2 x i64> %1,
942 ret <vscale x 2 x i64> %a
945 declare <vscale x 2 x i64> @llvm.riscv.vaadd.mask.nxv2i64.nxv2i64(
950 iXLen, iXLen, iXLen);
952 define <vscale x 2 x i64> @intrinsic_vaadd_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
953 ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv2i64_nxv2i64_nxv2i64:
954 ; CHECK: # %bb.0: # %entry
955 ; CHECK-NEXT: csrwi vxrm, 0
956 ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
957 ; CHECK-NEXT: vaadd.vv v8, v10, v12, v0.t
960 %a = call <vscale x 2 x i64> @llvm.riscv.vaadd.mask.nxv2i64.nxv2i64(
961 <vscale x 2 x i64> %0,
962 <vscale x 2 x i64> %1,
963 <vscale x 2 x i64> %2,
964 <vscale x 2 x i1> %3,
965 iXLen 0, iXLen %4, iXLen 1)
967 ret <vscale x 2 x i64> %a
970 declare <vscale x 4 x i64> @llvm.riscv.vaadd.nxv4i64.nxv4i64(
976 define <vscale x 4 x i64> @intrinsic_vaadd_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, iXLen %2) nounwind {
977 ; CHECK-LABEL: intrinsic_vaadd_vv_nxv4i64_nxv4i64_nxv4i64:
978 ; CHECK: # %bb.0: # %entry
979 ; CHECK-NEXT: csrwi vxrm, 0
980 ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
981 ; CHECK-NEXT: vaadd.vv v8, v8, v12
984 %a = call <vscale x 4 x i64> @llvm.riscv.vaadd.nxv4i64.nxv4i64(
985 <vscale x 4 x i64> undef,
986 <vscale x 4 x i64> %0,
987 <vscale x 4 x i64> %1,
990 ret <vscale x 4 x i64> %a
993 declare <vscale x 4 x i64> @llvm.riscv.vaadd.mask.nxv4i64.nxv4i64(
998 iXLen, iXLen, iXLen);
1000 define <vscale x 4 x i64> @intrinsic_vaadd_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
1001 ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv4i64_nxv4i64_nxv4i64:
1002 ; CHECK: # %bb.0: # %entry
1003 ; CHECK-NEXT: csrwi vxrm, 0
1004 ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
1005 ; CHECK-NEXT: vaadd.vv v8, v12, v16, v0.t
1008 %a = call <vscale x 4 x i64> @llvm.riscv.vaadd.mask.nxv4i64.nxv4i64(
1009 <vscale x 4 x i64> %0,
1010 <vscale x 4 x i64> %1,
1011 <vscale x 4 x i64> %2,
1012 <vscale x 4 x i1> %3,
1013 iXLen 0, iXLen %4, iXLen 1)
1015 ret <vscale x 4 x i64> %a
1018 declare <vscale x 8 x i64> @llvm.riscv.vaadd.nxv8i64.nxv8i64(
1024 define <vscale x 8 x i64> @intrinsic_vaadd_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, iXLen %2) nounwind {
1025 ; CHECK-LABEL: intrinsic_vaadd_vv_nxv8i64_nxv8i64_nxv8i64:
1026 ; CHECK: # %bb.0: # %entry
1027 ; CHECK-NEXT: csrwi vxrm, 0
1028 ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
1029 ; CHECK-NEXT: vaadd.vv v8, v8, v16
1032 %a = call <vscale x 8 x i64> @llvm.riscv.vaadd.nxv8i64.nxv8i64(
1033 <vscale x 8 x i64> undef,
1034 <vscale x 8 x i64> %0,
1035 <vscale x 8 x i64> %1,
1038 ret <vscale x 8 x i64> %a
1041 declare <vscale x 8 x i64> @llvm.riscv.vaadd.mask.nxv8i64.nxv8i64(
1046 iXLen, iXLen, iXLen);
1048 define <vscale x 8 x i64> @intrinsic_vaadd_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
1049 ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv8i64_nxv8i64_nxv8i64:
1050 ; CHECK: # %bb.0: # %entry
1051 ; CHECK-NEXT: vl8re64.v v24, (a0)
1052 ; CHECK-NEXT: csrwi vxrm, 0
1053 ; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu
1054 ; CHECK-NEXT: vaadd.vv v8, v16, v24, v0.t
1057 %a = call <vscale x 8 x i64> @llvm.riscv.vaadd.mask.nxv8i64.nxv8i64(
1058 <vscale x 8 x i64> %0,
1059 <vscale x 8 x i64> %1,
1060 <vscale x 8 x i64> %2,
1061 <vscale x 8 x i1> %3,
1062 iXLen 0, iXLen %4, iXLen 1)
1064 ret <vscale x 8 x i64> %a
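; The tests below cover the vector-scalar form, vaadd.vx, where the scalar
; operand is passed in a0 and the vector length in a1.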
1067 declare <vscale x 1 x i8> @llvm.riscv.vaadd.nxv1i8.i8(
1073 define <vscale x 1 x i8> @intrinsic_vaadd_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, iXLen %2) nounwind {
1074 ; CHECK-LABEL: intrinsic_vaadd_vx_nxv1i8_nxv1i8_i8:
1075 ; CHECK: # %bb.0: # %entry
1076 ; CHECK-NEXT: csrwi vxrm, 0
1077 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
1078 ; CHECK-NEXT: vaadd.vx v8, v8, a0
1081 %a = call <vscale x 1 x i8> @llvm.riscv.vaadd.nxv1i8.i8(
1082 <vscale x 1 x i8> undef,
1083 <vscale x 1 x i8> %0,
1087 ret <vscale x 1 x i8> %a
1090 declare <vscale x 1 x i8> @llvm.riscv.vaadd.mask.nxv1i8.i8(
1095 iXLen, iXLen, iXLen);
1097 define <vscale x 1 x i8> @intrinsic_vaadd_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
1098 ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv1i8_nxv1i8_i8:
1099 ; CHECK: # %bb.0: # %entry
1100 ; CHECK-NEXT: csrwi vxrm, 0
1101 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
1102 ; CHECK-NEXT: vaadd.vx v8, v9, a0, v0.t
1105 %a = call <vscale x 1 x i8> @llvm.riscv.vaadd.mask.nxv1i8.i8(
1106 <vscale x 1 x i8> %0,
1107 <vscale x 1 x i8> %1,
1109 <vscale x 1 x i1> %3,
1110 iXLen 0, iXLen %4, iXLen 1)
1112 ret <vscale x 1 x i8> %a
1115 declare <vscale x 2 x i8> @llvm.riscv.vaadd.nxv2i8.i8(
1121 define <vscale x 2 x i8> @intrinsic_vaadd_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, iXLen %2) nounwind {
1122 ; CHECK-LABEL: intrinsic_vaadd_vx_nxv2i8_nxv2i8_i8:
1123 ; CHECK: # %bb.0: # %entry
1124 ; CHECK-NEXT: csrwi vxrm, 0
1125 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
1126 ; CHECK-NEXT: vaadd.vx v8, v8, a0
1129 %a = call <vscale x 2 x i8> @llvm.riscv.vaadd.nxv2i8.i8(
1130 <vscale x 2 x i8> undef,
1131 <vscale x 2 x i8> %0,
1135 ret <vscale x 2 x i8> %a
1138 declare <vscale x 2 x i8> @llvm.riscv.vaadd.mask.nxv2i8.i8(
1143 iXLen, iXLen, iXLen);
1145 define <vscale x 2 x i8> @intrinsic_vaadd_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
1146 ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv2i8_nxv2i8_i8:
1147 ; CHECK: # %bb.0: # %entry
1148 ; CHECK-NEXT: csrwi vxrm, 0
1149 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
1150 ; CHECK-NEXT: vaadd.vx v8, v9, a0, v0.t
1153 %a = call <vscale x 2 x i8> @llvm.riscv.vaadd.mask.nxv2i8.i8(
1154 <vscale x 2 x i8> %0,
1155 <vscale x 2 x i8> %1,
1157 <vscale x 2 x i1> %3,
1158 iXLen 0, iXLen %4, iXLen 1)
1160 ret <vscale x 2 x i8> %a
1163 declare <vscale x 4 x i8> @llvm.riscv.vaadd.nxv4i8.i8(
1169 define <vscale x 4 x i8> @intrinsic_vaadd_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, iXLen %2) nounwind {
1170 ; CHECK-LABEL: intrinsic_vaadd_vx_nxv4i8_nxv4i8_i8:
1171 ; CHECK: # %bb.0: # %entry
1172 ; CHECK-NEXT: csrwi vxrm, 0
1173 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
1174 ; CHECK-NEXT: vaadd.vx v8, v8, a0
1177 %a = call <vscale x 4 x i8> @llvm.riscv.vaadd.nxv4i8.i8(
1178 <vscale x 4 x i8> undef,
1179 <vscale x 4 x i8> %0,
1183 ret <vscale x 4 x i8> %a
1186 declare <vscale x 4 x i8> @llvm.riscv.vaadd.mask.nxv4i8.i8(
1191 iXLen, iXLen, iXLen);
1193 define <vscale x 4 x i8> @intrinsic_vaadd_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
1194 ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv4i8_nxv4i8_i8:
1195 ; CHECK: # %bb.0: # %entry
1196 ; CHECK-NEXT: csrwi vxrm, 0
1197 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
1198 ; CHECK-NEXT: vaadd.vx v8, v9, a0, v0.t
1201 %a = call <vscale x 4 x i8> @llvm.riscv.vaadd.mask.nxv4i8.i8(
1202 <vscale x 4 x i8> %0,
1203 <vscale x 4 x i8> %1,
1205 <vscale x 4 x i1> %3,
1206 iXLen 0, iXLen %4, iXLen 1)
1208 ret <vscale x 4 x i8> %a
1211 declare <vscale x 8 x i8> @llvm.riscv.vaadd.nxv8i8.i8(
1217 define <vscale x 8 x i8> @intrinsic_vaadd_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, iXLen %2) nounwind {
1218 ; CHECK-LABEL: intrinsic_vaadd_vx_nxv8i8_nxv8i8_i8:
1219 ; CHECK: # %bb.0: # %entry
1220 ; CHECK-NEXT: csrwi vxrm, 0
1221 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
1222 ; CHECK-NEXT: vaadd.vx v8, v8, a0
1225 %a = call <vscale x 8 x i8> @llvm.riscv.vaadd.nxv8i8.i8(
1226 <vscale x 8 x i8> undef,
1227 <vscale x 8 x i8> %0,
1231 ret <vscale x 8 x i8> %a
1234 declare <vscale x 8 x i8> @llvm.riscv.vaadd.mask.nxv8i8.i8(
1239 iXLen, iXLen, iXLen);
1241 define <vscale x 8 x i8> @intrinsic_vaadd_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
1242 ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv8i8_nxv8i8_i8:
1243 ; CHECK: # %bb.0: # %entry
1244 ; CHECK-NEXT: csrwi vxrm, 0
1245 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
1246 ; CHECK-NEXT: vaadd.vx v8, v9, a0, v0.t
1249 %a = call <vscale x 8 x i8> @llvm.riscv.vaadd.mask.nxv8i8.i8(
1250 <vscale x 8 x i8> %0,
1251 <vscale x 8 x i8> %1,
1253 <vscale x 8 x i1> %3,
1254 iXLen 0, iXLen %4, iXLen 1)
1256 ret <vscale x 8 x i8> %a
1259 declare <vscale x 16 x i8> @llvm.riscv.vaadd.nxv16i8.i8(
1265 define <vscale x 16 x i8> @intrinsic_vaadd_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, iXLen %2) nounwind {
1266 ; CHECK-LABEL: intrinsic_vaadd_vx_nxv16i8_nxv16i8_i8:
1267 ; CHECK: # %bb.0: # %entry
1268 ; CHECK-NEXT: csrwi vxrm, 0
1269 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
1270 ; CHECK-NEXT: vaadd.vx v8, v8, a0
1273 %a = call <vscale x 16 x i8> @llvm.riscv.vaadd.nxv16i8.i8(
1274 <vscale x 16 x i8> undef,
1275 <vscale x 16 x i8> %0,
1279 ret <vscale x 16 x i8> %a
1282 declare <vscale x 16 x i8> @llvm.riscv.vaadd.mask.nxv16i8.i8(
1287 iXLen, iXLen, iXLen);
1289 define <vscale x 16 x i8> @intrinsic_vaadd_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
1290 ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv16i8_nxv16i8_i8:
1291 ; CHECK: # %bb.0: # %entry
1292 ; CHECK-NEXT: csrwi vxrm, 0
1293 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
1294 ; CHECK-NEXT: vaadd.vx v8, v10, a0, v0.t
1297 %a = call <vscale x 16 x i8> @llvm.riscv.vaadd.mask.nxv16i8.i8(
1298 <vscale x 16 x i8> %0,
1299 <vscale x 16 x i8> %1,
1301 <vscale x 16 x i1> %3,
1302 iXLen 0, iXLen %4, iXLen 1)
1304 ret <vscale x 16 x i8> %a
1307 declare <vscale x 32 x i8> @llvm.riscv.vaadd.nxv32i8.i8(
1313 define <vscale x 32 x i8> @intrinsic_vaadd_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, iXLen %2) nounwind {
1314 ; CHECK-LABEL: intrinsic_vaadd_vx_nxv32i8_nxv32i8_i8:
1315 ; CHECK: # %bb.0: # %entry
1316 ; CHECK-NEXT: csrwi vxrm, 0
1317 ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
1318 ; CHECK-NEXT: vaadd.vx v8, v8, a0
1321 %a = call <vscale x 32 x i8> @llvm.riscv.vaadd.nxv32i8.i8(
1322 <vscale x 32 x i8> undef,
1323 <vscale x 32 x i8> %0,
1327 ret <vscale x 32 x i8> %a
1330 declare <vscale x 32 x i8> @llvm.riscv.vaadd.mask.nxv32i8.i8(
1335 iXLen, iXLen, iXLen);
1337 define <vscale x 32 x i8> @intrinsic_vaadd_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
1338 ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv32i8_nxv32i8_i8:
1339 ; CHECK: # %bb.0: # %entry
1340 ; CHECK-NEXT: csrwi vxrm, 0
1341 ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
1342 ; CHECK-NEXT: vaadd.vx v8, v12, a0, v0.t
1345 %a = call <vscale x 32 x i8> @llvm.riscv.vaadd.mask.nxv32i8.i8(
1346 <vscale x 32 x i8> %0,
1347 <vscale x 32 x i8> %1,
1349 <vscale x 32 x i1> %3,
1350 iXLen 0, iXLen %4, iXLen 1)
1352 ret <vscale x 32 x i8> %a
1355 declare <vscale x 64 x i8> @llvm.riscv.vaadd.nxv64i8.i8(
1361 define <vscale x 64 x i8> @intrinsic_vaadd_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, iXLen %2) nounwind {
1362 ; CHECK-LABEL: intrinsic_vaadd_vx_nxv64i8_nxv64i8_i8:
1363 ; CHECK: # %bb.0: # %entry
1364 ; CHECK-NEXT: csrwi vxrm, 0
1365 ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
1366 ; CHECK-NEXT: vaadd.vx v8, v8, a0
1369 %a = call <vscale x 64 x i8> @llvm.riscv.vaadd.nxv64i8.i8(
1370 <vscale x 64 x i8> undef,
1371 <vscale x 64 x i8> %0,
1375 ret <vscale x 64 x i8> %a
1378 declare <vscale x 64 x i8> @llvm.riscv.vaadd.mask.nxv64i8.i8(
1383 iXLen, iXLen, iXLen);
1385 define <vscale x 64 x i8> @intrinsic_vaadd_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, iXLen %4) nounwind {
1386 ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv64i8_nxv64i8_i8:
1387 ; CHECK: # %bb.0: # %entry
1388 ; CHECK-NEXT: csrwi vxrm, 0
1389 ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu
1390 ; CHECK-NEXT: vaadd.vx v8, v16, a0, v0.t
1393 %a = call <vscale x 64 x i8> @llvm.riscv.vaadd.mask.nxv64i8.i8(
1394 <vscale x 64 x i8> %0,
1395 <vscale x 64 x i8> %1,
1397 <vscale x 64 x i1> %3,
1398 iXLen 0, iXLen %4, iXLen 1)
1400 ret <vscale x 64 x i8> %a
1403 declare <vscale x 1 x i16> @llvm.riscv.vaadd.nxv1i16.i16(
1409 define <vscale x 1 x i16> @intrinsic_vaadd_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, iXLen %2) nounwind {
1410 ; CHECK-LABEL: intrinsic_vaadd_vx_nxv1i16_nxv1i16_i16:
1411 ; CHECK: # %bb.0: # %entry
1412 ; CHECK-NEXT: csrwi vxrm, 0
1413 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
1414 ; CHECK-NEXT: vaadd.vx v8, v8, a0
1417 %a = call <vscale x 1 x i16> @llvm.riscv.vaadd.nxv1i16.i16(
1418 <vscale x 1 x i16> undef,
1419 <vscale x 1 x i16> %0,
1423 ret <vscale x 1 x i16> %a
1426 declare <vscale x 1 x i16> @llvm.riscv.vaadd.mask.nxv1i16.i16(
1431 iXLen, iXLen, iXLen);
1433 define <vscale x 1 x i16> @intrinsic_vaadd_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
1434 ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv1i16_nxv1i16_i16:
1435 ; CHECK: # %bb.0: # %entry
1436 ; CHECK-NEXT: csrwi vxrm, 0
1437 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
1438 ; CHECK-NEXT: vaadd.vx v8, v9, a0, v0.t
1441 %a = call <vscale x 1 x i16> @llvm.riscv.vaadd.mask.nxv1i16.i16(
1442 <vscale x 1 x i16> %0,
1443 <vscale x 1 x i16> %1,
1445 <vscale x 1 x i1> %3,
1446 iXLen 0, iXLen %4, iXLen 1)
1448 ret <vscale x 1 x i16> %a
1451 declare <vscale x 2 x i16> @llvm.riscv.vaadd.nxv2i16.i16(
1457 define <vscale x 2 x i16> @intrinsic_vaadd_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, iXLen %2) nounwind {
1458 ; CHECK-LABEL: intrinsic_vaadd_vx_nxv2i16_nxv2i16_i16:
1459 ; CHECK: # %bb.0: # %entry
1460 ; CHECK-NEXT: csrwi vxrm, 0
1461 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
1462 ; CHECK-NEXT: vaadd.vx v8, v8, a0
1465 %a = call <vscale x 2 x i16> @llvm.riscv.vaadd.nxv2i16.i16(
1466 <vscale x 2 x i16> undef,
1467 <vscale x 2 x i16> %0,
1471 ret <vscale x 2 x i16> %a
1474 declare <vscale x 2 x i16> @llvm.riscv.vaadd.mask.nxv2i16.i16(
1479 iXLen, iXLen, iXLen);
1481 define <vscale x 2 x i16> @intrinsic_vaadd_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
1482 ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv2i16_nxv2i16_i16:
1483 ; CHECK: # %bb.0: # %entry
1484 ; CHECK-NEXT: csrwi vxrm, 0
1485 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
1486 ; CHECK-NEXT: vaadd.vx v8, v9, a0, v0.t
1489 %a = call <vscale x 2 x i16> @llvm.riscv.vaadd.mask.nxv2i16.i16(
1490 <vscale x 2 x i16> %0,
1491 <vscale x 2 x i16> %1,
1493 <vscale x 2 x i1> %3,
1494 iXLen 0, iXLen %4, iXLen 1)
1496 ret <vscale x 2 x i16> %a
1499 declare <vscale x 4 x i16> @llvm.riscv.vaadd.nxv4i16.i16(
1505 define <vscale x 4 x i16> @intrinsic_vaadd_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, iXLen %2) nounwind {
1506 ; CHECK-LABEL: intrinsic_vaadd_vx_nxv4i16_nxv4i16_i16:
1507 ; CHECK: # %bb.0: # %entry
1508 ; CHECK-NEXT: csrwi vxrm, 0
1509 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
1510 ; CHECK-NEXT: vaadd.vx v8, v8, a0
1513 %a = call <vscale x 4 x i16> @llvm.riscv.vaadd.nxv4i16.i16(
1514 <vscale x 4 x i16> undef,
1515 <vscale x 4 x i16> %0,
1519 ret <vscale x 4 x i16> %a
1522 declare <vscale x 4 x i16> @llvm.riscv.vaadd.mask.nxv4i16.i16(
1527 iXLen, iXLen, iXLen);
1529 define <vscale x 4 x i16> @intrinsic_vaadd_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
1530 ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv4i16_nxv4i16_i16:
1531 ; CHECK: # %bb.0: # %entry
1532 ; CHECK-NEXT: csrwi vxrm, 0
1533 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
1534 ; CHECK-NEXT: vaadd.vx v8, v9, a0, v0.t
1537 %a = call <vscale x 4 x i16> @llvm.riscv.vaadd.mask.nxv4i16.i16(
1538 <vscale x 4 x i16> %0,
1539 <vscale x 4 x i16> %1,
1541 <vscale x 4 x i1> %3,
1542 iXLen 0, iXLen %4, iXLen 1)
1544 ret <vscale x 4 x i16> %a
1547 declare <vscale x 8 x i16> @llvm.riscv.vaadd.nxv8i16.i16(
1553 define <vscale x 8 x i16> @intrinsic_vaadd_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, iXLen %2) nounwind {
1554 ; CHECK-LABEL: intrinsic_vaadd_vx_nxv8i16_nxv8i16_i16:
1555 ; CHECK: # %bb.0: # %entry
1556 ; CHECK-NEXT: csrwi vxrm, 0
1557 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
1558 ; CHECK-NEXT: vaadd.vx v8, v8, a0
1561 %a = call <vscale x 8 x i16> @llvm.riscv.vaadd.nxv8i16.i16(
1562 <vscale x 8 x i16> undef,
1563 <vscale x 8 x i16> %0,
1567 ret <vscale x 8 x i16> %a
1570 declare <vscale x 8 x i16> @llvm.riscv.vaadd.mask.nxv8i16.i16(
1575 iXLen, iXLen, iXLen);
1577 define <vscale x 8 x i16> @intrinsic_vaadd_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
1578 ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv8i16_nxv8i16_i16:
1579 ; CHECK: # %bb.0: # %entry
1580 ; CHECK-NEXT: csrwi vxrm, 0
1581 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
1582 ; CHECK-NEXT: vaadd.vx v8, v10, a0, v0.t
1585 %a = call <vscale x 8 x i16> @llvm.riscv.vaadd.mask.nxv8i16.i16(
1586 <vscale x 8 x i16> %0,
1587 <vscale x 8 x i16> %1,
1589 <vscale x 8 x i1> %3,
1590 iXLen 0, iXLen %4, iXLen 1)
1592 ret <vscale x 8 x i16> %a
1595 declare <vscale x 16 x i16> @llvm.riscv.vaadd.nxv16i16.i16(
1596 <vscale x 16 x i16>,
1597 <vscale x 16 x i16>,
1601 define <vscale x 16 x i16> @intrinsic_vaadd_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, iXLen %2) nounwind {
1602 ; CHECK-LABEL: intrinsic_vaadd_vx_nxv16i16_nxv16i16_i16:
1603 ; CHECK: # %bb.0: # %entry
1604 ; CHECK-NEXT: csrwi vxrm, 0
1605 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
1606 ; CHECK-NEXT: vaadd.vx v8, v8, a0
1609 %a = call <vscale x 16 x i16> @llvm.riscv.vaadd.nxv16i16.i16(
1610 <vscale x 16 x i16> undef,
1611 <vscale x 16 x i16> %0,
1615 ret <vscale x 16 x i16> %a
1618 declare <vscale x 16 x i16> @llvm.riscv.vaadd.mask.nxv16i16.i16(
1619 <vscale x 16 x i16>,
1620 <vscale x 16 x i16>,
1623 iXLen, iXLen, iXLen);
1625 define <vscale x 16 x i16> @intrinsic_vaadd_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
1626 ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv16i16_nxv16i16_i16:
1627 ; CHECK: # %bb.0: # %entry
1628 ; CHECK-NEXT: csrwi vxrm, 0
1629 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
1630 ; CHECK-NEXT: vaadd.vx v8, v12, a0, v0.t
1633 %a = call <vscale x 16 x i16> @llvm.riscv.vaadd.mask.nxv16i16.i16(
1634 <vscale x 16 x i16> %0,
1635 <vscale x 16 x i16> %1,
1637 <vscale x 16 x i1> %3,
1638 iXLen 0, iXLen %4, iXLen 1)
1640 ret <vscale x 16 x i16> %a
1643 declare <vscale x 32 x i16> @llvm.riscv.vaadd.nxv32i16.i16(
1644 <vscale x 32 x i16>,
1645 <vscale x 32 x i16>,
1649 define <vscale x 32 x i16> @intrinsic_vaadd_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, iXLen %2) nounwind {
1650 ; CHECK-LABEL: intrinsic_vaadd_vx_nxv32i16_nxv32i16_i16:
1651 ; CHECK: # %bb.0: # %entry
1652 ; CHECK-NEXT: csrwi vxrm, 0
1653 ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
1654 ; CHECK-NEXT: vaadd.vx v8, v8, a0
1657 %a = call <vscale x 32 x i16> @llvm.riscv.vaadd.nxv32i16.i16(
1658 <vscale x 32 x i16> undef,
1659 <vscale x 32 x i16> %0,
1663 ret <vscale x 32 x i16> %a
1666 declare <vscale x 32 x i16> @llvm.riscv.vaadd.mask.nxv32i16.i16(
1667 <vscale x 32 x i16>,
1668 <vscale x 32 x i16>,
1671 iXLen, iXLen, iXLen);
1673 define <vscale x 32 x i16> @intrinsic_vaadd_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
1674 ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv32i16_nxv32i16_i16:
1675 ; CHECK: # %bb.0: # %entry
1676 ; CHECK-NEXT: csrwi vxrm, 0
1677 ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu
1678 ; CHECK-NEXT: vaadd.vx v8, v16, a0, v0.t
1681 %a = call <vscale x 32 x i16> @llvm.riscv.vaadd.mask.nxv32i16.i16(
1682 <vscale x 32 x i16> %0,
1683 <vscale x 32 x i16> %1,
1685 <vscale x 32 x i1> %3,
1686 iXLen 0, iXLen %4, iXLen 1)
1688 ret <vscale x 32 x i16> %a
1691 declare <vscale x 1 x i32> @llvm.riscv.vaadd.nxv1i32.i32(
1697 define <vscale x 1 x i32> @intrinsic_vaadd_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, iXLen %2) nounwind {
1698 ; CHECK-LABEL: intrinsic_vaadd_vx_nxv1i32_nxv1i32_i32:
1699 ; CHECK: # %bb.0: # %entry
1700 ; CHECK-NEXT: csrwi vxrm, 0
1701 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
1702 ; CHECK-NEXT: vaadd.vx v8, v8, a0
1705 %a = call <vscale x 1 x i32> @llvm.riscv.vaadd.nxv1i32.i32(
1706 <vscale x 1 x i32> undef,
1707 <vscale x 1 x i32> %0,
1711 ret <vscale x 1 x i32> %a
1714 declare <vscale x 1 x i32> @llvm.riscv.vaadd.mask.nxv1i32.i32(
1719 iXLen, iXLen, iXLen);
1721 define <vscale x 1 x i32> @intrinsic_vaadd_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
1722 ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv1i32_nxv1i32_i32:
1723 ; CHECK: # %bb.0: # %entry
1724 ; CHECK-NEXT: csrwi vxrm, 0
1725 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
1726 ; CHECK-NEXT: vaadd.vx v8, v9, a0, v0.t
1729 %a = call <vscale x 1 x i32> @llvm.riscv.vaadd.mask.nxv1i32.i32(
1730 <vscale x 1 x i32> %0,
1731 <vscale x 1 x i32> %1,
1733 <vscale x 1 x i1> %3,
1734 iXLen 0, iXLen %4, iXLen 1)
1736 ret <vscale x 1 x i32> %a
1739 declare <vscale x 2 x i32> @llvm.riscv.vaadd.nxv2i32.i32(
1745 define <vscale x 2 x i32> @intrinsic_vaadd_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, iXLen %2) nounwind {
1746 ; CHECK-LABEL: intrinsic_vaadd_vx_nxv2i32_nxv2i32_i32:
1747 ; CHECK: # %bb.0: # %entry
1748 ; CHECK-NEXT: csrwi vxrm, 0
1749 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
1750 ; CHECK-NEXT: vaadd.vx v8, v8, a0
1753 %a = call <vscale x 2 x i32> @llvm.riscv.vaadd.nxv2i32.i32(
1754 <vscale x 2 x i32> undef,
1755 <vscale x 2 x i32> %0,
1759 ret <vscale x 2 x i32> %a
1762 declare <vscale x 2 x i32> @llvm.riscv.vaadd.mask.nxv2i32.i32(
1767 iXLen, iXLen, iXLen);
1769 define <vscale x 2 x i32> @intrinsic_vaadd_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
1770 ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv2i32_nxv2i32_i32:
1771 ; CHECK: # %bb.0: # %entry
1772 ; CHECK-NEXT: csrwi vxrm, 0
1773 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
1774 ; CHECK-NEXT: vaadd.vx v8, v9, a0, v0.t
1777 %a = call <vscale x 2 x i32> @llvm.riscv.vaadd.mask.nxv2i32.i32(
1778 <vscale x 2 x i32> %0,
1779 <vscale x 2 x i32> %1,
1781 <vscale x 2 x i1> %3,
1782 iXLen 0, iXLen %4, iXLen 1)
1784 ret <vscale x 2 x i32> %a
1787 declare <vscale x 4 x i32> @llvm.riscv.vaadd.nxv4i32.i32(
1793 define <vscale x 4 x i32> @intrinsic_vaadd_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, iXLen %2) nounwind {
1794 ; CHECK-LABEL: intrinsic_vaadd_vx_nxv4i32_nxv4i32_i32:
1795 ; CHECK: # %bb.0: # %entry
1796 ; CHECK-NEXT: csrwi vxrm, 0
1797 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
1798 ; CHECK-NEXT: vaadd.vx v8, v8, a0
1801 %a = call <vscale x 4 x i32> @llvm.riscv.vaadd.nxv4i32.i32(
1802 <vscale x 4 x i32> undef,
1803 <vscale x 4 x i32> %0,
1807 ret <vscale x 4 x i32> %a
1810 declare <vscale x 4 x i32> @llvm.riscv.vaadd.mask.nxv4i32.i32(
1815 iXLen, iXLen, iXLen);
1817 define <vscale x 4 x i32> @intrinsic_vaadd_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
1818 ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv4i32_nxv4i32_i32:
1819 ; CHECK: # %bb.0: # %entry
1820 ; CHECK-NEXT: csrwi vxrm, 0
1821 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
1822 ; CHECK-NEXT: vaadd.vx v8, v10, a0, v0.t
1825 %a = call <vscale x 4 x i32> @llvm.riscv.vaadd.mask.nxv4i32.i32(
1826 <vscale x 4 x i32> %0,
1827 <vscale x 4 x i32> %1,
1829 <vscale x 4 x i1> %3,
1830 iXLen 0, iXLen %4, iXLen 1)
1832 ret <vscale x 4 x i32> %a
1835 declare <vscale x 8 x i32> @llvm.riscv.vaadd.nxv8i32.i32(
1841 define <vscale x 8 x i32> @intrinsic_vaadd_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, iXLen %2) nounwind {
1842 ; CHECK-LABEL: intrinsic_vaadd_vx_nxv8i32_nxv8i32_i32:
1843 ; CHECK: # %bb.0: # %entry
1844 ; CHECK-NEXT: csrwi vxrm, 0
1845 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
1846 ; CHECK-NEXT: vaadd.vx v8, v8, a0
1849 %a = call <vscale x 8 x i32> @llvm.riscv.vaadd.nxv8i32.i32(
1850 <vscale x 8 x i32> undef,
1851 <vscale x 8 x i32> %0,
1855 ret <vscale x 8 x i32> %a
1858 declare <vscale x 8 x i32> @llvm.riscv.vaadd.mask.nxv8i32.i32(
1863 iXLen, iXLen, iXLen);
1865 define <vscale x 8 x i32> @intrinsic_vaadd_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
1866 ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv8i32_nxv8i32_i32:
1867 ; CHECK: # %bb.0: # %entry
1868 ; CHECK-NEXT: csrwi vxrm, 0
1869 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
1870 ; CHECK-NEXT: vaadd.vx v8, v12, a0, v0.t
1873 %a = call <vscale x 8 x i32> @llvm.riscv.vaadd.mask.nxv8i32.i32(
1874 <vscale x 8 x i32> %0,
1875 <vscale x 8 x i32> %1,
1877 <vscale x 8 x i1> %3,
1878 iXLen 0, iXLen %4, iXLen 1)
1880 ret <vscale x 8 x i32> %a
1883 declare <vscale x 16 x i32> @llvm.riscv.vaadd.nxv16i32.i32(
1884 <vscale x 16 x i32>,
1885 <vscale x 16 x i32>,
1889 define <vscale x 16 x i32> @intrinsic_vaadd_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, iXLen %2) nounwind {
1890 ; CHECK-LABEL: intrinsic_vaadd_vx_nxv16i32_nxv16i32_i32:
1891 ; CHECK: # %bb.0: # %entry
1892 ; CHECK-NEXT: csrwi vxrm, 0
1893 ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
1894 ; CHECK-NEXT: vaadd.vx v8, v8, a0
1897 %a = call <vscale x 16 x i32> @llvm.riscv.vaadd.nxv16i32.i32(
1898 <vscale x 16 x i32> undef,
1899 <vscale x 16 x i32> %0,
1903 ret <vscale x 16 x i32> %a
1906 declare <vscale x 16 x i32> @llvm.riscv.vaadd.mask.nxv16i32.i32(
1907 <vscale x 16 x i32>,
1908 <vscale x 16 x i32>,
1911 iXLen, iXLen, iXLen);
1913 define <vscale x 16 x i32> @intrinsic_vaadd_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
1914 ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv16i32_nxv16i32_i32:
1915 ; CHECK: # %bb.0: # %entry
1916 ; CHECK-NEXT: csrwi vxrm, 0
1917 ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
1918 ; CHECK-NEXT: vaadd.vx v8, v16, a0, v0.t
1921 %a = call <vscale x 16 x i32> @llvm.riscv.vaadd.mask.nxv16i32.i32(
1922 <vscale x 16 x i32> %0,
1923 <vscale x 16 x i32> %1,
1925 <vscale x 16 x i1> %3,
1926 iXLen 0, iXLen %4, iXLen 1)
1928 ret <vscale x 16 x i32> %a
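; For i64 scalar operands the RV32 and RV64 check lines diverge: RV32 has no
; 64-bit GPR, so the scalar is stored to the stack, splatted with vlse64.v,
; and consumed by vaadd.vv, while RV64 uses vaadd.vx directly.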
1931 declare <vscale x 1 x i64> @llvm.riscv.vaadd.nxv1i64.i64(
1937 define <vscale x 1 x i64> @intrinsic_vaadd_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, iXLen %2) nounwind {
1938 ; RV32-LABEL: intrinsic_vaadd_vx_nxv1i64_nxv1i64_i64:
1939 ; RV32: # %bb.0: # %entry
1940 ; RV32-NEXT: addi sp, sp, -16
1941 ; RV32-NEXT: sw a1, 12(sp)
1942 ; RV32-NEXT: sw a0, 8(sp)
1943 ; RV32-NEXT: addi a0, sp, 8
1944 ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
1945 ; RV32-NEXT: vlse64.v v9, (a0), zero
1946 ; RV32-NEXT: csrwi vxrm, 0
1947 ; RV32-NEXT: vaadd.vv v8, v8, v9
1948 ; RV32-NEXT: addi sp, sp, 16
1951 ; RV64-LABEL: intrinsic_vaadd_vx_nxv1i64_nxv1i64_i64:
1952 ; RV64: # %bb.0: # %entry
1953 ; RV64-NEXT: csrwi vxrm, 0
1954 ; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma
1955 ; RV64-NEXT: vaadd.vx v8, v8, a0
1958 %a = call <vscale x 1 x i64> @llvm.riscv.vaadd.nxv1i64.i64(
1959 <vscale x 1 x i64> undef,
1960 <vscale x 1 x i64> %0,
1964 ret <vscale x 1 x i64> %a
1967 declare <vscale x 1 x i64> @llvm.riscv.vaadd.mask.nxv1i64.i64(
1972 iXLen, iXLen, iXLen);
1974 define <vscale x 1 x i64> @intrinsic_vaadd_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
1975 ; RV32-LABEL: intrinsic_vaadd_mask_vx_nxv1i64_nxv1i64_i64:
1976 ; RV32: # %bb.0: # %entry
1977 ; RV32-NEXT: addi sp, sp, -16
1978 ; RV32-NEXT: sw a1, 12(sp)
1979 ; RV32-NEXT: sw a0, 8(sp)
1980 ; RV32-NEXT: addi a0, sp, 8
1981 ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu
1982 ; RV32-NEXT: vlse64.v v10, (a0), zero
1983 ; RV32-NEXT: csrwi vxrm, 0
1984 ; RV32-NEXT: vaadd.vv v8, v9, v10, v0.t
1985 ; RV32-NEXT: addi sp, sp, 16
1988 ; RV64-LABEL: intrinsic_vaadd_mask_vx_nxv1i64_nxv1i64_i64:
1989 ; RV64: # %bb.0: # %entry
1990 ; RV64-NEXT: csrwi vxrm, 0
1991 ; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu
1992 ; RV64-NEXT: vaadd.vx v8, v9, a0, v0.t
1995 %a = call <vscale x 1 x i64> @llvm.riscv.vaadd.mask.nxv1i64.i64(
1996 <vscale x 1 x i64> %0,
1997 <vscale x 1 x i64> %1,
1999 <vscale x 1 x i1> %3,
2000 iXLen 0, iXLen %4, iXLen 1)
2002 ret <vscale x 1 x i64> %a
2005 declare <vscale x 2 x i64> @llvm.riscv.vaadd.nxv2i64.i64(
2011 define <vscale x 2 x i64> @intrinsic_vaadd_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, iXLen %2) nounwind {
2012 ; RV32-LABEL: intrinsic_vaadd_vx_nxv2i64_nxv2i64_i64:
2013 ; RV32: # %bb.0: # %entry
2014 ; RV32-NEXT: addi sp, sp, -16
2015 ; RV32-NEXT: sw a1, 12(sp)
2016 ; RV32-NEXT: sw a0, 8(sp)
2017 ; RV32-NEXT: addi a0, sp, 8
2018 ; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
2019 ; RV32-NEXT: vlse64.v v10, (a0), zero
2020 ; RV32-NEXT: csrwi vxrm, 0
2021 ; RV32-NEXT: vaadd.vv v8, v8, v10
2022 ; RV32-NEXT: addi sp, sp, 16
2025 ; RV64-LABEL: intrinsic_vaadd_vx_nxv2i64_nxv2i64_i64:
2026 ; RV64: # %bb.0: # %entry
2027 ; RV64-NEXT: csrwi vxrm, 0
2028 ; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma
2029 ; RV64-NEXT: vaadd.vx v8, v8, a0
2032 %a = call <vscale x 2 x i64> @llvm.riscv.vaadd.nxv2i64.i64(
2033 <vscale x 2 x i64> undef,
2034 <vscale x 2 x i64> %0,
2038 ret <vscale x 2 x i64> %a
2041 declare <vscale x 2 x i64> @llvm.riscv.vaadd.mask.nxv2i64.i64(
2046 iXLen, iXLen, iXLen);
2048 define <vscale x 2 x i64> @intrinsic_vaadd_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
2049 ; RV32-LABEL: intrinsic_vaadd_mask_vx_nxv2i64_nxv2i64_i64:
2050 ; RV32: # %bb.0: # %entry
2051 ; RV32-NEXT: addi sp, sp, -16
2052 ; RV32-NEXT: sw a1, 12(sp)
2053 ; RV32-NEXT: sw a0, 8(sp)
2054 ; RV32-NEXT: addi a0, sp, 8
2055 ; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu
2056 ; RV32-NEXT: vlse64.v v12, (a0), zero
2057 ; RV32-NEXT: csrwi vxrm, 0
2058 ; RV32-NEXT: vaadd.vv v8, v10, v12, v0.t
2059 ; RV32-NEXT: addi sp, sp, 16
2062 ; RV64-LABEL: intrinsic_vaadd_mask_vx_nxv2i64_nxv2i64_i64:
2063 ; RV64: # %bb.0: # %entry
2064 ; RV64-NEXT: csrwi vxrm, 0
2065 ; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu
2066 ; RV64-NEXT: vaadd.vx v8, v10, a0, v0.t
2069 %a = call <vscale x 2 x i64> @llvm.riscv.vaadd.mask.nxv2i64.i64(
2070 <vscale x 2 x i64> %0,
2071 <vscale x 2 x i64> %1,
2073 <vscale x 2 x i1> %3,
2074 iXLen 0, iXLen %4, iXLen 1)
2076 ret <vscale x 2 x i64> %a
2079 declare <vscale x 4 x i64> @llvm.riscv.vaadd.nxv4i64.i64(
2085 define <vscale x 4 x i64> @intrinsic_vaadd_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, iXLen %2) nounwind {
2086 ; RV32-LABEL: intrinsic_vaadd_vx_nxv4i64_nxv4i64_i64:
2087 ; RV32: # %bb.0: # %entry
2088 ; RV32-NEXT: addi sp, sp, -16
2089 ; RV32-NEXT: sw a1, 12(sp)
2090 ; RV32-NEXT: sw a0, 8(sp)
2091 ; RV32-NEXT: addi a0, sp, 8
2092 ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
2093 ; RV32-NEXT: vlse64.v v12, (a0), zero
2094 ; RV32-NEXT: csrwi vxrm, 0
2095 ; RV32-NEXT: vaadd.vv v8, v8, v12
2096 ; RV32-NEXT: addi sp, sp, 16
2099 ; RV64-LABEL: intrinsic_vaadd_vx_nxv4i64_nxv4i64_i64:
2100 ; RV64: # %bb.0: # %entry
2101 ; RV64-NEXT: csrwi vxrm, 0
2102 ; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma
2103 ; RV64-NEXT: vaadd.vx v8, v8, a0
2106 %a = call <vscale x 4 x i64> @llvm.riscv.vaadd.nxv4i64.i64(
2107 <vscale x 4 x i64> undef,
2108 <vscale x 4 x i64> %0,
2112 ret <vscale x 4 x i64> %a
2115 declare <vscale x 4 x i64> @llvm.riscv.vaadd.mask.nxv4i64.i64(
2120 iXLen, iXLen, iXLen);
2122 define <vscale x 4 x i64> @intrinsic_vaadd_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
2123 ; RV32-LABEL: intrinsic_vaadd_mask_vx_nxv4i64_nxv4i64_i64:
2124 ; RV32: # %bb.0: # %entry
2125 ; RV32-NEXT: addi sp, sp, -16
2126 ; RV32-NEXT: sw a1, 12(sp)
2127 ; RV32-NEXT: sw a0, 8(sp)
2128 ; RV32-NEXT: addi a0, sp, 8
2129 ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu
2130 ; RV32-NEXT: vlse64.v v16, (a0), zero
2131 ; RV32-NEXT: csrwi vxrm, 0
2132 ; RV32-NEXT: vaadd.vv v8, v12, v16, v0.t
2133 ; RV32-NEXT: addi sp, sp, 16
2136 ; RV64-LABEL: intrinsic_vaadd_mask_vx_nxv4i64_nxv4i64_i64:
2137 ; RV64: # %bb.0: # %entry
2138 ; RV64-NEXT: csrwi vxrm, 0
2139 ; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
2140 ; RV64-NEXT: vaadd.vx v8, v12, a0, v0.t
2143 %a = call <vscale x 4 x i64> @llvm.riscv.vaadd.mask.nxv4i64.i64(
2144 <vscale x 4 x i64> %0,
2145 <vscale x 4 x i64> %1,
2147 <vscale x 4 x i1> %3,
2148 iXLen 0, iXLen %4, iXLen 1)
2150 ret <vscale x 4 x i64> %a
2153 declare <vscale x 8 x i64> @llvm.riscv.vaadd.nxv8i64.i64(
2159 define <vscale x 8 x i64> @intrinsic_vaadd_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, iXLen %2) nounwind {
2160 ; RV32-LABEL: intrinsic_vaadd_vx_nxv8i64_nxv8i64_i64:
2161 ; RV32: # %bb.0: # %entry
2162 ; RV32-NEXT: addi sp, sp, -16
2163 ; RV32-NEXT: sw a1, 12(sp)
2164 ; RV32-NEXT: sw a0, 8(sp)
2165 ; RV32-NEXT: addi a0, sp, 8
2166 ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
2167 ; RV32-NEXT: vlse64.v v16, (a0), zero
2168 ; RV32-NEXT: csrwi vxrm, 0
2169 ; RV32-NEXT: vaadd.vv v8, v8, v16
2170 ; RV32-NEXT: addi sp, sp, 16
2173 ; RV64-LABEL: intrinsic_vaadd_vx_nxv8i64_nxv8i64_i64:
2174 ; RV64: # %bb.0: # %entry
2175 ; RV64-NEXT: csrwi vxrm, 0
2176 ; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
2177 ; RV64-NEXT: vaadd.vx v8, v8, a0
2180 %a = call <vscale x 8 x i64> @llvm.riscv.vaadd.nxv8i64.i64(
2181 <vscale x 8 x i64> undef,
2182 <vscale x 8 x i64> %0,
2186 ret <vscale x 8 x i64> %a
2189 declare <vscale x 8 x i64> @llvm.riscv.vaadd.mask.nxv8i64.i64(
2194 iXLen, iXLen, iXLen);
2196 define <vscale x 8 x i64> @intrinsic_vaadd_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
2197 ; RV32-LABEL: intrinsic_vaadd_mask_vx_nxv8i64_nxv8i64_i64:
2198 ; RV32: # %bb.0: # %entry
2199 ; RV32-NEXT: addi sp, sp, -16
2200 ; RV32-NEXT: sw a1, 12(sp)
2201 ; RV32-NEXT: sw a0, 8(sp)
2202 ; RV32-NEXT: addi a0, sp, 8
2203 ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu
2204 ; RV32-NEXT: vlse64.v v24, (a0), zero
2205 ; RV32-NEXT: csrwi vxrm, 0
2206 ; RV32-NEXT: vaadd.vv v8, v16, v24, v0.t
2207 ; RV32-NEXT: addi sp, sp, 16
2210 ; RV64-LABEL: intrinsic_vaadd_mask_vx_nxv8i64_nxv8i64_i64:
2211 ; RV64: # %bb.0: # %entry
2212 ; RV64-NEXT: csrwi vxrm, 0
2213 ; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu
2214 ; RV64-NEXT: vaadd.vx v8, v16, a0, v0.t
2217 %a = call <vscale x 8 x i64> @llvm.riscv.vaadd.mask.nxv8i64.i64(
2218 <vscale x 8 x i64> %0,
2219 <vscale x 8 x i64> %1,
2221 <vscale x 8 x i1> %3,
2222 iXLen 0, iXLen %4, iXLen 1)
2224 ret <vscale x 8 x i64> %a