; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
; RUN:   -verify-machineinstrs -early-live-intervals | FileCheck %s
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
; RUN:   -verify-machineinstrs -early-live-intervals | FileCheck %s
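
; vwadd.w: vector widening integer add, adding a narrow (SEW) source to a
; wide (2*SEW) source. The .wv forms take a narrow vector operand and the
; .wx forms a sign-extended scalar; unmasked, masked, tied-operand, and
; swapped-operand arrangements are exercised for each SEW/LMUL combination.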

declare <vscale x 1 x i16> @llvm.riscv.vwadd.w.nxv1i16.nxv1i8(
  <vscale x 1 x i16>,
  <vscale x 1 x i16>,
  <vscale x 1 x i8>,
  iXLen);

define <vscale x 1 x i16> @intrinsic_vwadd.w_wv_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_wv_nxv1i16_nxv1i16_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vwadd.wv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vwadd.w.nxv1i16.nxv1i8(
    <vscale x 1 x i16> undef,
    <vscale x 1 x i16> %0,
    <vscale x 1 x i8> %1,
    iXLen %2)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vwadd.w.mask.nxv1i16.nxv1i8(
  <vscale x 1 x i16>,
  <vscale x 1 x i16>,
  <vscale x 1 x i8>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x i16> @intrinsic_vwadd.w_mask_wv_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv1i16_nxv1i16_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT:    vwadd.wv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vwadd.w.mask.nxv1i16.nxv1i8(
    <vscale x 1 x i16> %0,
    <vscale x 1 x i16> %1,
    <vscale x 1 x i8> %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vwadd.w.nxv2i16.nxv2i8(
  <vscale x 2 x i16>,
  <vscale x 2 x i16>,
  <vscale x 2 x i8>,
  iXLen);

define <vscale x 2 x i16> @intrinsic_vwadd.w_wv_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_wv_nxv2i16_nxv2i16_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT:    vwadd.wv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vwadd.w.nxv2i16.nxv2i8(
    <vscale x 2 x i16> undef,
    <vscale x 2 x i16> %0,
    <vscale x 2 x i8> %1,
    iXLen %2)

  ret <vscale x 2 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vwadd.w.mask.nxv2i16.nxv2i8(
  <vscale x 2 x i16>,
  <vscale x 2 x i16>,
  <vscale x 2 x i8>,
  <vscale x 2 x i1>,
  iXLen,
  iXLen);

define <vscale x 2 x i16> @intrinsic_vwadd.w_mask_wv_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv2i16_nxv2i16_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
; CHECK-NEXT:    vwadd.wv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vwadd.w.mask.nxv2i16.nxv2i8(
    <vscale x 2 x i16> %0,
    <vscale x 2 x i16> %1,
    <vscale x 2 x i8> %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vwadd.w.nxv4i16.nxv4i8(
  <vscale x 4 x i16>,
  <vscale x 4 x i16>,
  <vscale x 4 x i8>,
  iXLen);

define <vscale x 4 x i16> @intrinsic_vwadd.w_wv_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_wv_nxv4i16_nxv4i16_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT:    vwadd.wv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vwadd.w.nxv4i16.nxv4i8(
    <vscale x 4 x i16> undef,
    <vscale x 4 x i16> %0,
    <vscale x 4 x i8> %1,
    iXLen %2)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vwadd.w.mask.nxv4i16.nxv4i8(
  <vscale x 4 x i16>,
  <vscale x 4 x i16>,
  <vscale x 4 x i8>,
  <vscale x 4 x i1>,
  iXLen,
  iXLen);

define <vscale x 4 x i16> @intrinsic_vwadd.w_mask_wv_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv4i16_nxv4i16_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
; CHECK-NEXT:    vwadd.wv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vwadd.w.mask.nxv4i16.nxv4i8(
    <vscale x 4 x i16> %0,
    <vscale x 4 x i16> %1,
    <vscale x 4 x i8> %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vwadd.w.nxv8i16.nxv8i8(
  <vscale x 8 x i16>,
  <vscale x 8 x i16>,
  <vscale x 8 x i8>,
  iXLen);

define <vscale x 8 x i16> @intrinsic_vwadd.w_wv_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_wv_nxv8i16_nxv8i16_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT:    vwadd.wv v8, v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vwadd.w.nxv8i16.nxv8i8(
    <vscale x 8 x i16> undef,
    <vscale x 8 x i16> %0,
    <vscale x 8 x i8> %1,
    iXLen %2)

  ret <vscale x 8 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vwadd.w.mask.nxv8i16.nxv8i8(
  <vscale x 8 x i16>,
  <vscale x 8 x i16>,
  <vscale x 8 x i8>,
  <vscale x 8 x i1>,
  iXLen,
  iXLen);

define <vscale x 8 x i16> @intrinsic_vwadd.w_mask_wv_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv8i16_nxv8i16_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
; CHECK-NEXT:    vwadd.wv v8, v10, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vwadd.w.mask.nxv8i16.nxv8i8(
    <vscale x 8 x i16> %0,
    <vscale x 8 x i16> %1,
    <vscale x 8 x i8> %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vwadd.w.nxv16i16.nxv16i8(
  <vscale x 16 x i16>,
  <vscale x 16 x i16>,
  <vscale x 16 x i8>,
  iXLen);

define <vscale x 16 x i16> @intrinsic_vwadd.w_wv_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_wv_nxv16i16_nxv16i16_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT:    vwadd.wv v8, v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vwadd.w.nxv16i16.nxv16i8(
    <vscale x 16 x i16> undef,
    <vscale x 16 x i16> %0,
    <vscale x 16 x i8> %1,
    iXLen %2)

  ret <vscale x 16 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vwadd.w.mask.nxv16i16.nxv16i8(
  <vscale x 16 x i16>,
  <vscale x 16 x i16>,
  <vscale x 16 x i8>,
  <vscale x 16 x i1>,
  iXLen,
  iXLen);

define <vscale x 16 x i16> @intrinsic_vwadd.w_mask_wv_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv16i16_nxv16i16_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
; CHECK-NEXT:    vwadd.wv v8, v12, v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vwadd.w.mask.nxv16i16.nxv16i8(
    <vscale x 16 x i16> %0,
    <vscale x 16 x i16> %1,
    <vscale x 16 x i8> %2,
    <vscale x 16 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 16 x i16> %a
}

declare <vscale x 32 x i16> @llvm.riscv.vwadd.w.nxv32i16.nxv32i8(
  <vscale x 32 x i16>,
  <vscale x 32 x i16>,
  <vscale x 32 x i8>,
  iXLen);

define <vscale x 32 x i16> @intrinsic_vwadd.w_wv_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_wv_nxv32i16_nxv32i16_nxv32i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT:    vwadd.wv v8, v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vwadd.w.nxv32i16.nxv32i8(
    <vscale x 32 x i16> undef,
    <vscale x 32 x i16> %0,
    <vscale x 32 x i8> %1,
    iXLen %2)

  ret <vscale x 32 x i16> %a
}

declare <vscale x 32 x i16> @llvm.riscv.vwadd.w.mask.nxv32i16.nxv32i8(
  <vscale x 32 x i16>,
  <vscale x 32 x i16>,
  <vscale x 32 x i8>,
  <vscale x 32 x i1>,
  iXLen,
  iXLen);

define <vscale x 32 x i16> @intrinsic_vwadd.w_mask_wv_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv32i16_nxv32i16_nxv32i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vl4r.v v24, (a0)
; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT:    vwadd.wv v8, v16, v24, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vwadd.w.mask.nxv32i16.nxv32i8(
    <vscale x 32 x i16> %0,
    <vscale x 32 x i16> %1,
    <vscale x 32 x i8> %2,
    <vscale x 32 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 32 x i16> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vwadd.w.nxv1i32.nxv1i16(
  <vscale x 1 x i32>,
  <vscale x 1 x i32>,
  <vscale x 1 x i16>,
  iXLen);

define <vscale x 1 x i32> @intrinsic_vwadd.w_wv_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_wv_nxv1i32_nxv1i32_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vwadd.wv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vwadd.w.nxv1i32.nxv1i16(
    <vscale x 1 x i32> undef,
    <vscale x 1 x i32> %0,
    <vscale x 1 x i16> %1,
    iXLen %2)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vwadd.w.mask.nxv1i32.nxv1i16(
  <vscale x 1 x i32>,
  <vscale x 1 x i32>,
  <vscale x 1 x i16>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x i32> @intrinsic_vwadd.w_mask_wv_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv1i32_nxv1i32_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT:    vwadd.wv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vwadd.w.mask.nxv1i32.nxv1i16(
    <vscale x 1 x i32> %0,
    <vscale x 1 x i32> %1,
    <vscale x 1 x i16> %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vwadd.w.nxv2i32.nxv2i16(
  <vscale x 2 x i32>,
  <vscale x 2 x i32>,
  <vscale x 2 x i16>,
  iXLen);

define <vscale x 2 x i32> @intrinsic_vwadd.w_wv_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_wv_nxv2i32_nxv2i32_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vwadd.wv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vwadd.w.nxv2i32.nxv2i16(
    <vscale x 2 x i32> undef,
    <vscale x 2 x i32> %0,
    <vscale x 2 x i16> %1,
    iXLen %2)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vwadd.w.mask.nxv2i32.nxv2i16(
  <vscale x 2 x i32>,
  <vscale x 2 x i32>,
  <vscale x 2 x i16>,
  <vscale x 2 x i1>,
  iXLen,
  iXLen);

define <vscale x 2 x i32> @intrinsic_vwadd.w_mask_wv_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv2i32_nxv2i32_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT:    vwadd.wv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vwadd.w.mask.nxv2i32.nxv2i16(
    <vscale x 2 x i32> %0,
    <vscale x 2 x i32> %1,
    <vscale x 2 x i16> %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vwadd.w.nxv4i32.nxv4i16(
  <vscale x 4 x i32>,
  <vscale x 4 x i32>,
  <vscale x 4 x i16>,
  iXLen);

define <vscale x 4 x i32> @intrinsic_vwadd.w_wv_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_wv_nxv4i32_nxv4i32_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vwadd.wv v8, v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vwadd.w.nxv4i32.nxv4i16(
    <vscale x 4 x i32> undef,
    <vscale x 4 x i32> %0,
    <vscale x 4 x i16> %1,
    iXLen %2)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vwadd.w.mask.nxv4i32.nxv4i16(
  <vscale x 4 x i32>,
  <vscale x 4 x i32>,
  <vscale x 4 x i16>,
  <vscale x 4 x i1>,
  iXLen,
  iXLen);

define <vscale x 4 x i32> @intrinsic_vwadd.w_mask_wv_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv4i32_nxv4i32_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT:    vwadd.wv v8, v10, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vwadd.w.mask.nxv4i32.nxv4i16(
    <vscale x 4 x i32> %0,
    <vscale x 4 x i32> %1,
    <vscale x 4 x i16> %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vwadd.w.nxv8i32.nxv8i16(
  <vscale x 8 x i32>,
  <vscale x 8 x i32>,
  <vscale x 8 x i16>,
  iXLen);

define <vscale x 8 x i32> @intrinsic_vwadd.w_wv_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_wv_nxv8i32_nxv8i32_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    vwadd.wv v8, v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vwadd.w.nxv8i32.nxv8i16(
    <vscale x 8 x i32> undef,
    <vscale x 8 x i32> %0,
    <vscale x 8 x i16> %1,
    iXLen %2)

  ret <vscale x 8 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vwadd.w.mask.nxv8i32.nxv8i16(
  <vscale x 8 x i32>,
  <vscale x 8 x i32>,
  <vscale x 8 x i16>,
  <vscale x 8 x i1>,
  iXLen,
  iXLen);

define <vscale x 8 x i32> @intrinsic_vwadd.w_mask_wv_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv8i32_nxv8i32_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT:    vwadd.wv v8, v12, v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vwadd.w.mask.nxv8i32.nxv8i16(
    <vscale x 8 x i32> %0,
    <vscale x 8 x i32> %1,
    <vscale x 8 x i16> %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x i32> %a
}

declare <vscale x 16 x i32> @llvm.riscv.vwadd.w.nxv16i32.nxv16i16(
  <vscale x 16 x i32>,
  <vscale x 16 x i32>,
  <vscale x 16 x i16>,
  iXLen);

define <vscale x 16 x i32> @intrinsic_vwadd.w_wv_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_wv_nxv16i32_nxv16i32_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT:    vwadd.wv v8, v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vwadd.w.nxv16i32.nxv16i16(
    <vscale x 16 x i32> undef,
    <vscale x 16 x i32> %0,
    <vscale x 16 x i16> %1,
    iXLen %2)

  ret <vscale x 16 x i32> %a
}

declare <vscale x 16 x i32> @llvm.riscv.vwadd.w.mask.nxv16i32.nxv16i16(
  <vscale x 16 x i32>,
  <vscale x 16 x i32>,
  <vscale x 16 x i16>,
  <vscale x 16 x i1>,
  iXLen,
  iXLen);

define <vscale x 16 x i32> @intrinsic_vwadd.w_mask_wv_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv16i32_nxv16i32_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vl4re16.v v24, (a0)
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT:    vwadd.wv v8, v16, v24, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vwadd.w.mask.nxv16i32.nxv16i16(
    <vscale x 16 x i32> %0,
    <vscale x 16 x i32> %1,
    <vscale x 16 x i16> %2,
    <vscale x 16 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 16 x i32> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vwadd.w.nxv1i64.nxv1i32(
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  <vscale x 1 x i32>,
  iXLen);

define <vscale x 1 x i64> @intrinsic_vwadd.w_wv_nxv1i64_nxv1i64_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_wv_nxv1i64_nxv1i64_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vwadd.wv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vwadd.w.nxv1i64.nxv1i32(
    <vscale x 1 x i64> undef,
    <vscale x 1 x i64> %0,
    <vscale x 1 x i32> %1,
    iXLen %2)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vwadd.w.mask.nxv1i64.nxv1i32(
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  <vscale x 1 x i32>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x i64> @intrinsic_vwadd.w_mask_wv_nxv1i64_nxv1i64_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv1i64_nxv1i64_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT:    vwadd.wv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vwadd.w.mask.nxv1i64.nxv1i32(
    <vscale x 1 x i64> %0,
    <vscale x 1 x i64> %1,
    <vscale x 1 x i32> %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.vwadd.w.nxv2i64.nxv2i32(
  <vscale x 2 x i64>,
  <vscale x 2 x i64>,
  <vscale x 2 x i32>,
  iXLen);

define <vscale x 2 x i64> @intrinsic_vwadd.w_wv_nxv2i64_nxv2i64_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_wv_nxv2i64_nxv2i64_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vwadd.wv v8, v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vwadd.w.nxv2i64.nxv2i32(
    <vscale x 2 x i64> undef,
    <vscale x 2 x i64> %0,
    <vscale x 2 x i32> %1,
    iXLen %2)

  ret <vscale x 2 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.vwadd.w.mask.nxv2i64.nxv2i32(
  <vscale x 2 x i64>,
  <vscale x 2 x i64>,
  <vscale x 2 x i32>,
  <vscale x 2 x i1>,
  iXLen,
  iXLen);

define <vscale x 2 x i64> @intrinsic_vwadd.w_mask_wv_nxv2i64_nxv2i64_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv2i64_nxv2i64_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT:    vwadd.wv v8, v10, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vwadd.w.mask.nxv2i64.nxv2i32(
    <vscale x 2 x i64> %0,
    <vscale x 2 x i64> %1,
    <vscale x 2 x i32> %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vwadd.w.nxv4i64.nxv4i32(
  <vscale x 4 x i64>,
  <vscale x 4 x i64>,
  <vscale x 4 x i32>,
  iXLen);

define <vscale x 4 x i64> @intrinsic_vwadd.w_wv_nxv4i64_nxv4i64_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_wv_nxv4i64_nxv4i64_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vwadd.wv v8, v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vwadd.w.nxv4i64.nxv4i32(
    <vscale x 4 x i64> undef,
    <vscale x 4 x i64> %0,
    <vscale x 4 x i32> %1,
    iXLen %2)

  ret <vscale x 4 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vwadd.w.mask.nxv4i64.nxv4i32(
  <vscale x 4 x i64>,
  <vscale x 4 x i64>,
  <vscale x 4 x i32>,
  <vscale x 4 x i1>,
  iXLen,
  iXLen);

define <vscale x 4 x i64> @intrinsic_vwadd.w_mask_wv_nxv4i64_nxv4i64_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv4i64_nxv4i64_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT:    vwadd.wv v8, v12, v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vwadd.w.mask.nxv4i64.nxv4i32(
    <vscale x 4 x i64> %0,
    <vscale x 4 x i64> %1,
    <vscale x 4 x i32> %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x i64> %a
}

declare <vscale x 8 x i64> @llvm.riscv.vwadd.w.nxv8i64.nxv8i32(
  <vscale x 8 x i64>,
  <vscale x 8 x i64>,
  <vscale x 8 x i32>,
  iXLen);

define <vscale x 8 x i64> @intrinsic_vwadd.w_wv_nxv8i64_nxv8i64_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_wv_nxv8i64_nxv8i64_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vwadd.wv v8, v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vwadd.w.nxv8i64.nxv8i32(
    <vscale x 8 x i64> undef,
    <vscale x 8 x i64> %0,
    <vscale x 8 x i32> %1,
    iXLen %2)

  ret <vscale x 8 x i64> %a
}

declare <vscale x 8 x i64> @llvm.riscv.vwadd.w.mask.nxv8i64.nxv8i32(
  <vscale x 8 x i64>,
  <vscale x 8 x i64>,
  <vscale x 8 x i32>,
  <vscale x 8 x i1>,
  iXLen,
  iXLen);

define <vscale x 8 x i64> @intrinsic_vwadd.w_mask_wv_nxv8i64_nxv8i64_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv8i64_nxv8i64_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vl4re32.v v24, (a0)
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT:    vwadd.wv v8, v16, v24, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vwadd.w.mask.nxv8i64.nxv8i32(
    <vscale x 8 x i64> %0,
    <vscale x 8 x i64> %1,
    <vscale x 8 x i32> %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x i64> %a
}
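
; vwadd.wx variants: the narrow addend is a sign-extended scalar GPR (a0)
; rather than a vector register.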

declare <vscale x 1 x i16> @llvm.riscv.vwadd.w.nxv1i16.i8(
  <vscale x 1 x i16>,
  <vscale x 1 x i16>,
  i8,
  iXLen);

define <vscale x 1 x i16> @intrinsic_vwadd.w_wx_nxv1i16_nxv1i16_i8(<vscale x 1 x i16> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_wx_nxv1i16_nxv1i16_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT:    vwadd.wx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vwadd.w.nxv1i16.i8(
    <vscale x 1 x i16> undef,
    <vscale x 1 x i16> %0,
    i8 %1,
    iXLen %2)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vwadd.w.mask.nxv1i16.i8(
  <vscale x 1 x i16>,
  <vscale x 1 x i16>,
  i8,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x i16> @intrinsic_vwadd.w_mask_wx_nxv1i16_nxv1i16_i8(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i8 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv1i16_nxv1i16_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT:    vwadd.wx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vwadd.w.mask.nxv1i16.i8(
    <vscale x 1 x i16> %0,
    <vscale x 1 x i16> %1,
    i8 %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vwadd.w.nxv2i16.i8(
  <vscale x 2 x i16>,
  <vscale x 2 x i16>,
  i8,
  iXLen);

define <vscale x 2 x i16> @intrinsic_vwadd.w_wx_nxv2i16_nxv2i16_i8(<vscale x 2 x i16> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_wx_nxv2i16_nxv2i16_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT:    vwadd.wx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vwadd.w.nxv2i16.i8(
    <vscale x 2 x i16> undef,
    <vscale x 2 x i16> %0,
    i8 %1,
    iXLen %2)

  ret <vscale x 2 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vwadd.w.mask.nxv2i16.i8(
  <vscale x 2 x i16>,
  <vscale x 2 x i16>,
  i8,
  <vscale x 2 x i1>,
  iXLen,
  iXLen);

define <vscale x 2 x i16> @intrinsic_vwadd.w_mask_wx_nxv2i16_nxv2i16_i8(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i8 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv2i16_nxv2i16_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT:    vwadd.wx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vwadd.w.mask.nxv2i16.i8(
    <vscale x 2 x i16> %0,
    <vscale x 2 x i16> %1,
    i8 %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vwadd.w.nxv4i16.i8(
  <vscale x 4 x i16>,
  <vscale x 4 x i16>,
  i8,
  iXLen);

define <vscale x 4 x i16> @intrinsic_vwadd.w_wx_nxv4i16_nxv4i16_i8(<vscale x 4 x i16> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_wx_nxv4i16_nxv4i16_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT:    vwadd.wx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vwadd.w.nxv4i16.i8(
    <vscale x 4 x i16> undef,
    <vscale x 4 x i16> %0,
    i8 %1,
    iXLen %2)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vwadd.w.mask.nxv4i16.i8(
  <vscale x 4 x i16>,
  <vscale x 4 x i16>,
  i8,
  <vscale x 4 x i1>,
  iXLen,
  iXLen);

define <vscale x 4 x i16> @intrinsic_vwadd.w_mask_wx_nxv4i16_nxv4i16_i8(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i8 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv4i16_nxv4i16_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT:    vwadd.wx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vwadd.w.mask.nxv4i16.i8(
    <vscale x 4 x i16> %0,
    <vscale x 4 x i16> %1,
    i8 %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vwadd.w.nxv8i16.i8(
  <vscale x 8 x i16>,
  <vscale x 8 x i16>,
  i8,
  iXLen);

define <vscale x 8 x i16> @intrinsic_vwadd.w_wx_nxv8i16_nxv8i16_i8(<vscale x 8 x i16> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_wx_nxv8i16_nxv8i16_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT:    vwadd.wx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vwadd.w.nxv8i16.i8(
    <vscale x 8 x i16> undef,
    <vscale x 8 x i16> %0,
    i8 %1,
    iXLen %2)

  ret <vscale x 8 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vwadd.w.mask.nxv8i16.i8(
  <vscale x 8 x i16>,
  <vscale x 8 x i16>,
  i8,
  <vscale x 8 x i1>,
  iXLen,
  iXLen);

define <vscale x 8 x i16> @intrinsic_vwadd.w_mask_wx_nxv8i16_nxv8i16_i8(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i8 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv8i16_nxv8i16_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT:    vwadd.wx v8, v10, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vwadd.w.mask.nxv8i16.i8(
    <vscale x 8 x i16> %0,
    <vscale x 8 x i16> %1,
    i8 %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vwadd.w.nxv16i16.i8(
  <vscale x 16 x i16>,
  <vscale x 16 x i16>,
  i8,
  iXLen);

define <vscale x 16 x i16> @intrinsic_vwadd.w_wx_nxv16i16_nxv16i16_i8(<vscale x 16 x i16> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_wx_nxv16i16_nxv16i16_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT:    vwadd.wx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vwadd.w.nxv16i16.i8(
    <vscale x 16 x i16> undef,
    <vscale x 16 x i16> %0,
    i8 %1,
    iXLen %2)

  ret <vscale x 16 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vwadd.w.mask.nxv16i16.i8(
  <vscale x 16 x i16>,
  <vscale x 16 x i16>,
  i8,
  <vscale x 16 x i1>,
  iXLen,
  iXLen);

define <vscale x 16 x i16> @intrinsic_vwadd.w_mask_wx_nxv16i16_nxv16i16_i8(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i8 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv16i16_nxv16i16_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT:    vwadd.wx v8, v12, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vwadd.w.mask.nxv16i16.i8(
    <vscale x 16 x i16> %0,
    <vscale x 16 x i16> %1,
    i8 %2,
    <vscale x 16 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 16 x i16> %a
}

declare <vscale x 32 x i16> @llvm.riscv.vwadd.w.nxv32i16.i8(
  <vscale x 32 x i16>,
  <vscale x 32 x i16>,
  i8,
  iXLen);

define <vscale x 32 x i16> @intrinsic_vwadd.w_wx_nxv32i16_nxv32i16_i8(<vscale x 32 x i16> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_wx_nxv32i16_nxv32i16_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
; CHECK-NEXT:    vwadd.wx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vwadd.w.nxv32i16.i8(
    <vscale x 32 x i16> undef,
    <vscale x 32 x i16> %0,
    i8 %1,
    iXLen %2)

  ret <vscale x 32 x i16> %a
}

declare <vscale x 32 x i16> @llvm.riscv.vwadd.w.mask.nxv32i16.i8(
  <vscale x 32 x i16>,
  <vscale x 32 x i16>,
  i8,
  <vscale x 32 x i1>,
  iXLen,
  iXLen);

define <vscale x 32 x i16> @intrinsic_vwadd.w_mask_wx_nxv32i16_nxv32i16_i8(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i8 %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv32i16_nxv32i16_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT:    vwadd.wx v8, v16, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vwadd.w.mask.nxv32i16.i8(
    <vscale x 32 x i16> %0,
    <vscale x 32 x i16> %1,
    i8 %2,
    <vscale x 32 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 32 x i16> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vwadd.w.nxv1i32.i16(
  <vscale x 1 x i32>,
  <vscale x 1 x i32>,
  i16,
  iXLen);

define <vscale x 1 x i32> @intrinsic_vwadd.w_wx_nxv1i32_nxv1i32_i16(<vscale x 1 x i32> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_wx_nxv1i32_nxv1i32_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vwadd.wx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vwadd.w.nxv1i32.i16(
    <vscale x 1 x i32> undef,
    <vscale x 1 x i32> %0,
    i16 %1,
    iXLen %2)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vwadd.w.mask.nxv1i32.i16(
  <vscale x 1 x i32>,
  <vscale x 1 x i32>,
  i16,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x i32> @intrinsic_vwadd.w_mask_wx_nxv1i32_nxv1i32_i16(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i16 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv1i32_nxv1i32_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT:    vwadd.wx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vwadd.w.mask.nxv1i32.i16(
    <vscale x 1 x i32> %0,
    <vscale x 1 x i32> %1,
    i16 %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vwadd.w.nxv2i32.i16(
  <vscale x 2 x i32>,
  <vscale x 2 x i32>,
  i16,
  iXLen);

define <vscale x 2 x i32> @intrinsic_vwadd.w_wx_nxv2i32_nxv2i32_i16(<vscale x 2 x i32> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_wx_nxv2i32_nxv2i32_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT:    vwadd.wx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vwadd.w.nxv2i32.i16(
    <vscale x 2 x i32> undef,
    <vscale x 2 x i32> %0,
    i16 %1,
    iXLen %2)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vwadd.w.mask.nxv2i32.i16(
  <vscale x 2 x i32>,
  <vscale x 2 x i32>,
  i16,
  <vscale x 2 x i1>,
  iXLen,
  iXLen);

define <vscale x 2 x i32> @intrinsic_vwadd.w_mask_wx_nxv2i32_nxv2i32_i16(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i16 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv2i32_nxv2i32_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT:    vwadd.wx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vwadd.w.mask.nxv2i32.i16(
    <vscale x 2 x i32> %0,
    <vscale x 2 x i32> %1,
    i16 %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vwadd.w.nxv4i32.i16(
  <vscale x 4 x i32>,
  <vscale x 4 x i32>,
  i16,
  iXLen);

define <vscale x 4 x i32> @intrinsic_vwadd.w_wx_nxv4i32_nxv4i32_i16(<vscale x 4 x i32> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_wx_nxv4i32_nxv4i32_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT:    vwadd.wx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vwadd.w.nxv4i32.i16(
    <vscale x 4 x i32> undef,
    <vscale x 4 x i32> %0,
    i16 %1,
    iXLen %2)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vwadd.w.mask.nxv4i32.i16(
  <vscale x 4 x i32>,
  <vscale x 4 x i32>,
  i16,
  <vscale x 4 x i1>,
  iXLen,
  iXLen);

define <vscale x 4 x i32> @intrinsic_vwadd.w_mask_wx_nxv4i32_nxv4i32_i16(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i16 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv4i32_nxv4i32_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT:    vwadd.wx v8, v10, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vwadd.w.mask.nxv4i32.i16(
    <vscale x 4 x i32> %0,
    <vscale x 4 x i32> %1,
    i16 %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vwadd.w.nxv8i32.i16(
  <vscale x 8 x i32>,
  <vscale x 8 x i32>,
  i16,
  iXLen);

define <vscale x 8 x i32> @intrinsic_vwadd.w_wx_nxv8i32_nxv8i32_i16(<vscale x 8 x i32> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_wx_nxv8i32_nxv8i32_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT:    vwadd.wx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vwadd.w.nxv8i32.i16(
    <vscale x 8 x i32> undef,
    <vscale x 8 x i32> %0,
    i16 %1,
    iXLen %2)

  ret <vscale x 8 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vwadd.w.mask.nxv8i32.i16(
  <vscale x 8 x i32>,
  <vscale x 8 x i32>,
  i16,
  <vscale x 8 x i1>,
  iXLen,
  iXLen);

define <vscale x 8 x i32> @intrinsic_vwadd.w_mask_wx_nxv8i32_nxv8i32_i16(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i16 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv8i32_nxv8i32_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT:    vwadd.wx v8, v12, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vwadd.w.mask.nxv8i32.i16(
    <vscale x 8 x i32> %0,
    <vscale x 8 x i32> %1,
    i16 %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x i32> %a
}

declare <vscale x 16 x i32> @llvm.riscv.vwadd.w.nxv16i32.i16(
  <vscale x 16 x i32>,
  <vscale x 16 x i32>,
  i16,
  iXLen);

define <vscale x 16 x i32> @intrinsic_vwadd.w_wx_nxv16i32_nxv16i32_i16(<vscale x 16 x i32> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_wx_nxv16i32_nxv16i32_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT:    vwadd.wx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vwadd.w.nxv16i32.i16(
    <vscale x 16 x i32> undef,
    <vscale x 16 x i32> %0,
    i16 %1,
    iXLen %2)

  ret <vscale x 16 x i32> %a
}

declare <vscale x 16 x i32> @llvm.riscv.vwadd.w.mask.nxv16i32.i16(
  <vscale x 16 x i32>,
  <vscale x 16 x i32>,
  i16,
  <vscale x 16 x i1>,
  iXLen,
  iXLen);

define <vscale x 16 x i32> @intrinsic_vwadd.w_mask_wx_nxv16i32_nxv16i32_i16(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i16 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv16i32_nxv16i32_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT:    vwadd.wx v8, v16, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vwadd.w.mask.nxv16i32.i16(
    <vscale x 16 x i32> %0,
    <vscale x 16 x i32> %1,
    i16 %2,
    <vscale x 16 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 16 x i32> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vwadd.w.nxv1i64.i32(
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  i32,
  iXLen);

define <vscale x 1 x i64> @intrinsic_vwadd.w_wx_nxv1i64_nxv1i64_i32(<vscale x 1 x i64> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_wx_nxv1i64_nxv1i64_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT:    vwadd.wx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vwadd.w.nxv1i64.i32(
    <vscale x 1 x i64> undef,
    <vscale x 1 x i64> %0,
    i32 %1,
    iXLen %2)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vwadd.w.mask.nxv1i64.i32(
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  i32,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x i64> @intrinsic_vwadd.w_mask_wx_nxv1i64_nxv1i64_i32(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i32 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv1i64_nxv1i64_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT:    vwadd.wx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vwadd.w.mask.nxv1i64.i32(
    <vscale x 1 x i64> %0,
    <vscale x 1 x i64> %1,
    i32 %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.vwadd.w.nxv2i64.i32(
  <vscale x 2 x i64>,
  <vscale x 2 x i64>,
  i32,
  iXLen);

define <vscale x 2 x i64> @intrinsic_vwadd.w_wx_nxv2i64_nxv2i64_i32(<vscale x 2 x i64> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_wx_nxv2i64_nxv2i64_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT:    vwadd.wx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vwadd.w.nxv2i64.i32(
    <vscale x 2 x i64> undef,
    <vscale x 2 x i64> %0,
    i32 %1,
    iXLen %2)

  ret <vscale x 2 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.vwadd.w.mask.nxv2i64.i32(
  <vscale x 2 x i64>,
  <vscale x 2 x i64>,
  i32,
  <vscale x 2 x i1>,
  iXLen,
  iXLen);

define <vscale x 2 x i64> @intrinsic_vwadd.w_mask_wx_nxv2i64_nxv2i64_i32(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i32 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv2i64_nxv2i64_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT:    vwadd.wx v8, v10, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vwadd.w.mask.nxv2i64.i32(
    <vscale x 2 x i64> %0,
    <vscale x 2 x i64> %1,
    i32 %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vwadd.w.nxv4i64.i32(
  <vscale x 4 x i64>,
  <vscale x 4 x i64>,
  i32,
  iXLen);

define <vscale x 4 x i64> @intrinsic_vwadd.w_wx_nxv4i64_nxv4i64_i32(<vscale x 4 x i64> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_wx_nxv4i64_nxv4i64_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT:    vwadd.wx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vwadd.w.nxv4i64.i32(
    <vscale x 4 x i64> undef,
    <vscale x 4 x i64> %0,
    i32 %1,
    iXLen %2)

  ret <vscale x 4 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vwadd.w.mask.nxv4i64.i32(
  <vscale x 4 x i64>,
  <vscale x 4 x i64>,
  i32,
  <vscale x 4 x i1>,
  iXLen,
  iXLen);

define <vscale x 4 x i64> @intrinsic_vwadd.w_mask_wx_nxv4i64_nxv4i64_i32(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i32 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv4i64_nxv4i64_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT:    vwadd.wx v8, v12, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vwadd.w.mask.nxv4i64.i32(
    <vscale x 4 x i64> %0,
    <vscale x 4 x i64> %1,
    i32 %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x i64> %a
}

declare <vscale x 8 x i64> @llvm.riscv.vwadd.w.nxv8i64.i32(
  <vscale x 8 x i64>,
  <vscale x 8 x i64>,
  i32,
  iXLen);

define <vscale x 8 x i64> @intrinsic_vwadd.w_wx_nxv8i64_nxv8i64_i32(<vscale x 8 x i64> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_wx_nxv8i64_nxv8i64_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT:    vwadd.wx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vwadd.w.nxv8i64.i32(
    <vscale x 8 x i64> undef,
    <vscale x 8 x i64> %0,
    i32 %1,
    iXLen %2)

  ret <vscale x 8 x i64> %a
}

declare <vscale x 8 x i64> @llvm.riscv.vwadd.w.mask.nxv8i64.i32(
  <vscale x 8 x i64>,
  <vscale x 8 x i64>,
  i32,
  <vscale x 8 x i1>,
  iXLen,
  iXLen);

define <vscale x 8 x i64> @intrinsic_vwadd.w_mask_wx_nxv8i64_nxv8i64_i32(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i32 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv8i64_nxv8i64_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT:    vwadd.wx v8, v16, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vwadd.w.mask.nxv8i64.i32(
    <vscale x 8 x i64> %0,
    <vscale x 8 x i64> %1,
    i32 %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x i64> %a
}
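
; Tied-operand variants: the passthru and the wide source are the same value,
; so the result is produced in place in v8.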

define <vscale x 1 x i16> @intrinsic_vwadd.w_mask_wv_tie_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_tie_nxv1i16_nxv1i16_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT:    vwadd.wv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vwadd.w.mask.nxv1i16.nxv1i8(
    <vscale x 1 x i16> %0,
    <vscale x 1 x i16> %0,
    <vscale x 1 x i8> %1,
    <vscale x 1 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 1 x i16> %a
}

define <vscale x 2 x i16> @intrinsic_vwadd.w_mask_wv_tie_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_tie_nxv2i16_nxv2i16_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
; CHECK-NEXT:    vwadd.wv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vwadd.w.mask.nxv2i16.nxv2i8(
    <vscale x 2 x i16> %0,
    <vscale x 2 x i16> %0,
    <vscale x 2 x i8> %1,
    <vscale x 2 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 2 x i16> %a
}

define <vscale x 4 x i16> @intrinsic_vwadd.w_mask_wv_tie_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_tie_nxv4i16_nxv4i16_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
; CHECK-NEXT:    vwadd.wv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vwadd.w.mask.nxv4i16.nxv4i8(
    <vscale x 4 x i16> %0,
    <vscale x 4 x i16> %0,
    <vscale x 4 x i8> %1,
    <vscale x 4 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 4 x i16> %a
}

define <vscale x 8 x i16> @intrinsic_vwadd.w_mask_wv_tie_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_tie_nxv8i16_nxv8i16_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
; CHECK-NEXT:    vwadd.wv v8, v8, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vwadd.w.mask.nxv8i16.nxv8i8(
    <vscale x 8 x i16> %0,
    <vscale x 8 x i16> %0,
    <vscale x 8 x i8> %1,
    <vscale x 8 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 8 x i16> %a
}

define <vscale x 16 x i16> @intrinsic_vwadd.w_mask_wv_tie_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_tie_nxv16i16_nxv16i16_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
; CHECK-NEXT:    vwadd.wv v8, v8, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vwadd.w.mask.nxv16i16.nxv16i8(
    <vscale x 16 x i16> %0,
    <vscale x 16 x i16> %0,
    <vscale x 16 x i8> %1,
    <vscale x 16 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 16 x i16> %a
}

define <vscale x 32 x i16> @intrinsic_vwadd.w_mask_wv_tie_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_tie_nxv32i16_nxv32i16_nxv32i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
; CHECK-NEXT:    vwadd.wv v8, v8, v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vwadd.w.mask.nxv32i16.nxv32i8(
    <vscale x 32 x i16> %0,
    <vscale x 32 x i16> %0,
    <vscale x 32 x i8> %1,
    <vscale x 32 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 32 x i16> %a
}

define <vscale x 1 x i32> @intrinsic_vwadd.w_mask_wv_tie_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_tie_nxv1i32_nxv1i32_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT:    vwadd.wv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vwadd.w.mask.nxv1i32.nxv1i16(
    <vscale x 1 x i32> %0,
    <vscale x 1 x i32> %0,
    <vscale x 1 x i16> %1,
    <vscale x 1 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 1 x i32> %a
}

define <vscale x 2 x i32> @intrinsic_vwadd.w_mask_wv_tie_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_tie_nxv2i32_nxv2i32_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT:    vwadd.wv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vwadd.w.mask.nxv2i32.nxv2i16(
    <vscale x 2 x i32> %0,
    <vscale x 2 x i32> %0,
    <vscale x 2 x i16> %1,
    <vscale x 2 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 2 x i32> %a
}

define <vscale x 4 x i32> @intrinsic_vwadd.w_mask_wv_tie_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_tie_nxv4i32_nxv4i32_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT:    vwadd.wv v8, v8, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vwadd.w.mask.nxv4i32.nxv4i16(
    <vscale x 4 x i32> %0,
    <vscale x 4 x i32> %0,
    <vscale x 4 x i16> %1,
    <vscale x 4 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 4 x i32> %a
}

define <vscale x 8 x i32> @intrinsic_vwadd.w_mask_wv_tie_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_tie_nxv8i32_nxv8i32_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT:    vwadd.wv v8, v8, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vwadd.w.mask.nxv8i32.nxv8i16(
    <vscale x 8 x i32> %0,
    <vscale x 8 x i32> %0,
    <vscale x 8 x i16> %1,
    <vscale x 8 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 8 x i32> %a
}

define <vscale x 16 x i32> @intrinsic_vwadd.w_mask_wv_tie_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_tie_nxv16i32_nxv16i32_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT:    vwadd.wv v8, v8, v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vwadd.w.mask.nxv16i32.nxv16i16(
    <vscale x 16 x i32> %0,
    <vscale x 16 x i32> %0,
    <vscale x 16 x i16> %1,
    <vscale x 16 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 16 x i32> %a
}

define <vscale x 1 x i64> @intrinsic_vwadd.w_mask_wv_tie_nxv1i64_nxv1i64_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_tie_nxv1i64_nxv1i64_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT:    vwadd.wv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vwadd.w.mask.nxv1i64.nxv1i32(
    <vscale x 1 x i64> %0,
    <vscale x 1 x i64> %0,
    <vscale x 1 x i32> %1,
    <vscale x 1 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 1 x i64> %a
}

define <vscale x 2 x i64> @intrinsic_vwadd.w_mask_wv_tie_nxv2i64_nxv2i64_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_tie_nxv2i64_nxv2i64_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT:    vwadd.wv v8, v8, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vwadd.w.mask.nxv2i64.nxv2i32(
    <vscale x 2 x i64> %0,
    <vscale x 2 x i64> %0,
    <vscale x 2 x i32> %1,
    <vscale x 2 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 2 x i64> %a
}

define <vscale x 4 x i64> @intrinsic_vwadd.w_mask_wv_tie_nxv4i64_nxv4i64_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_tie_nxv4i64_nxv4i64_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT:    vwadd.wv v8, v8, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vwadd.w.mask.nxv4i64.nxv4i32(
    <vscale x 4 x i64> %0,
    <vscale x 4 x i64> %0,
    <vscale x 4 x i32> %1,
    <vscale x 4 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 4 x i64> %a
}

define <vscale x 8 x i64> @intrinsic_vwadd.w_mask_wv_tie_nxv8i64_nxv8i64_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_tie_nxv8i64_nxv8i64_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT:    vwadd.wv v8, v8, v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vwadd.w.mask.nxv8i64.nxv8i32(
    <vscale x 8 x i64> %0,
    <vscale x 8 x i64> %0,
    <vscale x 8 x i32> %1,
    <vscale x 8 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 8 x i64> %a
}

define <vscale x 1 x i16> @intrinsic_vwadd.w_mask_wx_tie_nxv1i16_nxv1i16_i8(<vscale x 1 x i16> %0, i8 %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_tie_nxv1i16_nxv1i16_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT:    vwadd.wx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vwadd.w.mask.nxv1i16.i8(
    <vscale x 1 x i16> %0,
    <vscale x 1 x i16> %0,
    i8 %1,
    <vscale x 1 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 1 x i16> %a
}

define <vscale x 2 x i16> @intrinsic_vwadd.w_mask_wx_tie_nxv2i16_nxv2i16_i8(<vscale x 2 x i16> %0, i8 %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_tie_nxv2i16_nxv2i16_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT:    vwadd.wx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vwadd.w.mask.nxv2i16.i8(
    <vscale x 2 x i16> %0,
    <vscale x 2 x i16> %0,
    i8 %1,
    <vscale x 2 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 2 x i16> %a
}

define <vscale x 4 x i16> @intrinsic_vwadd.w_mask_wx_tie_nxv4i16_nxv4i16_i8(<vscale x 4 x i16> %0, i8 %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_tie_nxv4i16_nxv4i16_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT:    vwadd.wx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vwadd.w.mask.nxv4i16.i8(
    <vscale x 4 x i16> %0,
    <vscale x 4 x i16> %0,
    i8 %1,
    <vscale x 4 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 4 x i16> %a
}

define <vscale x 8 x i16> @intrinsic_vwadd.w_mask_wx_tie_nxv8i16_nxv8i16_i8(<vscale x 8 x i16> %0, i8 %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_tie_nxv8i16_nxv8i16_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT:    vwadd.wx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vwadd.w.mask.nxv8i16.i8(
    <vscale x 8 x i16> %0,
    <vscale x 8 x i16> %0,
    i8 %1,
    <vscale x 8 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 8 x i16> %a
}

define <vscale x 16 x i16> @intrinsic_vwadd.w_mask_wx_tie_nxv16i16_nxv16i16_i8(<vscale x 16 x i16> %0, i8 %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_tie_nxv16i16_nxv16i16_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT:    vwadd.wx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vwadd.w.mask.nxv16i16.i8(
    <vscale x 16 x i16> %0,
    <vscale x 16 x i16> %0,
    i8 %1,
    <vscale x 16 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 16 x i16> %a
}

define <vscale x 32 x i16> @intrinsic_vwadd.w_mask_wx_tie_nxv32i16_nxv32i16_i8(<vscale x 32 x i16> %0, i8 %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_tie_nxv32i16_nxv32i16_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT:    vwadd.wx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vwadd.w.mask.nxv32i16.i8(
    <vscale x 32 x i16> %0,
    <vscale x 32 x i16> %0,
    i8 %1,
    <vscale x 32 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 32 x i16> %a
}

define <vscale x 1 x i32> @intrinsic_vwadd.w_mask_wx_tie_nxv1i32_nxv1i32_i16(<vscale x 1 x i32> %0, i16 %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_tie_nxv1i32_nxv1i32_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT:    vwadd.wx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vwadd.w.mask.nxv1i32.i16(
    <vscale x 1 x i32> %0,
    <vscale x 1 x i32> %0,
    i16 %1,
    <vscale x 1 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 1 x i32> %a
}

define <vscale x 2 x i32> @intrinsic_vwadd.w_mask_wx_tie_nxv2i32_nxv2i32_i16(<vscale x 2 x i32> %0, i16 %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_tie_nxv2i32_nxv2i32_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT:    vwadd.wx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vwadd.w.mask.nxv2i32.i16(
    <vscale x 2 x i32> %0,
    <vscale x 2 x i32> %0,
    i16 %1,
    <vscale x 2 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 2 x i32> %a
}

define <vscale x 4 x i32> @intrinsic_vwadd.w_mask_wx_tie_nxv4i32_nxv4i32_i16(<vscale x 4 x i32> %0, i16 %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_tie_nxv4i32_nxv4i32_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT:    vwadd.wx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vwadd.w.mask.nxv4i32.i16(
    <vscale x 4 x i32> %0,
    <vscale x 4 x i32> %0,
    i16 %1,
    <vscale x 4 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 4 x i32> %a
}

define <vscale x 8 x i32> @intrinsic_vwadd.w_mask_wx_tie_nxv8i32_nxv8i32_i16(<vscale x 8 x i32> %0, i16 %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_tie_nxv8i32_nxv8i32_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT:    vwadd.wx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vwadd.w.mask.nxv8i32.i16(
    <vscale x 8 x i32> %0,
    <vscale x 8 x i32> %0,
    i16 %1,
    <vscale x 8 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 8 x i32> %a
}

define <vscale x 16 x i32> @intrinsic_vwadd.w_mask_wx_tie_nxv16i32_nxv16i32_i16(<vscale x 16 x i32> %0, i16 %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_tie_nxv16i32_nxv16i32_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT:    vwadd.wx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vwadd.w.mask.nxv16i32.i16(
    <vscale x 16 x i32> %0,
    <vscale x 16 x i32> %0,
    i16 %1,
    <vscale x 16 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 16 x i32> %a
}

define <vscale x 1 x i64> @intrinsic_vwadd.w_mask_wx_tie_nxv1i64_nxv1i64_i32(<vscale x 1 x i64> %0, i32 %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_tie_nxv1i64_nxv1i64_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT:    vwadd.wx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vwadd.w.mask.nxv1i64.i32(
    <vscale x 1 x i64> %0,
    <vscale x 1 x i64> %0,
    i32 %1,
    <vscale x 1 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 1 x i64> %a
}

define <vscale x 2 x i64> @intrinsic_vwadd.w_mask_wx_tie_nxv2i64_nxv2i64_i32(<vscale x 2 x i64> %0, i32 %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_tie_nxv2i64_nxv2i64_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT:    vwadd.wx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vwadd.w.mask.nxv2i64.i32(
    <vscale x 2 x i64> %0,
    <vscale x 2 x i64> %0,
    i32 %1,
    <vscale x 2 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 2 x i64> %a
}

define <vscale x 4 x i64> @intrinsic_vwadd.w_mask_wx_tie_nxv4i64_nxv4i64_i32(<vscale x 4 x i64> %0, i32 %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_tie_nxv4i64_nxv4i64_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT:    vwadd.wx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vwadd.w.mask.nxv4i64.i32(
    <vscale x 4 x i64> %0,
    <vscale x 4 x i64> %0,
    i32 %1,
    <vscale x 4 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 4 x i64> %a
}

define <vscale x 8 x i64> @intrinsic_vwadd.w_mask_wx_tie_nxv8i64_nxv8i64_i32(<vscale x 8 x i64> %0, i32 %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_tie_nxv8i64_nxv8i64_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT:    vwadd.wx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vwadd.w.mask.nxv8i64.i32(
    <vscale x 8 x i64> %0,
    <vscale x 8 x i64> %0,
    i32 %1,
    <vscale x 8 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 8 x i64> %a
}
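
; Swapped-operand variants: the narrow source arrives in v8, so the result is
; computed into a fresh register group and copied back with a whole-register
; move.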

define <vscale x 1 x i16> @intrinsic_vwadd.w_wv_untie_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_wv_untie_nxv1i16_nxv1i16_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vwadd.wv v10, v9, v8
; CHECK-NEXT:    vmv1r.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vwadd.w.nxv1i16.nxv1i8(
    <vscale x 1 x i16> undef,
    <vscale x 1 x i16> %1,
    <vscale x 1 x i8> %0,
    iXLen %2)

  ret <vscale x 1 x i16> %a
}

define <vscale x 2 x i16> @intrinsic_vwadd.w_wv_untie_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_wv_untie_nxv2i16_nxv2i16_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT:    vwadd.wv v10, v9, v8
; CHECK-NEXT:    vmv1r.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vwadd.w.nxv2i16.nxv2i8(
    <vscale x 2 x i16> undef,
    <vscale x 2 x i16> %1,
    <vscale x 2 x i8> %0,
    iXLen %2)

  ret <vscale x 2 x i16> %a
}

define <vscale x 4 x i16> @intrinsic_vwadd.w_wv_untie_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_wv_untie_nxv4i16_nxv4i16_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT:    vwadd.wv v10, v9, v8
; CHECK-NEXT:    vmv1r.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vwadd.w.nxv4i16.nxv4i8(
    <vscale x 4 x i16> undef,
    <vscale x 4 x i16> %1,
    <vscale x 4 x i8> %0,
    iXLen %2)

  ret <vscale x 4 x i16> %a
}

define <vscale x 8 x i16> @intrinsic_vwadd.w_wv_untie_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_wv_untie_nxv8i16_nxv8i16_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT:    vwadd.wv v12, v10, v8
; CHECK-NEXT:    vmv2r.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vwadd.w.nxv8i16.nxv8i8(
    <vscale x 8 x i16> undef,
    <vscale x 8 x i16> %1,
    <vscale x 8 x i8> %0,
    iXLen %2)

  ret <vscale x 8 x i16> %a
}

define <vscale x 16 x i16> @intrinsic_vwadd.w_wv_untie_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_wv_untie_nxv16i16_nxv16i16_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT:    vwadd.wv v16, v12, v8
; CHECK-NEXT:    vmv4r.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vwadd.w.nxv16i16.nxv16i8(
    <vscale x 16 x i16> undef,
    <vscale x 16 x i16> %1,
    <vscale x 16 x i8> %0,
    iXLen %2)

  ret <vscale x 16 x i16> %a
}

define <vscale x 32 x i16> @intrinsic_vwadd.w_wv_untie_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_wv_untie_nxv32i16_nxv32i16_nxv32i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT:    vwadd.wv v24, v16, v8
; CHECK-NEXT:    vmv8r.v v8, v24
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vwadd.w.nxv32i16.nxv32i8(
    <vscale x 32 x i16> undef,
    <vscale x 32 x i16> %1,
    <vscale x 32 x i8> %0,
    iXLen %2)

  ret <vscale x 32 x i16> %a
}

define <vscale x 1 x i32> @intrinsic_vwadd.w_wv_untie_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_wv_untie_nxv1i32_nxv1i32_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vwadd.wv v10, v9, v8
; CHECK-NEXT:    vmv1r.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vwadd.w.nxv1i32.nxv1i16(
    <vscale x 1 x i32> undef,
    <vscale x 1 x i32> %1,
    <vscale x 1 x i16> %0,
    iXLen %2)

  ret <vscale x 1 x i32> %a
}

define <vscale x 2 x i32> @intrinsic_vwadd.w_wv_untie_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_wv_untie_nxv2i32_nxv2i32_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vwadd.wv v10, v9, v8
; CHECK-NEXT:    vmv1r.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vwadd.w.nxv2i32.nxv2i16(
    <vscale x 2 x i32> undef,
    <vscale x 2 x i32> %1,
    <vscale x 2 x i16> %0,
    iXLen %2)

  ret <vscale x 2 x i32> %a
}

define <vscale x 4 x i32> @intrinsic_vwadd.w_wv_untie_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_wv_untie_nxv4i32_nxv4i32_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vwadd.wv v12, v10, v8
; CHECK-NEXT:    vmv2r.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vwadd.w.nxv4i32.nxv4i16(
    <vscale x 4 x i32> undef,
    <vscale x 4 x i32> %1,
    <vscale x 4 x i16> %0,
    iXLen %2)

  ret <vscale x 4 x i32> %a
}

define <vscale x 8 x i32> @intrinsic_vwadd.w_wv_untie_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_wv_untie_nxv8i32_nxv8i32_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    vwadd.wv v16, v12, v8
; CHECK-NEXT:    vmv4r.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vwadd.w.nxv8i32.nxv8i16(
    <vscale x 8 x i32> undef,
    <vscale x 8 x i32> %1,
    <vscale x 8 x i16> %0,
    iXLen %2)

  ret <vscale x 8 x i32> %a
}

define <vscale x 1 x i64> @intrinsic_vwadd.w_wv_untie_nxv1i64_nxv1i64_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_wv_untie_nxv1i64_nxv1i64_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vwadd.wv v10, v9, v8
; CHECK-NEXT:    vmv1r.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vwadd.w.nxv1i64.nxv1i32(
    <vscale x 1 x i64> undef,
    <vscale x 1 x i64> %1,
    <vscale x 1 x i32> %0,
    iXLen %2)

  ret <vscale x 1 x i64> %a
}

define <vscale x 2 x i64> @intrinsic_vwadd.w_wv_untie_nxv2i64_nxv2i64_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_wv_untie_nxv2i64_nxv2i64_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vwadd.wv v12, v10, v8
; CHECK-NEXT:    vmv2r.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vwadd.w.nxv2i64.nxv2i32(
    <vscale x 2 x i64> undef,
    <vscale x 2 x i64> %1,
    <vscale x 2 x i32> %0,
    iXLen %2)

  ret <vscale x 2 x i64> %a
}

define <vscale x 4 x i64> @intrinsic_vwadd.w_wv_untie_nxv4i64_nxv4i64_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_wv_untie_nxv4i64_nxv4i64_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vwadd.wv v16, v12, v8
; CHECK-NEXT:    vmv4r.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vwadd.w.nxv4i64.nxv4i32(
    <vscale x 4 x i64> undef,
    <vscale x 4 x i64> %1,
    <vscale x 4 x i32> %0,
    iXLen %2)

  ret <vscale x 4 x i64> %a
}

define <vscale x 8 x i64> @intrinsic_vwadd.w_wv_untie_nxv8i64_nxv8i64_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_wv_untie_nxv8i64_nxv8i64_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vwadd.wv v24, v16, v8
; CHECK-NEXT:    vmv8r.v v8, v24
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vwadd.w.nxv8i64.nxv8i32(
    <vscale x 8 x i64> undef,
    <vscale x 8 x i64> %1,
    <vscale x 8 x i32> %0,
    iXLen %2)

  ret <vscale x 8 x i64> %a
}