1 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2 ; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+d \
3 ; RUN: -verify-machineinstrs | FileCheck %s
4 ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+d \
5 ; RUN: -verify-machineinstrs | FileCheck %s
; vwmaccus.vx, unmasked: i8 scalar with nxv1i8 vector accumulating into nxv1i16
; (widening multiply-accumulate; "us" presumably unsigned scalar x signed vector
; per the RVV spec -- confirm). Asserted vtype: e8, mf8, tu, ma.
; NOTE(review): this excerpt elides some declare/call operand lines.
7 declare <vscale x 1 x i16> @llvm.riscv.vwmaccus.nxv1i16.i8(
14 define <vscale x 1 x i16> @intrinsic_vwmaccus_vx_nxv1i16_i8_nxv1i8(<vscale x 1 x i16> %0, i8 %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
15 ; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv1i16_i8_nxv1i8:
16 ; CHECK: # %bb.0: # %entry
17 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, ma
18 ; CHECK-NEXT: vwmaccus.vx v8, a0, v9
21 %a = call <vscale x 1 x i16> @llvm.riscv.vwmaccus.nxv1i16.i8(
22 <vscale x 1 x i16> %0,
27 ret <vscale x 1 x i16> %a
; Masked variant: same operation predicated by v0 (v0.t); asserted vtype uses
; mask-undisturbed (tu, mu) instead of ma.
30 declare <vscale x 1 x i16> @llvm.riscv.vwmaccus.mask.nxv1i16.i8(
37 define <vscale x 1 x i16> @intrinsic_vwmaccus_mask_vx_nxv1i16_i8_nxv1i8(<vscale x 1 x i16> %0, i8 %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
38 ; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv1i16_i8_nxv1i8:
39 ; CHECK: # %bb.0: # %entry
40 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu
41 ; CHECK-NEXT: vwmaccus.vx v8, a0, v9, v0.t
44 %a = call <vscale x 1 x i16> @llvm.riscv.vwmaccus.mask.nxv1i16.i8(
45 <vscale x 1 x i16> %0,
51 ret <vscale x 1 x i16> %a
; vwmaccus.vx, unmasked: i8 scalar with nxv2i8 vector into nxv2i16.
; Asserted vtype: e8, mf4, tu, ma.
54 declare <vscale x 2 x i16> @llvm.riscv.vwmaccus.nxv2i16.i8(
61 define <vscale x 2 x i16> @intrinsic_vwmaccus_vx_nxv2i16_i8_nxv2i8(<vscale x 2 x i16> %0, i8 %1, <vscale x 2 x i8> %2, iXLen %3) nounwind {
62 ; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv2i16_i8_nxv2i8:
63 ; CHECK: # %bb.0: # %entry
64 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, ma
65 ; CHECK-NEXT: vwmaccus.vx v8, a0, v9
68 %a = call <vscale x 2 x i16> @llvm.riscv.vwmaccus.nxv2i16.i8(
69 <vscale x 2 x i16> %0,
74 ret <vscale x 2 x i16> %a
; Masked variant (v0.t); asserted vtype: e8, mf4, tu, mu.
77 declare <vscale x 2 x i16> @llvm.riscv.vwmaccus.mask.nxv2i16.i8(
84 define <vscale x 2 x i16> @intrinsic_vwmaccus_mask_vx_nxv2i16_i8_nxv2i8(<vscale x 2 x i16> %0, i8 %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
85 ; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv2i16_i8_nxv2i8:
86 ; CHECK: # %bb.0: # %entry
87 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu
88 ; CHECK-NEXT: vwmaccus.vx v8, a0, v9, v0.t
91 %a = call <vscale x 2 x i16> @llvm.riscv.vwmaccus.mask.nxv2i16.i8(
92 <vscale x 2 x i16> %0,
98 ret <vscale x 2 x i16> %a
; vwmaccus.vx, unmasked: i8 scalar with nxv4i8 vector into nxv4i16.
; Asserted vtype: e8, mf2, tu, ma.
101 declare <vscale x 4 x i16> @llvm.riscv.vwmaccus.nxv4i16.i8(
108 define <vscale x 4 x i16> @intrinsic_vwmaccus_vx_nxv4i16_i8_nxv4i8(<vscale x 4 x i16> %0, i8 %1, <vscale x 4 x i8> %2, iXLen %3) nounwind {
109 ; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv4i16_i8_nxv4i8:
110 ; CHECK: # %bb.0: # %entry
111 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, ma
112 ; CHECK-NEXT: vwmaccus.vx v8, a0, v9
115 %a = call <vscale x 4 x i16> @llvm.riscv.vwmaccus.nxv4i16.i8(
116 <vscale x 4 x i16> %0,
118 <vscale x 4 x i8> %2,
121 ret <vscale x 4 x i16> %a
; Masked variant (v0.t); asserted vtype: e8, mf2, tu, mu.
124 declare <vscale x 4 x i16> @llvm.riscv.vwmaccus.mask.nxv4i16.i8(
131 define <vscale x 4 x i16> @intrinsic_vwmaccus_mask_vx_nxv4i16_i8_nxv4i8(<vscale x 4 x i16> %0, i8 %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
132 ; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv4i16_i8_nxv4i8:
133 ; CHECK: # %bb.0: # %entry
134 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu
135 ; CHECK-NEXT: vwmaccus.vx v8, a0, v9, v0.t
138 %a = call <vscale x 4 x i16> @llvm.riscv.vwmaccus.mask.nxv4i16.i8(
139 <vscale x 4 x i16> %0,
141 <vscale x 4 x i8> %2,
142 <vscale x 4 x i1> %3,
145 ret <vscale x 4 x i16> %a
; vwmaccus.vx, unmasked: i8 scalar with nxv8i8 vector into nxv8i16.
; Asserted vtype: e8, m1, tu, ma; source operand moves to v10 since the
; widened destination group occupies v8..v9.
148 declare <vscale x 8 x i16> @llvm.riscv.vwmaccus.nxv8i16.i8(
155 define <vscale x 8 x i16> @intrinsic_vwmaccus_vx_nxv8i16_i8_nxv8i8(<vscale x 8 x i16> %0, i8 %1, <vscale x 8 x i8> %2, iXLen %3) nounwind {
156 ; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv8i16_i8_nxv8i8:
157 ; CHECK: # %bb.0: # %entry
158 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, ma
159 ; CHECK-NEXT: vwmaccus.vx v8, a0, v10
162 %a = call <vscale x 8 x i16> @llvm.riscv.vwmaccus.nxv8i16.i8(
163 <vscale x 8 x i16> %0,
165 <vscale x 8 x i8> %2,
168 ret <vscale x 8 x i16> %a
; Masked variant (v0.t); asserted vtype: e8, m1, tu, mu.
171 declare <vscale x 8 x i16> @llvm.riscv.vwmaccus.mask.nxv8i16.i8(
178 define <vscale x 8 x i16> @intrinsic_vwmaccus_mask_vx_nxv8i16_i8_nxv8i8(<vscale x 8 x i16> %0, i8 %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
179 ; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv8i16_i8_nxv8i8:
180 ; CHECK: # %bb.0: # %entry
181 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu
182 ; CHECK-NEXT: vwmaccus.vx v8, a0, v10, v0.t
185 %a = call <vscale x 8 x i16> @llvm.riscv.vwmaccus.mask.nxv8i16.i8(
186 <vscale x 8 x i16> %0,
188 <vscale x 8 x i8> %2,
189 <vscale x 8 x i1> %3,
192 ret <vscale x 8 x i16> %a
; vwmaccus.vx, unmasked: i8 scalar with nxv16i8 vector into nxv16i16.
; Asserted vtype: e8, m2, tu, ma; source in v12 (destination group v8..v11).
195 declare <vscale x 16 x i16> @llvm.riscv.vwmaccus.nxv16i16.i8(
202 define <vscale x 16 x i16> @intrinsic_vwmaccus_vx_nxv16i16_i8_nxv16i8(<vscale x 16 x i16> %0, i8 %1, <vscale x 16 x i8> %2, iXLen %3) nounwind {
203 ; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv16i16_i8_nxv16i8:
204 ; CHECK: # %bb.0: # %entry
205 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, ma
206 ; CHECK-NEXT: vwmaccus.vx v8, a0, v12
209 %a = call <vscale x 16 x i16> @llvm.riscv.vwmaccus.nxv16i16.i8(
210 <vscale x 16 x i16> %0,
212 <vscale x 16 x i8> %2,
215 ret <vscale x 16 x i16> %a
; Masked variant (v0.t); asserted vtype: e8, m2, tu, mu.
218 declare <vscale x 16 x i16> @llvm.riscv.vwmaccus.mask.nxv16i16.i8(
225 define <vscale x 16 x i16> @intrinsic_vwmaccus_mask_vx_nxv16i16_i8_nxv16i8(<vscale x 16 x i16> %0, i8 %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
226 ; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv16i16_i8_nxv16i8:
227 ; CHECK: # %bb.0: # %entry
228 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu
229 ; CHECK-NEXT: vwmaccus.vx v8, a0, v12, v0.t
232 %a = call <vscale x 16 x i16> @llvm.riscv.vwmaccus.mask.nxv16i16.i8(
233 <vscale x 16 x i16> %0,
235 <vscale x 16 x i8> %2,
236 <vscale x 16 x i1> %3,
239 ret <vscale x 16 x i16> %a
; vwmaccus.vx, unmasked: i8 scalar with nxv32i8 vector into nxv32i16 (largest
; e8 case tested; the widened result is LMUL=8, so no nxv64 variant exists).
; Asserted vtype: e8, m4, tu, ma; source in v16 (destination group v8..v15).
242 declare <vscale x 32 x i16> @llvm.riscv.vwmaccus.nxv32i16.i8(
249 define <vscale x 32 x i16> @intrinsic_vwmaccus_vx_nxv32i16_i8_nxv32i8(<vscale x 32 x i16> %0, i8 %1, <vscale x 32 x i8> %2, iXLen %3) nounwind {
250 ; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv32i16_i8_nxv32i8:
251 ; CHECK: # %bb.0: # %entry
252 ; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, ma
253 ; CHECK-NEXT: vwmaccus.vx v8, a0, v16
256 %a = call <vscale x 32 x i16> @llvm.riscv.vwmaccus.nxv32i16.i8(
257 <vscale x 32 x i16> %0,
259 <vscale x 32 x i8> %2,
262 ret <vscale x 32 x i16> %a
; Masked variant (v0.t); asserted vtype: e8, m4, tu, mu.
265 declare <vscale x 32 x i16> @llvm.riscv.vwmaccus.mask.nxv32i16.i8(
272 define <vscale x 32 x i16> @intrinsic_vwmaccus_mask_vx_nxv32i16_i8_nxv32i8(<vscale x 32 x i16> %0, i8 %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
273 ; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv32i16_i8_nxv32i8:
274 ; CHECK: # %bb.0: # %entry
275 ; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, mu
276 ; CHECK-NEXT: vwmaccus.vx v8, a0, v16, v0.t
279 %a = call <vscale x 32 x i16> @llvm.riscv.vwmaccus.mask.nxv32i16.i8(
280 <vscale x 32 x i16> %0,
282 <vscale x 32 x i8> %2,
283 <vscale x 32 x i1> %3,
286 ret <vscale x 32 x i16> %a
; vwmaccus.vx, unmasked: i16 scalar with nxv1i16 vector into nxv1i32
; (first e16 source case). Asserted vtype: e16, mf4, tu, ma.
289 declare <vscale x 1 x i32> @llvm.riscv.vwmaccus.nxv1i32.i16(
296 define <vscale x 1 x i32> @intrinsic_vwmaccus_vx_nxv1i32_i16_nxv1i16(<vscale x 1 x i32> %0, i16 %1, <vscale x 1 x i16> %2, iXLen %3) nounwind {
297 ; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv1i32_i16_nxv1i16:
298 ; CHECK: # %bb.0: # %entry
299 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, ma
300 ; CHECK-NEXT: vwmaccus.vx v8, a0, v9
303 %a = call <vscale x 1 x i32> @llvm.riscv.vwmaccus.nxv1i32.i16(
304 <vscale x 1 x i32> %0,
306 <vscale x 1 x i16> %2,
309 ret <vscale x 1 x i32> %a
; Masked variant (v0.t); asserted vtype: e16, mf4, tu, mu.
312 declare <vscale x 1 x i32> @llvm.riscv.vwmaccus.mask.nxv1i32.i16(
319 define <vscale x 1 x i32> @intrinsic_vwmaccus_mask_vx_nxv1i32_i16_nxv1i16(<vscale x 1 x i32> %0, i16 %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
320 ; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv1i32_i16_nxv1i16:
321 ; CHECK: # %bb.0: # %entry
322 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu
323 ; CHECK-NEXT: vwmaccus.vx v8, a0, v9, v0.t
326 %a = call <vscale x 1 x i32> @llvm.riscv.vwmaccus.mask.nxv1i32.i16(
327 <vscale x 1 x i32> %0,
329 <vscale x 1 x i16> %2,
330 <vscale x 1 x i1> %3,
333 ret <vscale x 1 x i32> %a
; vwmaccus.vx, unmasked: i16 scalar with nxv2i16 vector into nxv2i32.
; Asserted vtype: e16, mf2, tu, ma.
336 declare <vscale x 2 x i32> @llvm.riscv.vwmaccus.nxv2i32.i16(
343 define <vscale x 2 x i32> @intrinsic_vwmaccus_vx_nxv2i32_i16_nxv2i16(<vscale x 2 x i32> %0, i16 %1, <vscale x 2 x i16> %2, iXLen %3) nounwind {
344 ; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv2i32_i16_nxv2i16:
345 ; CHECK: # %bb.0: # %entry
346 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, ma
347 ; CHECK-NEXT: vwmaccus.vx v8, a0, v9
350 %a = call <vscale x 2 x i32> @llvm.riscv.vwmaccus.nxv2i32.i16(
351 <vscale x 2 x i32> %0,
353 <vscale x 2 x i16> %2,
356 ret <vscale x 2 x i32> %a
; Masked variant (v0.t); asserted vtype: e16, mf2, tu, mu.
359 declare <vscale x 2 x i32> @llvm.riscv.vwmaccus.mask.nxv2i32.i16(
366 define <vscale x 2 x i32> @intrinsic_vwmaccus_mask_vx_nxv2i32_i16_nxv2i16(<vscale x 2 x i32> %0, i16 %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
367 ; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv2i32_i16_nxv2i16:
368 ; CHECK: # %bb.0: # %entry
369 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu
370 ; CHECK-NEXT: vwmaccus.vx v8, a0, v9, v0.t
373 %a = call <vscale x 2 x i32> @llvm.riscv.vwmaccus.mask.nxv2i32.i16(
374 <vscale x 2 x i32> %0,
376 <vscale x 2 x i16> %2,
377 <vscale x 2 x i1> %3,
380 ret <vscale x 2 x i32> %a
; vwmaccus.vx, unmasked: i16 scalar with nxv4i16 vector into nxv4i32.
; Asserted vtype: e16, m1, tu, ma; source in v10 (destination group v8..v9).
383 declare <vscale x 4 x i32> @llvm.riscv.vwmaccus.nxv4i32.i16(
390 define <vscale x 4 x i32> @intrinsic_vwmaccus_vx_nxv4i32_i16_nxv4i16(<vscale x 4 x i32> %0, i16 %1, <vscale x 4 x i16> %2, iXLen %3) nounwind {
391 ; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv4i32_i16_nxv4i16:
392 ; CHECK: # %bb.0: # %entry
393 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, ma
394 ; CHECK-NEXT: vwmaccus.vx v8, a0, v10
397 %a = call <vscale x 4 x i32> @llvm.riscv.vwmaccus.nxv4i32.i16(
398 <vscale x 4 x i32> %0,
400 <vscale x 4 x i16> %2,
403 ret <vscale x 4 x i32> %a
; Masked variant (v0.t); asserted vtype: e16, m1, tu, mu.
406 declare <vscale x 4 x i32> @llvm.riscv.vwmaccus.mask.nxv4i32.i16(
413 define <vscale x 4 x i32> @intrinsic_vwmaccus_mask_vx_nxv4i32_i16_nxv4i16(<vscale x 4 x i32> %0, i16 %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
414 ; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv4i32_i16_nxv4i16:
415 ; CHECK: # %bb.0: # %entry
416 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu
417 ; CHECK-NEXT: vwmaccus.vx v8, a0, v10, v0.t
420 %a = call <vscale x 4 x i32> @llvm.riscv.vwmaccus.mask.nxv4i32.i16(
421 <vscale x 4 x i32> %0,
423 <vscale x 4 x i16> %2,
424 <vscale x 4 x i1> %3,
427 ret <vscale x 4 x i32> %a
; vwmaccus.vx, unmasked: i16 scalar with nxv8i16 vector into nxv8i32.
; Asserted vtype: e16, m2, tu, ma; source in v12.
430 declare <vscale x 8 x i32> @llvm.riscv.vwmaccus.nxv8i32.i16(
437 define <vscale x 8 x i32> @intrinsic_vwmaccus_vx_nxv8i32_i16_nxv8i16(<vscale x 8 x i32> %0, i16 %1, <vscale x 8 x i16> %2, iXLen %3) nounwind {
438 ; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv8i32_i16_nxv8i16:
439 ; CHECK: # %bb.0: # %entry
440 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, ma
441 ; CHECK-NEXT: vwmaccus.vx v8, a0, v12
444 %a = call <vscale x 8 x i32> @llvm.riscv.vwmaccus.nxv8i32.i16(
445 <vscale x 8 x i32> %0,
447 <vscale x 8 x i16> %2,
450 ret <vscale x 8 x i32> %a
; Masked variant (v0.t); asserted vtype: e16, m2, tu, mu.
453 declare <vscale x 8 x i32> @llvm.riscv.vwmaccus.mask.nxv8i32.i16(
460 define <vscale x 8 x i32> @intrinsic_vwmaccus_mask_vx_nxv8i32_i16_nxv8i16(<vscale x 8 x i32> %0, i16 %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
461 ; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv8i32_i16_nxv8i16:
462 ; CHECK: # %bb.0: # %entry
463 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu
464 ; CHECK-NEXT: vwmaccus.vx v8, a0, v12, v0.t
467 %a = call <vscale x 8 x i32> @llvm.riscv.vwmaccus.mask.nxv8i32.i16(
468 <vscale x 8 x i32> %0,
470 <vscale x 8 x i16> %2,
471 <vscale x 8 x i1> %3,
474 ret <vscale x 8 x i32> %a
; vwmaccus.vx, unmasked: i16 scalar with nxv16i16 vector into nxv16i32
; (largest e16 case tested; widened result is LMUL=8).
; Asserted vtype: e16, m4, tu, ma; source in v16.
477 declare <vscale x 16 x i32> @llvm.riscv.vwmaccus.nxv16i32.i16(
484 define <vscale x 16 x i32> @intrinsic_vwmaccus_vx_nxv16i32_i16_nxv16i16(<vscale x 16 x i32> %0, i16 %1, <vscale x 16 x i16> %2, iXLen %3) nounwind {
485 ; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv16i32_i16_nxv16i16:
486 ; CHECK: # %bb.0: # %entry
487 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, ma
488 ; CHECK-NEXT: vwmaccus.vx v8, a0, v16
491 %a = call <vscale x 16 x i32> @llvm.riscv.vwmaccus.nxv16i32.i16(
492 <vscale x 16 x i32> %0,
494 <vscale x 16 x i16> %2,
497 ret <vscale x 16 x i32> %a
; Masked variant (v0.t); asserted vtype: e16, m4, tu, mu.
500 declare <vscale x 16 x i32> @llvm.riscv.vwmaccus.mask.nxv16i32.i16(
507 define <vscale x 16 x i32> @intrinsic_vwmaccus_mask_vx_nxv16i32_i16_nxv16i16(<vscale x 16 x i32> %0, i16 %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
508 ; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv16i32_i16_nxv16i16:
509 ; CHECK: # %bb.0: # %entry
510 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu
511 ; CHECK-NEXT: vwmaccus.vx v8, a0, v16, v0.t
514 %a = call <vscale x 16 x i32> @llvm.riscv.vwmaccus.mask.nxv16i32.i16(
515 <vscale x 16 x i32> %0,
517 <vscale x 16 x i16> %2,
518 <vscale x 16 x i1> %3,
521 ret <vscale x 16 x i32> %a
; vwmaccus.vx, unmasked: i32 scalar with nxv1i32 vector into nxv1i64
; (first e32 source case; on RV32 the i32 scalar still fits a GPR, so the
; same .vx form is used under both sed-substituted iXLen RUN lines).
; Asserted vtype: e32, mf2, tu, ma.
524 declare <vscale x 1 x i64> @llvm.riscv.vwmaccus.nxv1i64.i32(
531 define <vscale x 1 x i64> @intrinsic_vwmaccus_vx_nxv1i64_i32_nxv1i32(<vscale x 1 x i64> %0, i32 %1, <vscale x 1 x i32> %2, iXLen %3) nounwind {
532 ; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv1i64_i32_nxv1i32:
533 ; CHECK: # %bb.0: # %entry
534 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, ma
535 ; CHECK-NEXT: vwmaccus.vx v8, a0, v9
538 %a = call <vscale x 1 x i64> @llvm.riscv.vwmaccus.nxv1i64.i32(
539 <vscale x 1 x i64> %0,
541 <vscale x 1 x i32> %2,
544 ret <vscale x 1 x i64> %a
; Masked variant (v0.t); asserted vtype: e32, mf2, tu, mu.
547 declare <vscale x 1 x i64> @llvm.riscv.vwmaccus.mask.nxv1i64.i32(
554 define <vscale x 1 x i64> @intrinsic_vwmaccus_mask_vx_nxv1i64_i32_nxv1i32(<vscale x 1 x i64> %0, i32 %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
555 ; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv1i64_i32_nxv1i32:
556 ; CHECK: # %bb.0: # %entry
557 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu
558 ; CHECK-NEXT: vwmaccus.vx v8, a0, v9, v0.t
561 %a = call <vscale x 1 x i64> @llvm.riscv.vwmaccus.mask.nxv1i64.i32(
562 <vscale x 1 x i64> %0,
564 <vscale x 1 x i32> %2,
565 <vscale x 1 x i1> %3,
568 ret <vscale x 1 x i64> %a
; vwmaccus.vx, unmasked: i32 scalar with nxv2i32 vector into nxv2i64.
; Asserted vtype: e32, m1, tu, ma; source in v10.
571 declare <vscale x 2 x i64> @llvm.riscv.vwmaccus.nxv2i64.i32(
578 define <vscale x 2 x i64> @intrinsic_vwmaccus_vx_nxv2i64_i32_nxv2i32(<vscale x 2 x i64> %0, i32 %1, <vscale x 2 x i32> %2, iXLen %3) nounwind {
579 ; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv2i64_i32_nxv2i32:
580 ; CHECK: # %bb.0: # %entry
581 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, ma
582 ; CHECK-NEXT: vwmaccus.vx v8, a0, v10
585 %a = call <vscale x 2 x i64> @llvm.riscv.vwmaccus.nxv2i64.i32(
586 <vscale x 2 x i64> %0,
588 <vscale x 2 x i32> %2,
591 ret <vscale x 2 x i64> %a
; Masked variant (v0.t); asserted vtype: e32, m1, tu, mu.
594 declare <vscale x 2 x i64> @llvm.riscv.vwmaccus.mask.nxv2i64.i32(
601 define <vscale x 2 x i64> @intrinsic_vwmaccus_mask_vx_nxv2i64_i32_nxv2i32(<vscale x 2 x i64> %0, i32 %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
602 ; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv2i64_i32_nxv2i32:
603 ; CHECK: # %bb.0: # %entry
604 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu
605 ; CHECK-NEXT: vwmaccus.vx v8, a0, v10, v0.t
608 %a = call <vscale x 2 x i64> @llvm.riscv.vwmaccus.mask.nxv2i64.i32(
609 <vscale x 2 x i64> %0,
611 <vscale x 2 x i32> %2,
612 <vscale x 2 x i1> %3,
615 ret <vscale x 2 x i64> %a
; vwmaccus.vx, unmasked: i32 scalar with nxv4i32 vector into nxv4i64.
; Asserted vtype: e32, m2, tu, ma; source in v12.
618 declare <vscale x 4 x i64> @llvm.riscv.vwmaccus.nxv4i64.i32(
625 define <vscale x 4 x i64> @intrinsic_vwmaccus_vx_nxv4i64_i32_nxv4i32(<vscale x 4 x i64> %0, i32 %1, <vscale x 4 x i32> %2, iXLen %3) nounwind {
626 ; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv4i64_i32_nxv4i32:
627 ; CHECK: # %bb.0: # %entry
628 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, ma
629 ; CHECK-NEXT: vwmaccus.vx v8, a0, v12
632 %a = call <vscale x 4 x i64> @llvm.riscv.vwmaccus.nxv4i64.i32(
633 <vscale x 4 x i64> %0,
635 <vscale x 4 x i32> %2,
638 ret <vscale x 4 x i64> %a
; Masked variant (v0.t); asserted vtype: e32, m2, tu, mu.
641 declare <vscale x 4 x i64> @llvm.riscv.vwmaccus.mask.nxv4i64.i32(
648 define <vscale x 4 x i64> @intrinsic_vwmaccus_mask_vx_nxv4i64_i32_nxv4i32(<vscale x 4 x i64> %0, i32 %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
649 ; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv4i64_i32_nxv4i32:
650 ; CHECK: # %bb.0: # %entry
651 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu
652 ; CHECK-NEXT: vwmaccus.vx v8, a0, v12, v0.t
655 %a = call <vscale x 4 x i64> @llvm.riscv.vwmaccus.mask.nxv4i64.i32(
656 <vscale x 4 x i64> %0,
658 <vscale x 4 x i32> %2,
659 <vscale x 4 x i1> %3,
662 ret <vscale x 4 x i64> %a
; vwmaccus.vx, unmasked: i32 scalar with nxv8i32 vector into nxv8i64
; (largest e32 case tested; widened result is LMUL=8).
; Asserted vtype: e32, m4, tu, ma; source in v16.
665 declare <vscale x 8 x i64> @llvm.riscv.vwmaccus.nxv8i64.i32(
672 define <vscale x 8 x i64> @intrinsic_vwmaccus_vx_nxv8i64_i32_nxv8i32(<vscale x 8 x i64> %0, i32 %1, <vscale x 8 x i32> %2, iXLen %3) nounwind {
673 ; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv8i64_i32_nxv8i32:
674 ; CHECK: # %bb.0: # %entry
675 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, ma
676 ; CHECK-NEXT: vwmaccus.vx v8, a0, v16
679 %a = call <vscale x 8 x i64> @llvm.riscv.vwmaccus.nxv8i64.i32(
680 <vscale x 8 x i64> %0,
682 <vscale x 8 x i32> %2,
685 ret <vscale x 8 x i64> %a
; Masked variant (v0.t); asserted vtype: e32, m4, tu, mu.
688 declare <vscale x 8 x i64> @llvm.riscv.vwmaccus.mask.nxv8i64.i32(
695 define <vscale x 8 x i64> @intrinsic_vwmaccus_mask_vx_nxv8i64_i32_nxv8i32(<vscale x 8 x i64> %0, i32 %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
696 ; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv8i64_i32_nxv8i32:
697 ; CHECK: # %bb.0: # %entry
698 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu
699 ; CHECK-NEXT: vwmaccus.vx v8, a0, v16, v0.t
702 %a = call <vscale x 8 x i64> @llvm.riscv.vwmaccus.mask.nxv8i64.i32(
703 <vscale x 8 x i64> %0,
705 <vscale x 8 x i32> %2,
706 <vscale x 8 x i1> %3,
709 ret <vscale x 8 x i64> %a