// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
// REQUIRES: riscv-registered-target
// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
// RUN:   -target-feature +zvfh -disable-O0-optnone \
// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
// RUN:   FileCheck --check-prefix=CHECK-RV64 %s

#include <riscv_vector.h>
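
// This test covers the non-policy, non-overloaded __riscv_vwmaccu_{vv,vx}
// intrinsics (unsigned widening multiply-accumulate: vd[i] += vs1[i] * vs2[i],
// with the product widened to 2*SEW) for every destination SEW/LMUL
// combination, in unmasked and masked (_m) forms. In the generated IR, the
// trailing `i64 3` operand is the policy immediate (tail agnostic | mask
// agnostic).
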
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vwmaccu_vv_u16mf4
// CHECK-RV64-SAME: (<vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i8> [[VS1:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmaccu.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> [[VD]], <vscale x 1 x i8> [[VS1]], <vscale x 1 x i8> [[VS2]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vwmaccu_vv_u16mf4(vuint16mf4_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) {
  return __riscv_vwmaccu_vv_u16mf4(vd, vs1, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vwmaccu_vx_u16mf4
// CHECK-RV64-SAME: (<vscale x 1 x i16> [[VD:%.*]], i8 noundef zeroext [[RS1:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmaccu.nxv1i16.i8.nxv1i8.i64(<vscale x 1 x i16> [[VD]], i8 [[RS1]], <vscale x 1 x i8> [[VS2]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vwmaccu_vx_u16mf4(vuint16mf4_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) {
  return __riscv_vwmaccu_vx_u16mf4(vd, rs1, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vwmaccu_vv_u16mf2
// CHECK-RV64-SAME: (<vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i8> [[VS1:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmaccu.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> [[VD]], <vscale x 2 x i8> [[VS1]], <vscale x 2 x i8> [[VS2]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vwmaccu_vv_u16mf2(vuint16mf2_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) {
  return __riscv_vwmaccu_vv_u16mf2(vd, vs1, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vwmaccu_vx_u16mf2
// CHECK-RV64-SAME: (<vscale x 2 x i16> [[VD:%.*]], i8 noundef zeroext [[RS1:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmaccu.nxv2i16.i8.nxv2i8.i64(<vscale x 2 x i16> [[VD]], i8 [[RS1]], <vscale x 2 x i8> [[VS2]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vwmaccu_vx_u16mf2(vuint16mf2_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) {
  return __riscv_vwmaccu_vx_u16mf2(vd, rs1, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vwmaccu_vv_u16m1
// CHECK-RV64-SAME: (<vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i8> [[VS1:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmaccu.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> [[VD]], <vscale x 4 x i8> [[VS1]], <vscale x 4 x i8> [[VS2]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vwmaccu_vv_u16m1(vuint16m1_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) {
  return __riscv_vwmaccu_vv_u16m1(vd, vs1, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vwmaccu_vx_u16m1
// CHECK-RV64-SAME: (<vscale x 4 x i16> [[VD:%.*]], i8 noundef zeroext [[RS1:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmaccu.nxv4i16.i8.nxv4i8.i64(<vscale x 4 x i16> [[VD]], i8 [[RS1]], <vscale x 4 x i8> [[VS2]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vwmaccu_vx_u16m1(vuint16m1_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) {
  return __riscv_vwmaccu_vx_u16m1(vd, rs1, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vwmaccu_vv_u16m2
// CHECK-RV64-SAME: (<vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i8> [[VS1:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmaccu.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> [[VD]], <vscale x 8 x i8> [[VS1]], <vscale x 8 x i8> [[VS2]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vwmaccu_vv_u16m2(vuint16m2_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) {
  return __riscv_vwmaccu_vv_u16m2(vd, vs1, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vwmaccu_vx_u16m2
// CHECK-RV64-SAME: (<vscale x 8 x i16> [[VD:%.*]], i8 noundef zeroext [[RS1:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmaccu.nxv8i16.i8.nxv8i8.i64(<vscale x 8 x i16> [[VD]], i8 [[RS1]], <vscale x 8 x i8> [[VS2]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vwmaccu_vx_u16m2(vuint16m2_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) {
  return __riscv_vwmaccu_vx_u16m2(vd, rs1, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vwmaccu_vv_u16m4
// CHECK-RV64-SAME: (<vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i8> [[VS1:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmaccu.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> [[VD]], <vscale x 16 x i8> [[VS1]], <vscale x 16 x i8> [[VS2]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vwmaccu_vv_u16m4(vuint16m4_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) {
  return __riscv_vwmaccu_vv_u16m4(vd, vs1, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vwmaccu_vx_u16m4
// CHECK-RV64-SAME: (<vscale x 16 x i16> [[VD:%.*]], i8 noundef zeroext [[RS1:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmaccu.nxv16i16.i8.nxv16i8.i64(<vscale x 16 x i16> [[VD]], i8 [[RS1]], <vscale x 16 x i8> [[VS2]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vwmaccu_vx_u16m4(vuint16m4_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) {
  return __riscv_vwmaccu_vx_u16m4(vd, rs1, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vwmaccu_vv_u16m8
// CHECK-RV64-SAME: (<vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i8> [[VS1:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmaccu.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x i8> [[VS1]], <vscale x 32 x i8> [[VS2]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vwmaccu_vv_u16m8(vuint16m8_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) {
  return __riscv_vwmaccu_vv_u16m8(vd, vs1, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vwmaccu_vx_u16m8
// CHECK-RV64-SAME: (<vscale x 32 x i16> [[VD:%.*]], i8 noundef zeroext [[RS1:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmaccu.nxv32i16.i8.nxv32i8.i64(<vscale x 32 x i16> [[VD]], i8 [[RS1]], <vscale x 32 x i8> [[VS2]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vwmaccu_vx_u16m8(vuint16m8_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) {
  return __riscv_vwmaccu_vx_u16m8(vd, rs1, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vwmaccu_vv_u32mf2
// CHECK-RV64-SAME: (<vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i16> [[VS1:%.*]], <vscale x 1 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmaccu.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> [[VD]], <vscale x 1 x i16> [[VS1]], <vscale x 1 x i16> [[VS2]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vwmaccu_vv_u32mf2(vuint32mf2_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) {
  return __riscv_vwmaccu_vv_u32mf2(vd, vs1, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vwmaccu_vx_u32mf2
// CHECK-RV64-SAME: (<vscale x 1 x i32> [[VD:%.*]], i16 noundef zeroext [[RS1:%.*]], <vscale x 1 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmaccu.nxv1i32.i16.nxv1i16.i64(<vscale x 1 x i32> [[VD]], i16 [[RS1]], <vscale x 1 x i16> [[VS2]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vwmaccu_vx_u32mf2(vuint32mf2_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) {
  return __riscv_vwmaccu_vx_u32mf2(vd, rs1, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vwmaccu_vv_u32m1
// CHECK-RV64-SAME: (<vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i16> [[VS1:%.*]], <vscale x 2 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmaccu.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> [[VD]], <vscale x 2 x i16> [[VS1]], <vscale x 2 x i16> [[VS2]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vwmaccu_vv_u32m1(vuint32m1_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) {
  return __riscv_vwmaccu_vv_u32m1(vd, vs1, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vwmaccu_vx_u32m1
// CHECK-RV64-SAME: (<vscale x 2 x i32> [[VD:%.*]], i16 noundef zeroext [[RS1:%.*]], <vscale x 2 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmaccu.nxv2i32.i16.nxv2i16.i64(<vscale x 2 x i32> [[VD]], i16 [[RS1]], <vscale x 2 x i16> [[VS2]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vwmaccu_vx_u32m1(vuint32m1_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) {
  return __riscv_vwmaccu_vx_u32m1(vd, rs1, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vwmaccu_vv_u32m2
// CHECK-RV64-SAME: (<vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i16> [[VS1:%.*]], <vscale x 4 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmaccu.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> [[VD]], <vscale x 4 x i16> [[VS1]], <vscale x 4 x i16> [[VS2]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vwmaccu_vv_u32m2(vuint32m2_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) {
  return __riscv_vwmaccu_vv_u32m2(vd, vs1, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vwmaccu_vx_u32m2
// CHECK-RV64-SAME: (<vscale x 4 x i32> [[VD:%.*]], i16 noundef zeroext [[RS1:%.*]], <vscale x 4 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmaccu.nxv4i32.i16.nxv4i16.i64(<vscale x 4 x i32> [[VD]], i16 [[RS1]], <vscale x 4 x i16> [[VS2]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vwmaccu_vx_u32m2(vuint32m2_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) {
  return __riscv_vwmaccu_vx_u32m2(vd, rs1, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vwmaccu_vv_u32m4
// CHECK-RV64-SAME: (<vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i16> [[VS1:%.*]], <vscale x 8 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmaccu.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> [[VD]], <vscale x 8 x i16> [[VS1]], <vscale x 8 x i16> [[VS2]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vwmaccu_vv_u32m4(vuint32m4_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) {
  return __riscv_vwmaccu_vv_u32m4(vd, vs1, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vwmaccu_vx_u32m4
// CHECK-RV64-SAME: (<vscale x 8 x i32> [[VD:%.*]], i16 noundef zeroext [[RS1:%.*]], <vscale x 8 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmaccu.nxv8i32.i16.nxv8i16.i64(<vscale x 8 x i32> [[VD]], i16 [[RS1]], <vscale x 8 x i16> [[VS2]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vwmaccu_vx_u32m4(vuint32m4_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) {
  return __riscv_vwmaccu_vx_u32m4(vd, rs1, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vwmaccu_vv_u32m8
// CHECK-RV64-SAME: (<vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i16> [[VS1:%.*]], <vscale x 16 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmaccu.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> [[VD]], <vscale x 16 x i16> [[VS1]], <vscale x 16 x i16> [[VS2]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vwmaccu_vv_u32m8(vuint32m8_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) {
  return __riscv_vwmaccu_vv_u32m8(vd, vs1, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vwmaccu_vx_u32m8
// CHECK-RV64-SAME: (<vscale x 16 x i32> [[VD:%.*]], i16 noundef zeroext [[RS1:%.*]], <vscale x 16 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmaccu.nxv16i32.i16.nxv16i16.i64(<vscale x 16 x i32> [[VD]], i16 [[RS1]], <vscale x 16 x i16> [[VS2]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vwmaccu_vx_u32m8(vuint32m8_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) {
  return __riscv_vwmaccu_vx_u32m8(vd, rs1, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vwmaccu_vv_u64m1
// CHECK-RV64-SAME: (<vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i32> [[VS1:%.*]], <vscale x 1 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmaccu.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> [[VD]], <vscale x 1 x i32> [[VS1]], <vscale x 1 x i32> [[VS2]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vwmaccu_vv_u64m1(vuint64m1_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) {
  return __riscv_vwmaccu_vv_u64m1(vd, vs1, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vwmaccu_vx_u64m1
// CHECK-RV64-SAME: (<vscale x 1 x i64> [[VD:%.*]], i32 noundef signext [[RS1:%.*]], <vscale x 1 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmaccu.nxv1i64.i32.nxv1i32.i64(<vscale x 1 x i64> [[VD]], i32 [[RS1]], <vscale x 1 x i32> [[VS2]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vwmaccu_vx_u64m1(vuint64m1_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) {
  return __riscv_vwmaccu_vx_u64m1(vd, rs1, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vwmaccu_vv_u64m2
// CHECK-RV64-SAME: (<vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i32> [[VS1:%.*]], <vscale x 2 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmaccu.nxv2i64.nxv2i32.nxv2i32.i64(<vscale x 2 x i64> [[VD]], <vscale x 2 x i32> [[VS1]], <vscale x 2 x i32> [[VS2]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vwmaccu_vv_u64m2(vuint64m2_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) {
  return __riscv_vwmaccu_vv_u64m2(vd, vs1, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vwmaccu_vx_u64m2
// CHECK-RV64-SAME: (<vscale x 2 x i64> [[VD:%.*]], i32 noundef signext [[RS1:%.*]], <vscale x 2 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmaccu.nxv2i64.i32.nxv2i32.i64(<vscale x 2 x i64> [[VD]], i32 [[RS1]], <vscale x 2 x i32> [[VS2]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vwmaccu_vx_u64m2(vuint64m2_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) {
  return __riscv_vwmaccu_vx_u64m2(vd, rs1, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vwmaccu_vv_u64m4
// CHECK-RV64-SAME: (<vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i32> [[VS1:%.*]], <vscale x 4 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmaccu.nxv4i64.nxv4i32.nxv4i32.i64(<vscale x 4 x i64> [[VD]], <vscale x 4 x i32> [[VS1]], <vscale x 4 x i32> [[VS2]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vwmaccu_vv_u64m4(vuint64m4_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) {
  return __riscv_vwmaccu_vv_u64m4(vd, vs1, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vwmaccu_vx_u64m4
// CHECK-RV64-SAME: (<vscale x 4 x i64> [[VD:%.*]], i32 noundef signext [[RS1:%.*]], <vscale x 4 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmaccu.nxv4i64.i32.nxv4i32.i64(<vscale x 4 x i64> [[VD]], i32 [[RS1]], <vscale x 4 x i32> [[VS2]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vwmaccu_vx_u64m4(vuint64m4_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) {
  return __riscv_vwmaccu_vx_u64m4(vd, rs1, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vwmaccu_vv_u64m8
// CHECK-RV64-SAME: (<vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i32> [[VS1:%.*]], <vscale x 8 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmaccu.nxv8i64.nxv8i32.nxv8i32.i64(<vscale x 8 x i64> [[VD]], <vscale x 8 x i32> [[VS1]], <vscale x 8 x i32> [[VS2]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vwmaccu_vv_u64m8(vuint64m8_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) {
  return __riscv_vwmaccu_vv_u64m8(vd, vs1, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vwmaccu_vx_u64m8
// CHECK-RV64-SAME: (<vscale x 8 x i64> [[VD:%.*]], i32 noundef signext [[RS1:%.*]], <vscale x 8 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmaccu.nxv8i64.i32.nxv8i32.i64(<vscale x 8 x i64> [[VD]], i32 [[RS1]], <vscale x 8 x i32> [[VS2]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vwmaccu_vx_u64m8(vuint64m8_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) {
  return __riscv_vwmaccu_vx_u64m8(vd, rs1, vs2, vl);
}

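// The _m variants below take a vbool mask as the first argument; they lower
// to the @llvm.riscv.vwmaccu.mask.* intrinsics, which carry the mask as an
// extra operand after vs2.
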
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vwmaccu_vv_u16mf4_m
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i8> [[VS1:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmaccu.mask.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> [[VD]], <vscale x 1 x i8> [[VS1]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vwmaccu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) {
  return __riscv_vwmaccu_vv_u16mf4_m(mask, vd, vs1, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vwmaccu_vx_u16mf4_m
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i16> [[VD:%.*]], i8 noundef zeroext [[RS1:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmaccu.mask.nxv1i16.i8.nxv1i8.i64(<vscale x 1 x i16> [[VD]], i8 [[RS1]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vwmaccu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) {
  return __riscv_vwmaccu_vx_u16mf4_m(mask, vd, rs1, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vwmaccu_vv_u16mf2_m
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i8> [[VS1:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmaccu.mask.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> [[VD]], <vscale x 2 x i8> [[VS1]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vwmaccu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) {
  return __riscv_vwmaccu_vv_u16mf2_m(mask, vd, vs1, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vwmaccu_vx_u16mf2_m
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i16> [[VD:%.*]], i8 noundef zeroext [[RS1:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmaccu.mask.nxv2i16.i8.nxv2i8.i64(<vscale x 2 x i16> [[VD]], i8 [[RS1]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vwmaccu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) {
  return __riscv_vwmaccu_vx_u16mf2_m(mask, vd, rs1, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vwmaccu_vv_u16m1_m
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i8> [[VS1:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmaccu.mask.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> [[VD]], <vscale x 4 x i8> [[VS1]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vwmaccu_vv_u16m1_m(vbool16_t mask, vuint16m1_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) {
  return __riscv_vwmaccu_vv_u16m1_m(mask, vd, vs1, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vwmaccu_vx_u16m1_m
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i16> [[VD:%.*]], i8 noundef zeroext [[RS1:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmaccu.mask.nxv4i16.i8.nxv4i8.i64(<vscale x 4 x i16> [[VD]], i8 [[RS1]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vwmaccu_vx_u16m1_m(vbool16_t mask, vuint16m1_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) {
  return __riscv_vwmaccu_vx_u16m1_m(mask, vd, rs1, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vwmaccu_vv_u16m2_m
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i8> [[VS1:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmaccu.mask.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> [[VD]], <vscale x 8 x i8> [[VS1]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vwmaccu_vv_u16m2_m(vbool8_t mask, vuint16m2_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) {
  return __riscv_vwmaccu_vv_u16m2_m(mask, vd, vs1, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vwmaccu_vx_u16m2_m
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i16> [[VD:%.*]], i8 noundef zeroext [[RS1:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmaccu.mask.nxv8i16.i8.nxv8i8.i64(<vscale x 8 x i16> [[VD]], i8 [[RS1]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vwmaccu_vx_u16m2_m(vbool8_t mask, vuint16m2_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) {
  return __riscv_vwmaccu_vx_u16m2_m(mask, vd, rs1, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vwmaccu_vv_u16m4_m
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i8> [[VS1:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmaccu.mask.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> [[VD]], <vscale x 16 x i8> [[VS1]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vwmaccu_vv_u16m4_m(vbool4_t mask, vuint16m4_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) {
  return __riscv_vwmaccu_vv_u16m4_m(mask, vd, vs1, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vwmaccu_vx_u16m4_m
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i16> [[VD:%.*]], i8 noundef zeroext [[RS1:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmaccu.mask.nxv16i16.i8.nxv16i8.i64(<vscale x 16 x i16> [[VD]], i8 [[RS1]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vwmaccu_vx_u16m4_m(vbool4_t mask, vuint16m4_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) {
  return __riscv_vwmaccu_vx_u16m4_m(mask, vd, rs1, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vwmaccu_vv_u16m8_m
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i8> [[VS1:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmaccu.mask.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x i8> [[VS1]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vwmaccu_vv_u16m8_m(vbool2_t mask, vuint16m8_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) {
  return __riscv_vwmaccu_vv_u16m8_m(mask, vd, vs1, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vwmaccu_vx_u16m8_m
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i16> [[VD:%.*]], i8 noundef zeroext [[RS1:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmaccu.mask.nxv32i16.i8.nxv32i8.i64(<vscale x 32 x i16> [[VD]], i8 [[RS1]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vwmaccu_vx_u16m8_m(vbool2_t mask, vuint16m8_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) {
  return __riscv_vwmaccu_vx_u16m8_m(mask, vd, rs1, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vwmaccu_vv_u32mf2_m
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i16> [[VS1:%.*]], <vscale x 1 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmaccu.mask.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> [[VD]], <vscale x 1 x i16> [[VS1]], <vscale x 1 x i16> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vwmaccu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) {
  return __riscv_vwmaccu_vv_u32mf2_m(mask, vd, vs1, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vwmaccu_vx_u32mf2_m
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i32> [[VD:%.*]], i16 noundef zeroext [[RS1:%.*]], <vscale x 1 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmaccu.mask.nxv1i32.i16.nxv1i16.i64(<vscale x 1 x i32> [[VD]], i16 [[RS1]], <vscale x 1 x i16> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vwmaccu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) {
  return __riscv_vwmaccu_vx_u32mf2_m(mask, vd, rs1, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vwmaccu_vv_u32m1_m
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i16> [[VS1:%.*]], <vscale x 2 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmaccu.mask.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> [[VD]], <vscale x 2 x i16> [[VS1]], <vscale x 2 x i16> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vwmaccu_vv_u32m1_m(vbool32_t mask, vuint32m1_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) {
  return __riscv_vwmaccu_vv_u32m1_m(mask, vd, vs1, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vwmaccu_vx_u32m1_m
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i32> [[VD:%.*]], i16 noundef zeroext [[RS1:%.*]], <vscale x 2 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmaccu.mask.nxv2i32.i16.nxv2i16.i64(<vscale x 2 x i32> [[VD]], i16 [[RS1]], <vscale x 2 x i16> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vwmaccu_vx_u32m1_m(vbool32_t mask, vuint32m1_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) {
  return __riscv_vwmaccu_vx_u32m1_m(mask, vd, rs1, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vwmaccu_vv_u32m2_m
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i16> [[VS1:%.*]], <vscale x 4 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmaccu.mask.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> [[VD]], <vscale x 4 x i16> [[VS1]], <vscale x 4 x i16> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vwmaccu_vv_u32m2_m(vbool16_t mask, vuint32m2_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) {
  return __riscv_vwmaccu_vv_u32m2_m(mask, vd, vs1, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vwmaccu_vx_u32m2_m
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i32> [[VD:%.*]], i16 noundef zeroext [[RS1:%.*]], <vscale x 4 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmaccu.mask.nxv4i32.i16.nxv4i16.i64(<vscale x 4 x i32> [[VD]], i16 [[RS1]], <vscale x 4 x i16> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vwmaccu_vx_u32m2_m(vbool16_t mask, vuint32m2_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) {
  return __riscv_vwmaccu_vx_u32m2_m(mask, vd, rs1, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vwmaccu_vv_u32m4_m
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i16> [[VS1:%.*]], <vscale x 8 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmaccu.mask.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> [[VD]], <vscale x 8 x i16> [[VS1]], <vscale x 8 x i16> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vwmaccu_vv_u32m4_m(vbool8_t mask, vuint32m4_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) {
  return __riscv_vwmaccu_vv_u32m4_m(mask, vd, vs1, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vwmaccu_vx_u32m4_m
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i32> [[VD:%.*]], i16 noundef zeroext [[RS1:%.*]], <vscale x 8 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmaccu.mask.nxv8i32.i16.nxv8i16.i64(<vscale x 8 x i32> [[VD]], i16 [[RS1]], <vscale x 8 x i16> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vwmaccu_vx_u32m4_m(vbool8_t mask, vuint32m4_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) {
  return __riscv_vwmaccu_vx_u32m4_m(mask, vd, rs1, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vwmaccu_vv_u32m8_m
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i16> [[VS1:%.*]], <vscale x 16 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmaccu.mask.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> [[VD]], <vscale x 16 x i16> [[VS1]], <vscale x 16 x i16> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vwmaccu_vv_u32m8_m(vbool4_t mask, vuint32m8_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) {
  return __riscv_vwmaccu_vv_u32m8_m(mask, vd, vs1, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vwmaccu_vx_u32m8_m
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i32> [[VD:%.*]], i16 noundef zeroext [[RS1:%.*]], <vscale x 16 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmaccu.mask.nxv16i32.i16.nxv16i16.i64(<vscale x 16 x i32> [[VD]], i16 [[RS1]], <vscale x 16 x i16> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vwmaccu_vx_u32m8_m(vbool4_t mask, vuint32m8_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) {
  return __riscv_vwmaccu_vx_u32m8_m(mask, vd, rs1, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vwmaccu_vv_u64m1_m
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i32> [[VS1:%.*]], <vscale x 1 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmaccu.mask.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> [[VD]], <vscale x 1 x i32> [[VS1]], <vscale x 1 x i32> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vwmaccu_vv_u64m1_m(vbool64_t mask, vuint64m1_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) {
  return __riscv_vwmaccu_vv_u64m1_m(mask, vd, vs1, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vwmaccu_vx_u64m1_m
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i64> [[VD:%.*]], i32 noundef signext [[RS1:%.*]], <vscale x 1 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmaccu.mask.nxv1i64.i32.nxv1i32.i64(<vscale x 1 x i64> [[VD]], i32 [[RS1]], <vscale x 1 x i32> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vwmaccu_vx_u64m1_m(vbool64_t mask, vuint64m1_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) {
  return __riscv_vwmaccu_vx_u64m1_m(mask, vd, rs1, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vwmaccu_vv_u64m2_m
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i32> [[VS1:%.*]], <vscale x 2 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmaccu.mask.nxv2i64.nxv2i32.nxv2i32.i64(<vscale x 2 x i64> [[VD]], <vscale x 2 x i32> [[VS1]], <vscale x 2 x i32> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vwmaccu_vv_u64m2_m(vbool32_t mask, vuint64m2_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) {
  return __riscv_vwmaccu_vv_u64m2_m(mask, vd, vs1, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vwmaccu_vx_u64m2_m
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i64> [[VD:%.*]], i32 noundef signext [[RS1:%.*]], <vscale x 2 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmaccu.mask.nxv2i64.i32.nxv2i32.i64(<vscale x 2 x i64> [[VD]], i32 [[RS1]], <vscale x 2 x i32> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vwmaccu_vx_u64m2_m(vbool32_t mask, vuint64m2_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) {
  return __riscv_vwmaccu_vx_u64m2_m(mask, vd, rs1, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vwmaccu_vv_u64m4_m
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i32> [[VS1:%.*]], <vscale x 4 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmaccu.mask.nxv4i64.nxv4i32.nxv4i32.i64(<vscale x 4 x i64> [[VD]], <vscale x 4 x i32> [[VS1]], <vscale x 4 x i32> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vwmaccu_vv_u64m4_m(vbool16_t mask, vuint64m4_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) {
  return __riscv_vwmaccu_vv_u64m4_m(mask, vd, vs1, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vwmaccu_vx_u64m4_m
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i64> [[VD:%.*]], i32 noundef signext [[RS1:%.*]], <vscale x 4 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmaccu.mask.nxv4i64.i32.nxv4i32.i64(<vscale x 4 x i64> [[VD]], i32 [[RS1]], <vscale x 4 x i32> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vwmaccu_vx_u64m4_m(vbool16_t mask, vuint64m4_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) {
  return __riscv_vwmaccu_vx_u64m4_m(mask, vd, rs1, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vwmaccu_vv_u64m8_m
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i32> [[VS1:%.*]], <vscale x 8 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmaccu.mask.nxv8i64.nxv8i32.nxv8i32.i64(<vscale x 8 x i64> [[VD]], <vscale x 8 x i32> [[VS1]], <vscale x 8 x i32> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vwmaccu_vv_u64m8_m(vbool8_t mask, vuint64m8_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) {
  return __riscv_vwmaccu_vv_u64m8_m(mask, vd, vs1, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vwmaccu_vx_u64m8_m
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i64> [[VD:%.*]], i32 noundef signext [[RS1:%.*]], <vscale x 8 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmaccu.mask.nxv8i64.i32.nxv8i32.i64(<vscale x 8 x i64> [[VD]], i32 [[RS1]], <vscale x 8 x i32> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vwmaccu_vx_u64m8_m(vbool8_t mask, vuint64m8_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) {
  return __riscv_vwmaccu_vx_u64m8_m(mask, vd, rs1, vs2, vl);
}