// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
// REQUIRES: riscv-registered-target
// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \
// RUN:   -target-feature +zvbb \
// RUN:   -target-feature +zvbc \
// RUN:   -target-feature +zvkb \
// RUN:   -target-feature +zvkg \
// RUN:   -target-feature +zvkned \
// RUN:   -target-feature +zvknhb \
// RUN:   -target-feature +zvksed \
// RUN:   -target-feature +zvksh \
// RUN:   -disable-O0-optnone \
// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
// RUN:   FileCheck %s

#include <riscv_vector.h>
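
// The tests below exercise the overloaded, non-policy forms of the
// __riscv_vcpop intrinsic, which maps to the Zvbb vcpop.v instruction
// (a per-element population count). The unmasked calls are expected to
// lower to the @llvm.riscv.vcpopv.* intrinsics with a poison
// passthrough operand, as the CHECK lines below verify.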
// CHECK-LABEL: @test_vcpop_v_u8mf8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vcpopv.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vcpop_v_u8mf8(vuint8mf8_t vs2, size_t vl) {
  return __riscv_vcpop(vs2, vl);
}

// CHECK-LABEL: @test_vcpop_v_u8mf4(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vcpopv.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vcpop_v_u8mf4(vuint8mf4_t vs2, size_t vl) {
  return __riscv_vcpop(vs2, vl);
}

// CHECK-LABEL: @test_vcpop_v_u8mf2(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vcpopv.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vcpop_v_u8mf2(vuint8mf2_t vs2, size_t vl) {
  return __riscv_vcpop(vs2, vl);
}

// CHECK-LABEL: @test_vcpop_v_u8m1(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vcpopv.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vcpop_v_u8m1(vuint8m1_t vs2, size_t vl) {
  return __riscv_vcpop(vs2, vl);
}

// CHECK-LABEL: @test_vcpop_v_u8m2(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vcpopv.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vcpop_v_u8m2(vuint8m2_t vs2, size_t vl) {
  return __riscv_vcpop(vs2, vl);
}

// CHECK-LABEL: @test_vcpop_v_u8m4(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vcpopv.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vcpop_v_u8m4(vuint8m4_t vs2, size_t vl) {
  return __riscv_vcpop(vs2, vl);
}

// CHECK-LABEL: @test_vcpop_v_u8m8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vcpopv.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vcpop_v_u8m8(vuint8m8_t vs2, size_t vl) {
  return __riscv_vcpop(vs2, vl);
}

// CHECK-LABEL: @test_vcpop_v_u16mf4(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vcpopv.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vcpop_v_u16mf4(vuint16mf4_t vs2, size_t vl) {
  return __riscv_vcpop(vs2, vl);
}

// CHECK-LABEL: @test_vcpop_v_u16mf2(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vcpopv.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vcpop_v_u16mf2(vuint16mf2_t vs2, size_t vl) {
  return __riscv_vcpop(vs2, vl);
}

// CHECK-LABEL: @test_vcpop_v_u16m1(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vcpopv.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vcpop_v_u16m1(vuint16m1_t vs2, size_t vl) {
  return __riscv_vcpop(vs2, vl);
}

// CHECK-LABEL: @test_vcpop_v_u16m2(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vcpopv.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vcpop_v_u16m2(vuint16m2_t vs2, size_t vl) {
  return __riscv_vcpop(vs2, vl);
}

// CHECK-LABEL: @test_vcpop_v_u16m4(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vcpopv.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vcpop_v_u16m4(vuint16m4_t vs2, size_t vl) {
  return __riscv_vcpop(vs2, vl);
}

// CHECK-LABEL: @test_vcpop_v_u16m8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vcpopv.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vcpop_v_u16m8(vuint16m8_t vs2, size_t vl) {
  return __riscv_vcpop(vs2, vl);
}

// CHECK-LABEL: @test_vcpop_v_u32mf2(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vcpopv.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vcpop_v_u32mf2(vuint32mf2_t vs2, size_t vl) {
  return __riscv_vcpop(vs2, vl);
}

// CHECK-LABEL: @test_vcpop_v_u32m1(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vcpopv.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vcpop_v_u32m1(vuint32m1_t vs2, size_t vl) {
  return __riscv_vcpop(vs2, vl);
}

// CHECK-LABEL: @test_vcpop_v_u32m2(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vcpopv.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vcpop_v_u32m2(vuint32m2_t vs2, size_t vl) {
  return __riscv_vcpop(vs2, vl);
}

// CHECK-LABEL: @test_vcpop_v_u32m4(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vcpopv.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vcpop_v_u32m4(vuint32m4_t vs2, size_t vl) {
  return __riscv_vcpop(vs2, vl);
}

// CHECK-LABEL: @test_vcpop_v_u32m8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vcpopv.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vcpop_v_u32m8(vuint32m8_t vs2, size_t vl) {
  return __riscv_vcpop(vs2, vl);
}

// CHECK-LABEL: @test_vcpop_v_u64m1(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vcpopv.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vcpop_v_u64m1(vuint64m1_t vs2, size_t vl) {
  return __riscv_vcpop(vs2, vl);
}

// CHECK-LABEL: @test_vcpop_v_u64m2(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vcpopv.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vcpop_v_u64m2(vuint64m2_t vs2, size_t vl) {
  return __riscv_vcpop(vs2, vl);
}

// CHECK-LABEL: @test_vcpop_v_u64m4(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vcpopv.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vcpop_v_u64m4(vuint64m4_t vs2, size_t vl) {
  return __riscv_vcpop(vs2, vl);
}

// CHECK-LABEL: @test_vcpop_v_u64m8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vcpopv.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vcpop_v_u64m8(vuint64m8_t vs2, size_t vl) {
  return __riscv_vcpop(vs2, vl);
}
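
// The masked, non-policy overloads below take the mask as the first
// argument and lower to the @llvm.riscv.vcpopv.mask.* intrinsics. The
// trailing "i64 3" is the policy operand: per the RVV intrinsic policy
// encoding, 3 combines TAIL_AGNOSTIC (1) and MASK_AGNOSTIC (2), the
// default for these _m variants.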
// CHECK-LABEL: @test_vcpop_v_u8mf8_m(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vcpopv.mask.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vcpop_v_u8mf8_m(vbool64_t mask, vuint8mf8_t vs2, size_t vl) {
  return __riscv_vcpop(mask, vs2, vl);
}

// CHECK-LABEL: @test_vcpop_v_u8mf4_m(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vcpopv.mask.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vcpop_v_u8mf4_m(vbool32_t mask, vuint8mf4_t vs2, size_t vl) {
  return __riscv_vcpop(mask, vs2, vl);
}

// CHECK-LABEL: @test_vcpop_v_u8mf2_m(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vcpopv.mask.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vcpop_v_u8mf2_m(vbool16_t mask, vuint8mf2_t vs2, size_t vl) {
  return __riscv_vcpop(mask, vs2, vl);
}

// CHECK-LABEL: @test_vcpop_v_u8m1_m(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vcpopv.mask.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vcpop_v_u8m1_m(vbool8_t mask, vuint8m1_t vs2, size_t vl) {
  return __riscv_vcpop(mask, vs2, vl);
}

// CHECK-LABEL: @test_vcpop_v_u8m2_m(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vcpopv.mask.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vcpop_v_u8m2_m(vbool4_t mask, vuint8m2_t vs2, size_t vl) {
  return __riscv_vcpop(mask, vs2, vl);
}

// CHECK-LABEL: @test_vcpop_v_u8m4_m(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vcpopv.mask.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vcpop_v_u8m4_m(vbool2_t mask, vuint8m4_t vs2, size_t vl) {
  return __riscv_vcpop(mask, vs2, vl);
}

// CHECK-LABEL: @test_vcpop_v_u8m8_m(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vcpopv.mask.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vcpop_v_u8m8_m(vbool1_t mask, vuint8m8_t vs2, size_t vl) {
  return __riscv_vcpop(mask, vs2, vl);
}

// CHECK-LABEL: @test_vcpop_v_u16mf4_m(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vcpopv.mask.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vcpop_v_u16mf4_m(vbool64_t mask, vuint16mf4_t vs2, size_t vl) {
  return __riscv_vcpop(mask, vs2, vl);
}

// CHECK-LABEL: @test_vcpop_v_u16mf2_m(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vcpopv.mask.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vcpop_v_u16mf2_m(vbool32_t mask, vuint16mf2_t vs2, size_t vl) {
  return __riscv_vcpop(mask, vs2, vl);
}

// CHECK-LABEL: @test_vcpop_v_u16m1_m(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vcpopv.mask.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vcpop_v_u16m1_m(vbool16_t mask, vuint16m1_t vs2, size_t vl) {
  return __riscv_vcpop(mask, vs2, vl);
}

// CHECK-LABEL: @test_vcpop_v_u16m2_m(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vcpopv.mask.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vcpop_v_u16m2_m(vbool8_t mask, vuint16m2_t vs2, size_t vl) {
  return __riscv_vcpop(mask, vs2, vl);
}

// CHECK-LABEL: @test_vcpop_v_u16m4_m(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vcpopv.mask.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vcpop_v_u16m4_m(vbool4_t mask, vuint16m4_t vs2, size_t vl) {
  return __riscv_vcpop(mask, vs2, vl);
}

// CHECK-LABEL: @test_vcpop_v_u16m8_m(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vcpopv.mask.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[VS2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vcpop_v_u16m8_m(vbool2_t mask, vuint16m8_t vs2, size_t vl) {
  return __riscv_vcpop(mask, vs2, vl);
}

// CHECK-LABEL: @test_vcpop_v_u32mf2_m(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vcpopv.mask.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[VS2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vcpop_v_u32mf2_m(vbool64_t mask, vuint32mf2_t vs2, size_t vl) {
  return __riscv_vcpop(mask, vs2, vl);
}

// CHECK-LABEL: @test_vcpop_v_u32m1_m(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vcpopv.mask.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[VS2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vcpop_v_u32m1_m(vbool32_t mask, vuint32m1_t vs2, size_t vl) {
  return __riscv_vcpop(mask, vs2, vl);
}

// CHECK-LABEL: @test_vcpop_v_u32m2_m(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vcpopv.mask.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[VS2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vcpop_v_u32m2_m(vbool16_t mask, vuint32m2_t vs2, size_t vl) {
  return __riscv_vcpop(mask, vs2, vl);
}

// CHECK-LABEL: @test_vcpop_v_u32m4_m(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vcpopv.mask.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[VS2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vcpop_v_u32m4_m(vbool8_t mask, vuint32m4_t vs2, size_t vl) {
  return __riscv_vcpop(mask, vs2, vl);
}

// CHECK-LABEL: @test_vcpop_v_u32m8_m(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vcpopv.mask.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[VS2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vcpop_v_u32m8_m(vbool4_t mask, vuint32m8_t vs2, size_t vl) {
  return __riscv_vcpop(mask, vs2, vl);
}

// CHECK-LABEL: @test_vcpop_v_u64m1_m(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vcpopv.mask.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[VS2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vcpop_v_u64m1_m(vbool64_t mask, vuint64m1_t vs2, size_t vl) {
  return __riscv_vcpop(mask, vs2, vl);
}

// CHECK-LABEL: @test_vcpop_v_u64m2_m(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vcpopv.mask.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[VS2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vcpop_v_u64m2_m(vbool32_t mask, vuint64m2_t vs2, size_t vl) {
  return __riscv_vcpop(mask, vs2, vl);
}

// CHECK-LABEL: @test_vcpop_v_u64m4_m(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vcpopv.mask.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[VS2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vcpop_v_u64m4_m(vbool16_t mask, vuint64m4_t vs2, size_t vl) {
  return __riscv_vcpop(mask, vs2, vl);
}

// CHECK-LABEL: @test_vcpop_v_u64m8_m(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vcpopv.mask.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[VS2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vcpop_v_u64m8_m(vbool8_t mask, vuint64m8_t vs2, size_t vl) {
  return __riscv_vcpop(mask, vs2, vl);
}
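
// A minimal usage sketch (illustrative only, kept in a comment so it is
// not part of the checked test): summing the set bits of a byte buffer
// with vcpop.v. The helper name and the reduction strategy are
// hypothetical, not taken from this test.
//
//   size_t popcount_bytes(const uint8_t *p, size_t n) {
//     size_t total = 0;
//     while (n > 0) {
//       size_t vl = __riscv_vsetvl_e8m8(n);
//       vuint8m8_t v = __riscv_vle8_v_u8m8(p, vl);
//       vuint8m8_t c = __riscv_vcpop(v, vl);  // per-element popcount
//       // Widening reduction: u8 counts accumulate into a u16 scalar.
//       vuint16m1_t zero = __riscv_vmv_v_x_u16m1(0, 1);
//       vuint16m1_t sum = __riscv_vwredsumu(c, zero, vl);
//       total += __riscv_vmv_x(sum);
//       p += vl;
//       n -= vl;
//     }
//     return total;
//   }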