1 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2 ; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+zvfh,+v,+m -target-abi=ilp32d \
3 ; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32
4 ; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfh,+v,+m -target-abi=lp64d \
5 ; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64
6 ; RUN: llc -mtriple=riscv32 -mattr=+v,+zvbb,+m -target-abi=ilp32d \
7 ; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK-ZVBB
8 ; RUN: llc -mtriple=riscv64 -mattr=+v,+zvbb,+m -target-abi=lp64d \
9 ; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK-ZVBB
11 declare <vscale x 1 x i8> @llvm.vp.ctpop.nxv1i8(<vscale x 1 x i8>, <vscale x 1 x i1>, i32)
define <vscale x 1 x i8> @vp_ctpop_nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_ctpop_nxv1i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vsrl.vi v9, v8, 1, v0.t
; CHECK-NEXT:    li a0, 85
; CHECK-NEXT:    vand.vx v9, v9, a0, v0.t
; CHECK-NEXT:    vsub.vv v8, v8, v9, v0.t
; CHECK-NEXT:    li a0, 51
; CHECK-NEXT:    vand.vx v9, v8, a0, v0.t
; CHECK-NEXT:    vsrl.vi v8, v8, 2, v0.t
; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
; CHECK-NEXT:    vadd.vv v8, v9, v8, v0.t
; CHECK-NEXT:    vsrl.vi v9, v8, 4, v0.t
; CHECK-NEXT:    vadd.vv v8, v8, v9, v0.t
; CHECK-NEXT:    vand.vi v8, v8, 15, v0.t
; CHECK-NEXT:    ret
;
; CHECK-ZVBB-LABEL: vp_ctpop_nxv1i8:
; CHECK-ZVBB:       # %bb.0:
; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-ZVBB-NEXT:    vcpop.v v8, v8, v0.t
; CHECK-ZVBB-NEXT:    ret
  %v = call <vscale x 1 x i8> @llvm.vp.ctpop.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i1> %m, i32 %evl)
  ret <vscale x 1 x i8> %v
}
define <vscale x 1 x i8> @vp_ctpop_nxv1i8_unmasked(<vscale x 1 x i8> %va, i32 zeroext %evl) {
; CHECK-LABEL: vp_ctpop_nxv1i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vsrl.vi v9, v8, 1
; CHECK-NEXT:    li a0, 85
; CHECK-NEXT:    vand.vx v9, v9, a0
; CHECK-NEXT:    vsub.vv v8, v8, v9
; CHECK-NEXT:    li a0, 51
; CHECK-NEXT:    vand.vx v9, v8, a0
; CHECK-NEXT:    vsrl.vi v8, v8, 2
; CHECK-NEXT:    vand.vx v8, v8, a0
; CHECK-NEXT:    vadd.vv v8, v9, v8
; CHECK-NEXT:    vsrl.vi v9, v8, 4
; CHECK-NEXT:    vadd.vv v8, v8, v9
; CHECK-NEXT:    vand.vi v8, v8, 15
; CHECK-NEXT:    ret
;
; CHECK-ZVBB-LABEL: vp_ctpop_nxv1i8_unmasked:
; CHECK-ZVBB:       # %bb.0:
; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-ZVBB-NEXT:    vcpop.v v8, v8
; CHECK-ZVBB-NEXT:    ret
  %v = call <vscale x 1 x i8> @llvm.vp.ctpop.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 1 x i8> %v
}
67 declare <vscale x 2 x i8> @llvm.vp.ctpop.nxv2i8(<vscale x 2 x i8>, <vscale x 2 x i1>, i32)
define <vscale x 2 x i8> @vp_ctpop_nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_ctpop_nxv2i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT:    vsrl.vi v9, v8, 1, v0.t
; CHECK-NEXT:    li a0, 85
; CHECK-NEXT:    vand.vx v9, v9, a0, v0.t
; CHECK-NEXT:    vsub.vv v8, v8, v9, v0.t
; CHECK-NEXT:    li a0, 51
; CHECK-NEXT:    vand.vx v9, v8, a0, v0.t
; CHECK-NEXT:    vsrl.vi v8, v8, 2, v0.t
; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
; CHECK-NEXT:    vadd.vv v8, v9, v8, v0.t
; CHECK-NEXT:    vsrl.vi v9, v8, 4, v0.t
; CHECK-NEXT:    vadd.vv v8, v8, v9, v0.t
; CHECK-NEXT:    vand.vi v8, v8, 15, v0.t
; CHECK-NEXT:    ret
;
; CHECK-ZVBB-LABEL: vp_ctpop_nxv2i8:
; CHECK-ZVBB:       # %bb.0:
; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-ZVBB-NEXT:    vcpop.v v8, v8, v0.t
; CHECK-ZVBB-NEXT:    ret
  %v = call <vscale x 2 x i8> @llvm.vp.ctpop.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x i8> %v
}
define <vscale x 2 x i8> @vp_ctpop_nxv2i8_unmasked(<vscale x 2 x i8> %va, i32 zeroext %evl) {
; CHECK-LABEL: vp_ctpop_nxv2i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT:    vsrl.vi v9, v8, 1
; CHECK-NEXT:    li a0, 85
; CHECK-NEXT:    vand.vx v9, v9, a0
; CHECK-NEXT:    vsub.vv v8, v8, v9
; CHECK-NEXT:    li a0, 51
; CHECK-NEXT:    vand.vx v9, v8, a0
; CHECK-NEXT:    vsrl.vi v8, v8, 2
; CHECK-NEXT:    vand.vx v8, v8, a0
; CHECK-NEXT:    vadd.vv v8, v9, v8
; CHECK-NEXT:    vsrl.vi v9, v8, 4
; CHECK-NEXT:    vadd.vv v8, v8, v9
; CHECK-NEXT:    vand.vi v8, v8, 15
; CHECK-NEXT:    ret
;
; CHECK-ZVBB-LABEL: vp_ctpop_nxv2i8_unmasked:
; CHECK-ZVBB:       # %bb.0:
; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-ZVBB-NEXT:    vcpop.v v8, v8
; CHECK-ZVBB-NEXT:    ret
  %v = call <vscale x 2 x i8> @llvm.vp.ctpop.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 2 x i8> %v
}
123 declare <vscale x 4 x i8> @llvm.vp.ctpop.nxv4i8(<vscale x 4 x i8>, <vscale x 4 x i1>, i32)
define <vscale x 4 x i8> @vp_ctpop_nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_ctpop_nxv4i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT:    vsrl.vi v9, v8, 1, v0.t
; CHECK-NEXT:    li a0, 85
; CHECK-NEXT:    vand.vx v9, v9, a0, v0.t
; CHECK-NEXT:    vsub.vv v8, v8, v9, v0.t
; CHECK-NEXT:    li a0, 51
; CHECK-NEXT:    vand.vx v9, v8, a0, v0.t
; CHECK-NEXT:    vsrl.vi v8, v8, 2, v0.t
; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
; CHECK-NEXT:    vadd.vv v8, v9, v8, v0.t
; CHECK-NEXT:    vsrl.vi v9, v8, 4, v0.t
; CHECK-NEXT:    vadd.vv v8, v8, v9, v0.t
; CHECK-NEXT:    vand.vi v8, v8, 15, v0.t
; CHECK-NEXT:    ret
;
; CHECK-ZVBB-LABEL: vp_ctpop_nxv4i8:
; CHECK-ZVBB:       # %bb.0:
; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-ZVBB-NEXT:    vcpop.v v8, v8, v0.t
; CHECK-ZVBB-NEXT:    ret
  %v = call <vscale x 4 x i8> @llvm.vp.ctpop.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i1> %m, i32 %evl)
  ret <vscale x 4 x i8> %v
}
define <vscale x 4 x i8> @vp_ctpop_nxv4i8_unmasked(<vscale x 4 x i8> %va, i32 zeroext %evl) {
; CHECK-LABEL: vp_ctpop_nxv4i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT:    vsrl.vi v9, v8, 1
; CHECK-NEXT:    li a0, 85
; CHECK-NEXT:    vand.vx v9, v9, a0
; CHECK-NEXT:    vsub.vv v8, v8, v9
; CHECK-NEXT:    li a0, 51
; CHECK-NEXT:    vand.vx v9, v8, a0
; CHECK-NEXT:    vsrl.vi v8, v8, 2
; CHECK-NEXT:    vand.vx v8, v8, a0
; CHECK-NEXT:    vadd.vv v8, v9, v8
; CHECK-NEXT:    vsrl.vi v9, v8, 4
; CHECK-NEXT:    vadd.vv v8, v8, v9
; CHECK-NEXT:    vand.vi v8, v8, 15
; CHECK-NEXT:    ret
;
; CHECK-ZVBB-LABEL: vp_ctpop_nxv4i8_unmasked:
; CHECK-ZVBB:       # %bb.0:
; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-ZVBB-NEXT:    vcpop.v v8, v8
; CHECK-ZVBB-NEXT:    ret
  %v = call <vscale x 4 x i8> @llvm.vp.ctpop.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 4 x i8> %v
}
179 declare <vscale x 8 x i8> @llvm.vp.ctpop.nxv8i8(<vscale x 8 x i8>, <vscale x 8 x i1>, i32)
define <vscale x 8 x i8> @vp_ctpop_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_ctpop_nxv8i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT:    vsrl.vi v9, v8, 1, v0.t
; CHECK-NEXT:    li a0, 85
; CHECK-NEXT:    vand.vx v9, v9, a0, v0.t
; CHECK-NEXT:    vsub.vv v8, v8, v9, v0.t
; CHECK-NEXT:    li a0, 51
; CHECK-NEXT:    vand.vx v9, v8, a0, v0.t
; CHECK-NEXT:    vsrl.vi v8, v8, 2, v0.t
; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
; CHECK-NEXT:    vadd.vv v8, v9, v8, v0.t
; CHECK-NEXT:    vsrl.vi v9, v8, 4, v0.t
; CHECK-NEXT:    vadd.vv v8, v8, v9, v0.t
; CHECK-NEXT:    vand.vi v8, v8, 15, v0.t
; CHECK-NEXT:    ret
;
; CHECK-ZVBB-LABEL: vp_ctpop_nxv8i8:
; CHECK-ZVBB:       # %bb.0:
; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
; CHECK-ZVBB-NEXT:    vcpop.v v8, v8, v0.t
; CHECK-ZVBB-NEXT:    ret
  %v = call <vscale x 8 x i8> @llvm.vp.ctpop.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x i8> %v
}
define <vscale x 8 x i8> @vp_ctpop_nxv8i8_unmasked(<vscale x 8 x i8> %va, i32 zeroext %evl) {
; CHECK-LABEL: vp_ctpop_nxv8i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT:    vsrl.vi v9, v8, 1
; CHECK-NEXT:    li a0, 85
; CHECK-NEXT:    vand.vx v9, v9, a0
; CHECK-NEXT:    vsub.vv v8, v8, v9
; CHECK-NEXT:    li a0, 51
; CHECK-NEXT:    vand.vx v9, v8, a0
; CHECK-NEXT:    vsrl.vi v8, v8, 2
; CHECK-NEXT:    vand.vx v8, v8, a0
; CHECK-NEXT:    vadd.vv v8, v9, v8
; CHECK-NEXT:    vsrl.vi v9, v8, 4
; CHECK-NEXT:    vadd.vv v8, v8, v9
; CHECK-NEXT:    vand.vi v8, v8, 15
; CHECK-NEXT:    ret
;
; CHECK-ZVBB-LABEL: vp_ctpop_nxv8i8_unmasked:
; CHECK-ZVBB:       # %bb.0:
; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
; CHECK-ZVBB-NEXT:    vcpop.v v8, v8
; CHECK-ZVBB-NEXT:    ret
  %v = call <vscale x 8 x i8> @llvm.vp.ctpop.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 8 x i8> %v
}
235 declare <vscale x 16 x i8> @llvm.vp.ctpop.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i1>, i32)
define <vscale x 16 x i8> @vp_ctpop_nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_ctpop_nxv16i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT:    vsrl.vi v10, v8, 1, v0.t
; CHECK-NEXT:    li a0, 85
; CHECK-NEXT:    vand.vx v10, v10, a0, v0.t
; CHECK-NEXT:    vsub.vv v8, v8, v10, v0.t
; CHECK-NEXT:    li a0, 51
; CHECK-NEXT:    vand.vx v10, v8, a0, v0.t
; CHECK-NEXT:    vsrl.vi v8, v8, 2, v0.t
; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
; CHECK-NEXT:    vadd.vv v8, v10, v8, v0.t
; CHECK-NEXT:    vsrl.vi v10, v8, 4, v0.t
; CHECK-NEXT:    vadd.vv v8, v8, v10, v0.t
; CHECK-NEXT:    vand.vi v8, v8, 15, v0.t
; CHECK-NEXT:    ret
;
; CHECK-ZVBB-LABEL: vp_ctpop_nxv16i8:
; CHECK-ZVBB:       # %bb.0:
; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
; CHECK-ZVBB-NEXT:    vcpop.v v8, v8, v0.t
; CHECK-ZVBB-NEXT:    ret
  %v = call <vscale x 16 x i8> @llvm.vp.ctpop.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i1> %m, i32 %evl)
  ret <vscale x 16 x i8> %v
}
define <vscale x 16 x i8> @vp_ctpop_nxv16i8_unmasked(<vscale x 16 x i8> %va, i32 zeroext %evl) {
; CHECK-LABEL: vp_ctpop_nxv16i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT:    vsrl.vi v10, v8, 1
; CHECK-NEXT:    li a0, 85
; CHECK-NEXT:    vand.vx v10, v10, a0
; CHECK-NEXT:    vsub.vv v8, v8, v10
; CHECK-NEXT:    li a0, 51
; CHECK-NEXT:    vand.vx v10, v8, a0
; CHECK-NEXT:    vsrl.vi v8, v8, 2
; CHECK-NEXT:    vand.vx v8, v8, a0
; CHECK-NEXT:    vadd.vv v8, v10, v8
; CHECK-NEXT:    vsrl.vi v10, v8, 4
; CHECK-NEXT:    vadd.vv v8, v8, v10
; CHECK-NEXT:    vand.vi v8, v8, 15
; CHECK-NEXT:    ret
;
; CHECK-ZVBB-LABEL: vp_ctpop_nxv16i8_unmasked:
; CHECK-ZVBB:       # %bb.0:
; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
; CHECK-ZVBB-NEXT:    vcpop.v v8, v8
; CHECK-ZVBB-NEXT:    ret
  %v = call <vscale x 16 x i8> @llvm.vp.ctpop.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 16 x i8> %v
}
291 declare <vscale x 32 x i8> @llvm.vp.ctpop.nxv32i8(<vscale x 32 x i8>, <vscale x 32 x i1>, i32)
define <vscale x 32 x i8> @vp_ctpop_nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_ctpop_nxv32i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT:    vsrl.vi v12, v8, 1, v0.t
; CHECK-NEXT:    li a0, 85
; CHECK-NEXT:    vand.vx v12, v12, a0, v0.t
; CHECK-NEXT:    vsub.vv v8, v8, v12, v0.t
; CHECK-NEXT:    li a0, 51
; CHECK-NEXT:    vand.vx v12, v8, a0, v0.t
; CHECK-NEXT:    vsrl.vi v8, v8, 2, v0.t
; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
; CHECK-NEXT:    vadd.vv v8, v12, v8, v0.t
; CHECK-NEXT:    vsrl.vi v12, v8, 4, v0.t
; CHECK-NEXT:    vadd.vv v8, v8, v12, v0.t
; CHECK-NEXT:    vand.vi v8, v8, 15, v0.t
; CHECK-NEXT:    ret
;
; CHECK-ZVBB-LABEL: vp_ctpop_nxv32i8:
; CHECK-ZVBB:       # %bb.0:
; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
; CHECK-ZVBB-NEXT:    vcpop.v v8, v8, v0.t
; CHECK-ZVBB-NEXT:    ret
  %v = call <vscale x 32 x i8> @llvm.vp.ctpop.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i1> %m, i32 %evl)
  ret <vscale x 32 x i8> %v
}
define <vscale x 32 x i8> @vp_ctpop_nxv32i8_unmasked(<vscale x 32 x i8> %va, i32 zeroext %evl) {
; CHECK-LABEL: vp_ctpop_nxv32i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT:    vsrl.vi v12, v8, 1
; CHECK-NEXT:    li a0, 85
; CHECK-NEXT:    vand.vx v12, v12, a0
; CHECK-NEXT:    vsub.vv v8, v8, v12
; CHECK-NEXT:    li a0, 51
; CHECK-NEXT:    vand.vx v12, v8, a0
; CHECK-NEXT:    vsrl.vi v8, v8, 2
; CHECK-NEXT:    vand.vx v8, v8, a0
; CHECK-NEXT:    vadd.vv v8, v12, v8
; CHECK-NEXT:    vsrl.vi v12, v8, 4
; CHECK-NEXT:    vadd.vv v8, v8, v12
; CHECK-NEXT:    vand.vi v8, v8, 15
; CHECK-NEXT:    ret
;
; CHECK-ZVBB-LABEL: vp_ctpop_nxv32i8_unmasked:
; CHECK-ZVBB:       # %bb.0:
; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
; CHECK-ZVBB-NEXT:    vcpop.v v8, v8
; CHECK-ZVBB-NEXT:    ret
  %v = call <vscale x 32 x i8> @llvm.vp.ctpop.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 32 x i8> %v
}
347 declare <vscale x 64 x i8> @llvm.vp.ctpop.nxv64i8(<vscale x 64 x i8>, <vscale x 64 x i1>, i32)
define <vscale x 64 x i8> @vp_ctpop_nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_ctpop_nxv64i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT:    vsrl.vi v16, v8, 1, v0.t
; CHECK-NEXT:    li a0, 85
; CHECK-NEXT:    vand.vx v16, v16, a0, v0.t
; CHECK-NEXT:    vsub.vv v8, v8, v16, v0.t
; CHECK-NEXT:    li a0, 51
; CHECK-NEXT:    vand.vx v16, v8, a0, v0.t
; CHECK-NEXT:    vsrl.vi v8, v8, 2, v0.t
; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
; CHECK-NEXT:    vadd.vv v8, v16, v8, v0.t
; CHECK-NEXT:    vsrl.vi v16, v8, 4, v0.t
; CHECK-NEXT:    vadd.vv v8, v8, v16, v0.t
; CHECK-NEXT:    vand.vi v8, v8, 15, v0.t
; CHECK-NEXT:    ret
;
; CHECK-ZVBB-LABEL: vp_ctpop_nxv64i8:
; CHECK-ZVBB:       # %bb.0:
; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
; CHECK-ZVBB-NEXT:    vcpop.v v8, v8, v0.t
; CHECK-ZVBB-NEXT:    ret
  %v = call <vscale x 64 x i8> @llvm.vp.ctpop.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i1> %m, i32 %evl)
  ret <vscale x 64 x i8> %v
}
define <vscale x 64 x i8> @vp_ctpop_nxv64i8_unmasked(<vscale x 64 x i8> %va, i32 zeroext %evl) {
; CHECK-LABEL: vp_ctpop_nxv64i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT:    vsrl.vi v16, v8, 1
; CHECK-NEXT:    li a0, 85
; CHECK-NEXT:    vand.vx v16, v16, a0
; CHECK-NEXT:    vsub.vv v8, v8, v16
; CHECK-NEXT:    li a0, 51
; CHECK-NEXT:    vand.vx v16, v8, a0
; CHECK-NEXT:    vsrl.vi v8, v8, 2
; CHECK-NEXT:    vand.vx v8, v8, a0
; CHECK-NEXT:    vadd.vv v8, v16, v8
; CHECK-NEXT:    vsrl.vi v16, v8, 4
; CHECK-NEXT:    vadd.vv v8, v8, v16
; CHECK-NEXT:    vand.vi v8, v8, 15
; CHECK-NEXT:    ret
;
; CHECK-ZVBB-LABEL: vp_ctpop_nxv64i8_unmasked:
; CHECK-ZVBB:       # %bb.0:
; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
; CHECK-ZVBB-NEXT:    vcpop.v v8, v8
; CHECK-ZVBB-NEXT:    ret
  %v = call <vscale x 64 x i8> @llvm.vp.ctpop.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 64 x i8> %v
}
403 declare <vscale x 1 x i16> @llvm.vp.ctpop.nxv1i16(<vscale x 1 x i16>, <vscale x 1 x i1>, i32)
define <vscale x 1 x i16> @vp_ctpop_nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_ctpop_nxv1i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vsrl.vi v9, v8, 1, v0.t
; CHECK-NEXT:    lui a0, 5
; CHECK-NEXT:    addi a0, a0, 1365
; CHECK-NEXT:    vand.vx v9, v9, a0, v0.t
; CHECK-NEXT:    vsub.vv v8, v8, v9, v0.t
; CHECK-NEXT:    lui a0, 3
; CHECK-NEXT:    addi a0, a0, 819
; CHECK-NEXT:    vand.vx v9, v8, a0, v0.t
; CHECK-NEXT:    vsrl.vi v8, v8, 2, v0.t
; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
; CHECK-NEXT:    vadd.vv v8, v9, v8, v0.t
; CHECK-NEXT:    vsrl.vi v9, v8, 4, v0.t
; CHECK-NEXT:    vadd.vv v8, v8, v9, v0.t
; CHECK-NEXT:    lui a0, 1
; CHECK-NEXT:    addi a0, a0, -241
; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
; CHECK-NEXT:    li a0, 257
; CHECK-NEXT:    vmul.vx v8, v8, a0, v0.t
; CHECK-NEXT:    vsrl.vi v8, v8, 8, v0.t
; CHECK-NEXT:    ret
;
; CHECK-ZVBB-LABEL: vp_ctpop_nxv1i16:
; CHECK-ZVBB:       # %bb.0:
; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-ZVBB-NEXT:    vcpop.v v8, v8, v0.t
; CHECK-ZVBB-NEXT:    ret
  %v = call <vscale x 1 x i16> @llvm.vp.ctpop.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i1> %m, i32 %evl)
  ret <vscale x 1 x i16> %v
}
define <vscale x 1 x i16> @vp_ctpop_nxv1i16_unmasked(<vscale x 1 x i16> %va, i32 zeroext %evl) {
; CHECK-LABEL: vp_ctpop_nxv1i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vsrl.vi v9, v8, 1
; CHECK-NEXT:    lui a0, 5
; CHECK-NEXT:    addi a0, a0, 1365
; CHECK-NEXT:    vand.vx v9, v9, a0
; CHECK-NEXT:    vsub.vv v8, v8, v9
; CHECK-NEXT:    lui a0, 3
; CHECK-NEXT:    addi a0, a0, 819
; CHECK-NEXT:    vand.vx v9, v8, a0
; CHECK-NEXT:    vsrl.vi v8, v8, 2
; CHECK-NEXT:    vand.vx v8, v8, a0
; CHECK-NEXT:    vadd.vv v8, v9, v8
; CHECK-NEXT:    vsrl.vi v9, v8, 4
; CHECK-NEXT:    vadd.vv v8, v8, v9
; CHECK-NEXT:    lui a0, 1
; CHECK-NEXT:    addi a0, a0, -241
; CHECK-NEXT:    vand.vx v8, v8, a0
; CHECK-NEXT:    li a0, 257
; CHECK-NEXT:    vmul.vx v8, v8, a0
; CHECK-NEXT:    vsrl.vi v8, v8, 8
; CHECK-NEXT:    ret
;
; CHECK-ZVBB-LABEL: vp_ctpop_nxv1i16_unmasked:
; CHECK-ZVBB:       # %bb.0:
; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-ZVBB-NEXT:    vcpop.v v8, v8
; CHECK-ZVBB-NEXT:    ret
  %v = call <vscale x 1 x i16> @llvm.vp.ctpop.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 1 x i16> %v
}
473 declare <vscale x 2 x i16> @llvm.vp.ctpop.nxv2i16(<vscale x 2 x i16>, <vscale x 2 x i1>, i32)
define <vscale x 2 x i16> @vp_ctpop_nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_ctpop_nxv2i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vsrl.vi v9, v8, 1, v0.t
; CHECK-NEXT:    lui a0, 5
; CHECK-NEXT:    addi a0, a0, 1365
; CHECK-NEXT:    vand.vx v9, v9, a0, v0.t
; CHECK-NEXT:    vsub.vv v8, v8, v9, v0.t
; CHECK-NEXT:    lui a0, 3
; CHECK-NEXT:    addi a0, a0, 819
; CHECK-NEXT:    vand.vx v9, v8, a0, v0.t
; CHECK-NEXT:    vsrl.vi v8, v8, 2, v0.t
; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
; CHECK-NEXT:    vadd.vv v8, v9, v8, v0.t
; CHECK-NEXT:    vsrl.vi v9, v8, 4, v0.t
; CHECK-NEXT:    vadd.vv v8, v8, v9, v0.t
; CHECK-NEXT:    lui a0, 1
; CHECK-NEXT:    addi a0, a0, -241
; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
; CHECK-NEXT:    li a0, 257
; CHECK-NEXT:    vmul.vx v8, v8, a0, v0.t
; CHECK-NEXT:    vsrl.vi v8, v8, 8, v0.t
; CHECK-NEXT:    ret
;
; CHECK-ZVBB-LABEL: vp_ctpop_nxv2i16:
; CHECK-ZVBB:       # %bb.0:
; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-ZVBB-NEXT:    vcpop.v v8, v8, v0.t
; CHECK-ZVBB-NEXT:    ret
  %v = call <vscale x 2 x i16> @llvm.vp.ctpop.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x i16> %v
}
define <vscale x 2 x i16> @vp_ctpop_nxv2i16_unmasked(<vscale x 2 x i16> %va, i32 zeroext %evl) {
; CHECK-LABEL: vp_ctpop_nxv2i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vsrl.vi v9, v8, 1
; CHECK-NEXT:    lui a0, 5
; CHECK-NEXT:    addi a0, a0, 1365
; CHECK-NEXT:    vand.vx v9, v9, a0
; CHECK-NEXT:    vsub.vv v8, v8, v9
; CHECK-NEXT:    lui a0, 3
; CHECK-NEXT:    addi a0, a0, 819
; CHECK-NEXT:    vand.vx v9, v8, a0
; CHECK-NEXT:    vsrl.vi v8, v8, 2
; CHECK-NEXT:    vand.vx v8, v8, a0
; CHECK-NEXT:    vadd.vv v8, v9, v8
; CHECK-NEXT:    vsrl.vi v9, v8, 4
; CHECK-NEXT:    vadd.vv v8, v8, v9
; CHECK-NEXT:    lui a0, 1
; CHECK-NEXT:    addi a0, a0, -241
; CHECK-NEXT:    vand.vx v8, v8, a0
; CHECK-NEXT:    li a0, 257
; CHECK-NEXT:    vmul.vx v8, v8, a0
; CHECK-NEXT:    vsrl.vi v8, v8, 8
; CHECK-NEXT:    ret
;
; CHECK-ZVBB-LABEL: vp_ctpop_nxv2i16_unmasked:
; CHECK-ZVBB:       # %bb.0:
; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-ZVBB-NEXT:    vcpop.v v8, v8
; CHECK-ZVBB-NEXT:    ret
  %v = call <vscale x 2 x i16> @llvm.vp.ctpop.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 2 x i16> %v
}
543 declare <vscale x 4 x i16> @llvm.vp.ctpop.nxv4i16(<vscale x 4 x i16>, <vscale x 4 x i1>, i32)
define <vscale x 4 x i16> @vp_ctpop_nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_ctpop_nxv4i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vsrl.vi v9, v8, 1, v0.t
; CHECK-NEXT:    lui a0, 5
; CHECK-NEXT:    addi a0, a0, 1365
; CHECK-NEXT:    vand.vx v9, v9, a0, v0.t
; CHECK-NEXT:    vsub.vv v8, v8, v9, v0.t
; CHECK-NEXT:    lui a0, 3
; CHECK-NEXT:    addi a0, a0, 819
; CHECK-NEXT:    vand.vx v9, v8, a0, v0.t
; CHECK-NEXT:    vsrl.vi v8, v8, 2, v0.t
; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
; CHECK-NEXT:    vadd.vv v8, v9, v8, v0.t
; CHECK-NEXT:    vsrl.vi v9, v8, 4, v0.t
; CHECK-NEXT:    vadd.vv v8, v8, v9, v0.t
; CHECK-NEXT:    lui a0, 1
; CHECK-NEXT:    addi a0, a0, -241
; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
; CHECK-NEXT:    li a0, 257
; CHECK-NEXT:    vmul.vx v8, v8, a0, v0.t
; CHECK-NEXT:    vsrl.vi v8, v8, 8, v0.t
; CHECK-NEXT:    ret
;
; CHECK-ZVBB-LABEL: vp_ctpop_nxv4i16:
; CHECK-ZVBB:       # %bb.0:
; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-ZVBB-NEXT:    vcpop.v v8, v8, v0.t
; CHECK-ZVBB-NEXT:    ret
  %v = call <vscale x 4 x i16> @llvm.vp.ctpop.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i1> %m, i32 %evl)
  ret <vscale x 4 x i16> %v
}
define <vscale x 4 x i16> @vp_ctpop_nxv4i16_unmasked(<vscale x 4 x i16> %va, i32 zeroext %evl) {
; CHECK-LABEL: vp_ctpop_nxv4i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vsrl.vi v9, v8, 1
; CHECK-NEXT:    lui a0, 5
; CHECK-NEXT:    addi a0, a0, 1365
; CHECK-NEXT:    vand.vx v9, v9, a0
; CHECK-NEXT:    vsub.vv v8, v8, v9
; CHECK-NEXT:    lui a0, 3
; CHECK-NEXT:    addi a0, a0, 819
; CHECK-NEXT:    vand.vx v9, v8, a0
; CHECK-NEXT:    vsrl.vi v8, v8, 2
; CHECK-NEXT:    vand.vx v8, v8, a0
; CHECK-NEXT:    vadd.vv v8, v9, v8
; CHECK-NEXT:    vsrl.vi v9, v8, 4
; CHECK-NEXT:    vadd.vv v8, v8, v9
; CHECK-NEXT:    lui a0, 1
; CHECK-NEXT:    addi a0, a0, -241
; CHECK-NEXT:    vand.vx v8, v8, a0
; CHECK-NEXT:    li a0, 257
; CHECK-NEXT:    vmul.vx v8, v8, a0
; CHECK-NEXT:    vsrl.vi v8, v8, 8
; CHECK-NEXT:    ret
;
; CHECK-ZVBB-LABEL: vp_ctpop_nxv4i16_unmasked:
; CHECK-ZVBB:       # %bb.0:
; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-ZVBB-NEXT:    vcpop.v v8, v8
; CHECK-ZVBB-NEXT:    ret
  %v = call <vscale x 4 x i16> @llvm.vp.ctpop.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 4 x i16> %v
}
613 declare <vscale x 8 x i16> @llvm.vp.ctpop.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i1>, i32)
define <vscale x 8 x i16> @vp_ctpop_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_ctpop_nxv8i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    vsrl.vi v10, v8, 1, v0.t
; CHECK-NEXT:    lui a0, 5
; CHECK-NEXT:    addi a0, a0, 1365
; CHECK-NEXT:    vand.vx v10, v10, a0, v0.t
; CHECK-NEXT:    vsub.vv v8, v8, v10, v0.t
; CHECK-NEXT:    lui a0, 3
; CHECK-NEXT:    addi a0, a0, 819
; CHECK-NEXT:    vand.vx v10, v8, a0, v0.t
; CHECK-NEXT:    vsrl.vi v8, v8, 2, v0.t
; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
; CHECK-NEXT:    vadd.vv v8, v10, v8, v0.t
; CHECK-NEXT:    vsrl.vi v10, v8, 4, v0.t
; CHECK-NEXT:    vadd.vv v8, v8, v10, v0.t
; CHECK-NEXT:    lui a0, 1
; CHECK-NEXT:    addi a0, a0, -241
; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
; CHECK-NEXT:    li a0, 257
; CHECK-NEXT:    vmul.vx v8, v8, a0, v0.t
; CHECK-NEXT:    vsrl.vi v8, v8, 8, v0.t
; CHECK-NEXT:    ret
;
; CHECK-ZVBB-LABEL: vp_ctpop_nxv8i16:
; CHECK-ZVBB:       # %bb.0:
; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-ZVBB-NEXT:    vcpop.v v8, v8, v0.t
; CHECK-ZVBB-NEXT:    ret
  %v = call <vscale x 8 x i16> @llvm.vp.ctpop.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x i16> %v
}
define <vscale x 8 x i16> @vp_ctpop_nxv8i16_unmasked(<vscale x 8 x i16> %va, i32 zeroext %evl) {
; CHECK-LABEL: vp_ctpop_nxv8i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    vsrl.vi v10, v8, 1
; CHECK-NEXT:    lui a0, 5
; CHECK-NEXT:    addi a0, a0, 1365
; CHECK-NEXT:    vand.vx v10, v10, a0
; CHECK-NEXT:    vsub.vv v8, v8, v10
; CHECK-NEXT:    lui a0, 3
; CHECK-NEXT:    addi a0, a0, 819
; CHECK-NEXT:    vand.vx v10, v8, a0
; CHECK-NEXT:    vsrl.vi v8, v8, 2
; CHECK-NEXT:    vand.vx v8, v8, a0
; CHECK-NEXT:    vadd.vv v8, v10, v8
; CHECK-NEXT:    vsrl.vi v10, v8, 4
; CHECK-NEXT:    vadd.vv v8, v8, v10
; CHECK-NEXT:    lui a0, 1
; CHECK-NEXT:    addi a0, a0, -241
; CHECK-NEXT:    vand.vx v8, v8, a0
; CHECK-NEXT:    li a0, 257
; CHECK-NEXT:    vmul.vx v8, v8, a0
; CHECK-NEXT:    vsrl.vi v8, v8, 8
; CHECK-NEXT:    ret
;
; CHECK-ZVBB-LABEL: vp_ctpop_nxv8i16_unmasked:
; CHECK-ZVBB:       # %bb.0:
; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-ZVBB-NEXT:    vcpop.v v8, v8
; CHECK-ZVBB-NEXT:    ret
  %v = call <vscale x 8 x i16> @llvm.vp.ctpop.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 8 x i16> %v
}
683 declare <vscale x 16 x i16> @llvm.vp.ctpop.nxv16i16(<vscale x 16 x i16>, <vscale x 16 x i1>, i32)
define <vscale x 16 x i16> @vp_ctpop_nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_ctpop_nxv16i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT:    vsrl.vi v12, v8, 1, v0.t
; CHECK-NEXT:    lui a0, 5
; CHECK-NEXT:    addi a0, a0, 1365
; CHECK-NEXT:    vand.vx v12, v12, a0, v0.t
; CHECK-NEXT:    vsub.vv v8, v8, v12, v0.t
; CHECK-NEXT:    lui a0, 3
; CHECK-NEXT:    addi a0, a0, 819
; CHECK-NEXT:    vand.vx v12, v8, a0, v0.t
; CHECK-NEXT:    vsrl.vi v8, v8, 2, v0.t
; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
; CHECK-NEXT:    vadd.vv v8, v12, v8, v0.t
; CHECK-NEXT:    vsrl.vi v12, v8, 4, v0.t
; CHECK-NEXT:    vadd.vv v8, v8, v12, v0.t
; CHECK-NEXT:    lui a0, 1
; CHECK-NEXT:    addi a0, a0, -241
; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
; CHECK-NEXT:    li a0, 257
; CHECK-NEXT:    vmul.vx v8, v8, a0, v0.t
; CHECK-NEXT:    vsrl.vi v8, v8, 8, v0.t
; CHECK-NEXT:    ret
;
; CHECK-ZVBB-LABEL: vp_ctpop_nxv16i16:
; CHECK-ZVBB:       # %bb.0:
; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; CHECK-ZVBB-NEXT:    vcpop.v v8, v8, v0.t
; CHECK-ZVBB-NEXT:    ret
  %v = call <vscale x 16 x i16> @llvm.vp.ctpop.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i1> %m, i32 %evl)
  ret <vscale x 16 x i16> %v
}
define <vscale x 16 x i16> @vp_ctpop_nxv16i16_unmasked(<vscale x 16 x i16> %va, i32 zeroext %evl) {
; CHECK-LABEL: vp_ctpop_nxv16i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT:    vsrl.vi v12, v8, 1
; CHECK-NEXT:    lui a0, 5
; CHECK-NEXT:    addi a0, a0, 1365
; CHECK-NEXT:    vand.vx v12, v12, a0
; CHECK-NEXT:    vsub.vv v8, v8, v12
; CHECK-NEXT:    lui a0, 3
; CHECK-NEXT:    addi a0, a0, 819
; CHECK-NEXT:    vand.vx v12, v8, a0
; CHECK-NEXT:    vsrl.vi v8, v8, 2
; CHECK-NEXT:    vand.vx v8, v8, a0
; CHECK-NEXT:    vadd.vv v8, v12, v8
; CHECK-NEXT:    vsrl.vi v12, v8, 4
; CHECK-NEXT:    vadd.vv v8, v8, v12
; CHECK-NEXT:    lui a0, 1
; CHECK-NEXT:    addi a0, a0, -241
; CHECK-NEXT:    vand.vx v8, v8, a0
; CHECK-NEXT:    li a0, 257
; CHECK-NEXT:    vmul.vx v8, v8, a0
; CHECK-NEXT:    vsrl.vi v8, v8, 8
; CHECK-NEXT:    ret
;
; CHECK-ZVBB-LABEL: vp_ctpop_nxv16i16_unmasked:
; CHECK-ZVBB:       # %bb.0:
; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; CHECK-ZVBB-NEXT:    vcpop.v v8, v8
; CHECK-ZVBB-NEXT:    ret
  %v = call <vscale x 16 x i16> @llvm.vp.ctpop.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 16 x i16> %v
}
753 declare <vscale x 32 x i16> @llvm.vp.ctpop.nxv32i16(<vscale x 32 x i16>, <vscale x 32 x i1>, i32)
define <vscale x 32 x i16> @vp_ctpop_nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_ctpop_nxv32i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT:    vsrl.vi v16, v8, 1, v0.t
; CHECK-NEXT:    lui a0, 5
; CHECK-NEXT:    addi a0, a0, 1365
; CHECK-NEXT:    vand.vx v16, v16, a0, v0.t
; CHECK-NEXT:    vsub.vv v8, v8, v16, v0.t
; CHECK-NEXT:    lui a0, 3
; CHECK-NEXT:    addi a0, a0, 819
; CHECK-NEXT:    vand.vx v16, v8, a0, v0.t
; CHECK-NEXT:    vsrl.vi v8, v8, 2, v0.t
; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
; CHECK-NEXT:    vadd.vv v8, v16, v8, v0.t
; CHECK-NEXT:    vsrl.vi v16, v8, 4, v0.t
; CHECK-NEXT:    vadd.vv v8, v8, v16, v0.t
; CHECK-NEXT:    lui a0, 1
; CHECK-NEXT:    addi a0, a0, -241
; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
; CHECK-NEXT:    li a0, 257
; CHECK-NEXT:    vmul.vx v8, v8, a0, v0.t
; CHECK-NEXT:    vsrl.vi v8, v8, 8, v0.t
; CHECK-NEXT:    ret
;
; CHECK-ZVBB-LABEL: vp_ctpop_nxv32i16:
; CHECK-ZVBB:       # %bb.0:
; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
; CHECK-ZVBB-NEXT:    vcpop.v v8, v8, v0.t
; CHECK-ZVBB-NEXT:    ret
  %v = call <vscale x 32 x i16> @llvm.vp.ctpop.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i1> %m, i32 %evl)
  ret <vscale x 32 x i16> %v
}
define <vscale x 32 x i16> @vp_ctpop_nxv32i16_unmasked(<vscale x 32 x i16> %va, i32 zeroext %evl) {
; CHECK-LABEL: vp_ctpop_nxv32i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT:    vsrl.vi v16, v8, 1
; CHECK-NEXT:    lui a0, 5
; CHECK-NEXT:    addi a0, a0, 1365
; CHECK-NEXT:    vand.vx v16, v16, a0
; CHECK-NEXT:    vsub.vv v8, v8, v16
; CHECK-NEXT:    lui a0, 3
; CHECK-NEXT:    addi a0, a0, 819
; CHECK-NEXT:    vand.vx v16, v8, a0
; CHECK-NEXT:    vsrl.vi v8, v8, 2
; CHECK-NEXT:    vand.vx v8, v8, a0
; CHECK-NEXT:    vadd.vv v8, v16, v8
; CHECK-NEXT:    vsrl.vi v16, v8, 4
; CHECK-NEXT:    vadd.vv v8, v8, v16
; CHECK-NEXT:    lui a0, 1
; CHECK-NEXT:    addi a0, a0, -241
; CHECK-NEXT:    vand.vx v8, v8, a0
; CHECK-NEXT:    li a0, 257
; CHECK-NEXT:    vmul.vx v8, v8, a0
; CHECK-NEXT:    vsrl.vi v8, v8, 8
; CHECK-NEXT:    ret
;
; CHECK-ZVBB-LABEL: vp_ctpop_nxv32i16_unmasked:
; CHECK-ZVBB:       # %bb.0:
; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
; CHECK-ZVBB-NEXT:    vcpop.v v8, v8
; CHECK-ZVBB-NEXT:    ret
  %v = call <vscale x 32 x i16> @llvm.vp.ctpop.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 32 x i16> %v
}
823 declare <vscale x 1 x i32> @llvm.vp.ctpop.nxv1i32(<vscale x 1 x i32>, <vscale x 1 x i1>, i32)
define <vscale x 1 x i32> @vp_ctpop_nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_ctpop_nxv1i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vsrl.vi v9, v8, 1, v0.t
; CHECK-NEXT:    lui a0, 349525
; CHECK-NEXT:    addi a0, a0, 1365
; CHECK-NEXT:    vand.vx v9, v9, a0, v0.t
; CHECK-NEXT:    vsub.vv v8, v8, v9, v0.t
; CHECK-NEXT:    lui a0, 209715
; CHECK-NEXT:    addi a0, a0, 819
; CHECK-NEXT:    vand.vx v9, v8, a0, v0.t
; CHECK-NEXT:    vsrl.vi v8, v8, 2, v0.t
; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
; CHECK-NEXT:    vadd.vv v8, v9, v8, v0.t
; CHECK-NEXT:    vsrl.vi v9, v8, 4, v0.t
; CHECK-NEXT:    vadd.vv v8, v8, v9, v0.t
; CHECK-NEXT:    lui a0, 61681
; CHECK-NEXT:    addi a0, a0, -241
; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
; CHECK-NEXT:    lui a0, 4112
; CHECK-NEXT:    addi a0, a0, 257
; CHECK-NEXT:    vmul.vx v8, v8, a0, v0.t
; CHECK-NEXT:    vsrl.vi v8, v8, 24, v0.t
; CHECK-NEXT:    ret
;
; CHECK-ZVBB-LABEL: vp_ctpop_nxv1i32:
; CHECK-ZVBB:       # %bb.0:
; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-ZVBB-NEXT:    vcpop.v v8, v8, v0.t
; CHECK-ZVBB-NEXT:    ret
  %v = call <vscale x 1 x i32> @llvm.vp.ctpop.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i1> %m, i32 %evl)
  ret <vscale x 1 x i32> %v
}
; Unmasked variant: the all-ones mask (splat (i1 true)) lets the backend drop
; the v0.t predication from the same SWAR sequence; Zvbb emits an unmasked
; vcpop.v.
860 define <vscale x 1 x i32> @vp_ctpop_nxv1i32_unmasked(<vscale x 1 x i32> %va, i32 zeroext %evl) {
861 ; CHECK-LABEL: vp_ctpop_nxv1i32_unmasked:
863 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
864 ; CHECK-NEXT: vsrl.vi v9, v8, 1
865 ; CHECK-NEXT: lui a0, 349525
866 ; CHECK-NEXT: addi a0, a0, 1365
867 ; CHECK-NEXT: vand.vx v9, v9, a0
868 ; CHECK-NEXT: vsub.vv v8, v8, v9
869 ; CHECK-NEXT: lui a0, 209715
870 ; CHECK-NEXT: addi a0, a0, 819
871 ; CHECK-NEXT: vand.vx v9, v8, a0
872 ; CHECK-NEXT: vsrl.vi v8, v8, 2
873 ; CHECK-NEXT: vand.vx v8, v8, a0
874 ; CHECK-NEXT: vadd.vv v8, v9, v8
875 ; CHECK-NEXT: vsrl.vi v9, v8, 4
876 ; CHECK-NEXT: vadd.vv v8, v8, v9
877 ; CHECK-NEXT: lui a0, 61681
878 ; CHECK-NEXT: addi a0, a0, -241
879 ; CHECK-NEXT: vand.vx v8, v8, a0
880 ; CHECK-NEXT: lui a0, 4112
881 ; CHECK-NEXT: addi a0, a0, 257
882 ; CHECK-NEXT: vmul.vx v8, v8, a0
883 ; CHECK-NEXT: vsrl.vi v8, v8, 24
886 ; CHECK-ZVBB-LABEL: vp_ctpop_nxv1i32_unmasked:
887 ; CHECK-ZVBB: # %bb.0:
888 ; CHECK-ZVBB-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
889 ; CHECK-ZVBB-NEXT: vcpop.v v8, v8
890 ; CHECK-ZVBB-NEXT: ret
891 %v = call <vscale x 1 x i32> @llvm.vp.ctpop.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i1> splat (i1 true), i32 %evl)
892 ret <vscale x 1 x i32> %v
895 declare <vscale x 2 x i32> @llvm.vp.ctpop.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i1>, i32)
; Masked vp.ctpop, SEW=32, LMUL=m1: same SWAR popcount as the nxv1i32 case,
; only the vsetvli LMUL differs; Zvbb: single masked vcpop.v.
897 define <vscale x 2 x i32> @vp_ctpop_nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
898 ; CHECK-LABEL: vp_ctpop_nxv2i32:
900 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
901 ; CHECK-NEXT: vsrl.vi v9, v8, 1, v0.t
902 ; CHECK-NEXT: lui a0, 349525
903 ; CHECK-NEXT: addi a0, a0, 1365
904 ; CHECK-NEXT: vand.vx v9, v9, a0, v0.t
905 ; CHECK-NEXT: vsub.vv v8, v8, v9, v0.t
906 ; CHECK-NEXT: lui a0, 209715
907 ; CHECK-NEXT: addi a0, a0, 819
908 ; CHECK-NEXT: vand.vx v9, v8, a0, v0.t
909 ; CHECK-NEXT: vsrl.vi v8, v8, 2, v0.t
910 ; CHECK-NEXT: vand.vx v8, v8, a0, v0.t
911 ; CHECK-NEXT: vadd.vv v8, v9, v8, v0.t
912 ; CHECK-NEXT: vsrl.vi v9, v8, 4, v0.t
913 ; CHECK-NEXT: vadd.vv v8, v8, v9, v0.t
914 ; CHECK-NEXT: lui a0, 61681
915 ; CHECK-NEXT: addi a0, a0, -241
916 ; CHECK-NEXT: vand.vx v8, v8, a0, v0.t
917 ; CHECK-NEXT: lui a0, 4112
918 ; CHECK-NEXT: addi a0, a0, 257
919 ; CHECK-NEXT: vmul.vx v8, v8, a0, v0.t
920 ; CHECK-NEXT: vsrl.vi v8, v8, 24, v0.t
923 ; CHECK-ZVBB-LABEL: vp_ctpop_nxv2i32:
924 ; CHECK-ZVBB: # %bb.0:
925 ; CHECK-ZVBB-NEXT: vsetvli zero, a0, e32, m1, ta, ma
926 ; CHECK-ZVBB-NEXT: vcpop.v v8, v8, v0.t
927 ; CHECK-ZVBB-NEXT: ret
928 %v = call <vscale x 2 x i32> @llvm.vp.ctpop.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i1> %m, i32 %evl)
929 ret <vscale x 2 x i32> %v
; Unmasked variant (all-true mask): same SWAR sequence without v0.t; Zvbb
; emits an unmasked vcpop.v.
932 define <vscale x 2 x i32> @vp_ctpop_nxv2i32_unmasked(<vscale x 2 x i32> %va, i32 zeroext %evl) {
933 ; CHECK-LABEL: vp_ctpop_nxv2i32_unmasked:
935 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
936 ; CHECK-NEXT: vsrl.vi v9, v8, 1
937 ; CHECK-NEXT: lui a0, 349525
938 ; CHECK-NEXT: addi a0, a0, 1365
939 ; CHECK-NEXT: vand.vx v9, v9, a0
940 ; CHECK-NEXT: vsub.vv v8, v8, v9
941 ; CHECK-NEXT: lui a0, 209715
942 ; CHECK-NEXT: addi a0, a0, 819
943 ; CHECK-NEXT: vand.vx v9, v8, a0
944 ; CHECK-NEXT: vsrl.vi v8, v8, 2
945 ; CHECK-NEXT: vand.vx v8, v8, a0
946 ; CHECK-NEXT: vadd.vv v8, v9, v8
947 ; CHECK-NEXT: vsrl.vi v9, v8, 4
948 ; CHECK-NEXT: vadd.vv v8, v8, v9
949 ; CHECK-NEXT: lui a0, 61681
950 ; CHECK-NEXT: addi a0, a0, -241
951 ; CHECK-NEXT: vand.vx v8, v8, a0
952 ; CHECK-NEXT: lui a0, 4112
953 ; CHECK-NEXT: addi a0, a0, 257
954 ; CHECK-NEXT: vmul.vx v8, v8, a0
955 ; CHECK-NEXT: vsrl.vi v8, v8, 24
958 ; CHECK-ZVBB-LABEL: vp_ctpop_nxv2i32_unmasked:
959 ; CHECK-ZVBB: # %bb.0:
960 ; CHECK-ZVBB-NEXT: vsetvli zero, a0, e32, m1, ta, ma
961 ; CHECK-ZVBB-NEXT: vcpop.v v8, v8
962 ; CHECK-ZVBB-NEXT: ret
963 %v = call <vscale x 2 x i32> @llvm.vp.ctpop.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
964 ret <vscale x 2 x i32> %v
967 declare <vscale x 4 x i32> @llvm.vp.ctpop.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i1>, i32)
; Masked vp.ctpop, SEW=32, LMUL=m2: SWAR popcount with v10 as the scratch
; register group; Zvbb: single masked vcpop.v.
969 define <vscale x 4 x i32> @vp_ctpop_nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
970 ; CHECK-LABEL: vp_ctpop_nxv4i32:
972 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
973 ; CHECK-NEXT: vsrl.vi v10, v8, 1, v0.t
974 ; CHECK-NEXT: lui a0, 349525
975 ; CHECK-NEXT: addi a0, a0, 1365
976 ; CHECK-NEXT: vand.vx v10, v10, a0, v0.t
977 ; CHECK-NEXT: vsub.vv v8, v8, v10, v0.t
978 ; CHECK-NEXT: lui a0, 209715
979 ; CHECK-NEXT: addi a0, a0, 819
980 ; CHECK-NEXT: vand.vx v10, v8, a0, v0.t
981 ; CHECK-NEXT: vsrl.vi v8, v8, 2, v0.t
982 ; CHECK-NEXT: vand.vx v8, v8, a0, v0.t
983 ; CHECK-NEXT: vadd.vv v8, v10, v8, v0.t
984 ; CHECK-NEXT: vsrl.vi v10, v8, 4, v0.t
985 ; CHECK-NEXT: vadd.vv v8, v8, v10, v0.t
986 ; CHECK-NEXT: lui a0, 61681
987 ; CHECK-NEXT: addi a0, a0, -241
988 ; CHECK-NEXT: vand.vx v8, v8, a0, v0.t
989 ; CHECK-NEXT: lui a0, 4112
990 ; CHECK-NEXT: addi a0, a0, 257
991 ; CHECK-NEXT: vmul.vx v8, v8, a0, v0.t
992 ; CHECK-NEXT: vsrl.vi v8, v8, 24, v0.t
995 ; CHECK-ZVBB-LABEL: vp_ctpop_nxv4i32:
996 ; CHECK-ZVBB: # %bb.0:
997 ; CHECK-ZVBB-NEXT: vsetvli zero, a0, e32, m2, ta, ma
998 ; CHECK-ZVBB-NEXT: vcpop.v v8, v8, v0.t
999 ; CHECK-ZVBB-NEXT: ret
1000 %v = call <vscale x 4 x i32> @llvm.vp.ctpop.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i1> %m, i32 %evl)
1001 ret <vscale x 4 x i32> %v
; Unmasked variant (all-true mask): same SWAR sequence without v0.t; Zvbb
; emits an unmasked vcpop.v.
1004 define <vscale x 4 x i32> @vp_ctpop_nxv4i32_unmasked(<vscale x 4 x i32> %va, i32 zeroext %evl) {
1005 ; CHECK-LABEL: vp_ctpop_nxv4i32_unmasked:
1007 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
1008 ; CHECK-NEXT: vsrl.vi v10, v8, 1
1009 ; CHECK-NEXT: lui a0, 349525
1010 ; CHECK-NEXT: addi a0, a0, 1365
1011 ; CHECK-NEXT: vand.vx v10, v10, a0
1012 ; CHECK-NEXT: vsub.vv v8, v8, v10
1013 ; CHECK-NEXT: lui a0, 209715
1014 ; CHECK-NEXT: addi a0, a0, 819
1015 ; CHECK-NEXT: vand.vx v10, v8, a0
1016 ; CHECK-NEXT: vsrl.vi v8, v8, 2
1017 ; CHECK-NEXT: vand.vx v8, v8, a0
1018 ; CHECK-NEXT: vadd.vv v8, v10, v8
1019 ; CHECK-NEXT: vsrl.vi v10, v8, 4
1020 ; CHECK-NEXT: vadd.vv v8, v8, v10
1021 ; CHECK-NEXT: lui a0, 61681
1022 ; CHECK-NEXT: addi a0, a0, -241
1023 ; CHECK-NEXT: vand.vx v8, v8, a0
1024 ; CHECK-NEXT: lui a0, 4112
1025 ; CHECK-NEXT: addi a0, a0, 257
1026 ; CHECK-NEXT: vmul.vx v8, v8, a0
1027 ; CHECK-NEXT: vsrl.vi v8, v8, 24
1030 ; CHECK-ZVBB-LABEL: vp_ctpop_nxv4i32_unmasked:
1031 ; CHECK-ZVBB: # %bb.0:
1032 ; CHECK-ZVBB-NEXT: vsetvli zero, a0, e32, m2, ta, ma
1033 ; CHECK-ZVBB-NEXT: vcpop.v v8, v8
1034 ; CHECK-ZVBB-NEXT: ret
1035 %v = call <vscale x 4 x i32> @llvm.vp.ctpop.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i1> splat (i1 true), i32 %evl)
1036 ret <vscale x 4 x i32> %v
1039 declare <vscale x 8 x i32> @llvm.vp.ctpop.nxv8i32(<vscale x 8 x i32>, <vscale x 8 x i1>, i32)
; Masked vp.ctpop, SEW=32, LMUL=m4: SWAR popcount with v12 as scratch; Zvbb:
; single masked vcpop.v.
1041 define <vscale x 8 x i32> @vp_ctpop_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
1042 ; CHECK-LABEL: vp_ctpop_nxv8i32:
1044 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
1045 ; CHECK-NEXT: vsrl.vi v12, v8, 1, v0.t
1046 ; CHECK-NEXT: lui a0, 349525
1047 ; CHECK-NEXT: addi a0, a0, 1365
1048 ; CHECK-NEXT: vand.vx v12, v12, a0, v0.t
1049 ; CHECK-NEXT: vsub.vv v8, v8, v12, v0.t
1050 ; CHECK-NEXT: lui a0, 209715
1051 ; CHECK-NEXT: addi a0, a0, 819
1052 ; CHECK-NEXT: vand.vx v12, v8, a0, v0.t
1053 ; CHECK-NEXT: vsrl.vi v8, v8, 2, v0.t
1054 ; CHECK-NEXT: vand.vx v8, v8, a0, v0.t
1055 ; CHECK-NEXT: vadd.vv v8, v12, v8, v0.t
1056 ; CHECK-NEXT: vsrl.vi v12, v8, 4, v0.t
1057 ; CHECK-NEXT: vadd.vv v8, v8, v12, v0.t
1058 ; CHECK-NEXT: lui a0, 61681
1059 ; CHECK-NEXT: addi a0, a0, -241
1060 ; CHECK-NEXT: vand.vx v8, v8, a0, v0.t
1061 ; CHECK-NEXT: lui a0, 4112
1062 ; CHECK-NEXT: addi a0, a0, 257
1063 ; CHECK-NEXT: vmul.vx v8, v8, a0, v0.t
1064 ; CHECK-NEXT: vsrl.vi v8, v8, 24, v0.t
1067 ; CHECK-ZVBB-LABEL: vp_ctpop_nxv8i32:
1068 ; CHECK-ZVBB: # %bb.0:
1069 ; CHECK-ZVBB-NEXT: vsetvli zero, a0, e32, m4, ta, ma
1070 ; CHECK-ZVBB-NEXT: vcpop.v v8, v8, v0.t
1071 ; CHECK-ZVBB-NEXT: ret
1072 %v = call <vscale x 8 x i32> @llvm.vp.ctpop.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i1> %m, i32 %evl)
1073 ret <vscale x 8 x i32> %v
; Unmasked variant (all-true mask): same SWAR sequence without v0.t; Zvbb
; emits an unmasked vcpop.v.
1076 define <vscale x 8 x i32> @vp_ctpop_nxv8i32_unmasked(<vscale x 8 x i32> %va, i32 zeroext %evl) {
1077 ; CHECK-LABEL: vp_ctpop_nxv8i32_unmasked:
1079 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
1080 ; CHECK-NEXT: vsrl.vi v12, v8, 1
1081 ; CHECK-NEXT: lui a0, 349525
1082 ; CHECK-NEXT: addi a0, a0, 1365
1083 ; CHECK-NEXT: vand.vx v12, v12, a0
1084 ; CHECK-NEXT: vsub.vv v8, v8, v12
1085 ; CHECK-NEXT: lui a0, 209715
1086 ; CHECK-NEXT: addi a0, a0, 819
1087 ; CHECK-NEXT: vand.vx v12, v8, a0
1088 ; CHECK-NEXT: vsrl.vi v8, v8, 2
1089 ; CHECK-NEXT: vand.vx v8, v8, a0
1090 ; CHECK-NEXT: vadd.vv v8, v12, v8
1091 ; CHECK-NEXT: vsrl.vi v12, v8, 4
1092 ; CHECK-NEXT: vadd.vv v8, v8, v12
1093 ; CHECK-NEXT: lui a0, 61681
1094 ; CHECK-NEXT: addi a0, a0, -241
1095 ; CHECK-NEXT: vand.vx v8, v8, a0
1096 ; CHECK-NEXT: lui a0, 4112
1097 ; CHECK-NEXT: addi a0, a0, 257
1098 ; CHECK-NEXT: vmul.vx v8, v8, a0
1099 ; CHECK-NEXT: vsrl.vi v8, v8, 24
1102 ; CHECK-ZVBB-LABEL: vp_ctpop_nxv8i32_unmasked:
1103 ; CHECK-ZVBB: # %bb.0:
1104 ; CHECK-ZVBB-NEXT: vsetvli zero, a0, e32, m4, ta, ma
1105 ; CHECK-ZVBB-NEXT: vcpop.v v8, v8
1106 ; CHECK-ZVBB-NEXT: ret
1107 %v = call <vscale x 8 x i32> @llvm.vp.ctpop.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i1> splat (i1 true), i32 %evl)
1108 ret <vscale x 8 x i32> %v
1111 declare <vscale x 16 x i32> @llvm.vp.ctpop.nxv16i32(<vscale x 16 x i32>, <vscale x 16 x i1>, i32)
; Masked vp.ctpop, SEW=32, LMUL=m8: SWAR popcount with v16 as scratch; Zvbb:
; single masked vcpop.v.
1113 define <vscale x 16 x i32> @vp_ctpop_nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
1114 ; CHECK-LABEL: vp_ctpop_nxv16i32:
1116 ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
1117 ; CHECK-NEXT: vsrl.vi v16, v8, 1, v0.t
1118 ; CHECK-NEXT: lui a0, 349525
1119 ; CHECK-NEXT: addi a0, a0, 1365
1120 ; CHECK-NEXT: vand.vx v16, v16, a0, v0.t
1121 ; CHECK-NEXT: vsub.vv v8, v8, v16, v0.t
1122 ; CHECK-NEXT: lui a0, 209715
1123 ; CHECK-NEXT: addi a0, a0, 819
1124 ; CHECK-NEXT: vand.vx v16, v8, a0, v0.t
1125 ; CHECK-NEXT: vsrl.vi v8, v8, 2, v0.t
1126 ; CHECK-NEXT: vand.vx v8, v8, a0, v0.t
1127 ; CHECK-NEXT: vadd.vv v8, v16, v8, v0.t
1128 ; CHECK-NEXT: vsrl.vi v16, v8, 4, v0.t
1129 ; CHECK-NEXT: vadd.vv v8, v8, v16, v0.t
1130 ; CHECK-NEXT: lui a0, 61681
1131 ; CHECK-NEXT: addi a0, a0, -241
1132 ; CHECK-NEXT: vand.vx v8, v8, a0, v0.t
1133 ; CHECK-NEXT: lui a0, 4112
1134 ; CHECK-NEXT: addi a0, a0, 257
1135 ; CHECK-NEXT: vmul.vx v8, v8, a0, v0.t
1136 ; CHECK-NEXT: vsrl.vi v8, v8, 24, v0.t
1139 ; CHECK-ZVBB-LABEL: vp_ctpop_nxv16i32:
1140 ; CHECK-ZVBB: # %bb.0:
1141 ; CHECK-ZVBB-NEXT: vsetvli zero, a0, e32, m8, ta, ma
1142 ; CHECK-ZVBB-NEXT: vcpop.v v8, v8, v0.t
1143 ; CHECK-ZVBB-NEXT: ret
1144 %v = call <vscale x 16 x i32> @llvm.vp.ctpop.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i1> %m, i32 %evl)
1145 ret <vscale x 16 x i32> %v
; Unmasked variant (all-true mask): same SWAR sequence without v0.t; Zvbb
; emits an unmasked vcpop.v.
1148 define <vscale x 16 x i32> @vp_ctpop_nxv16i32_unmasked(<vscale x 16 x i32> %va, i32 zeroext %evl) {
1149 ; CHECK-LABEL: vp_ctpop_nxv16i32_unmasked:
1151 ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
1152 ; CHECK-NEXT: vsrl.vi v16, v8, 1
1153 ; CHECK-NEXT: lui a0, 349525
1154 ; CHECK-NEXT: addi a0, a0, 1365
1155 ; CHECK-NEXT: vand.vx v16, v16, a0
1156 ; CHECK-NEXT: vsub.vv v8, v8, v16
1157 ; CHECK-NEXT: lui a0, 209715
1158 ; CHECK-NEXT: addi a0, a0, 819
1159 ; CHECK-NEXT: vand.vx v16, v8, a0
1160 ; CHECK-NEXT: vsrl.vi v8, v8, 2
1161 ; CHECK-NEXT: vand.vx v8, v8, a0
1162 ; CHECK-NEXT: vadd.vv v8, v16, v8
1163 ; CHECK-NEXT: vsrl.vi v16, v8, 4
1164 ; CHECK-NEXT: vadd.vv v8, v8, v16
1165 ; CHECK-NEXT: lui a0, 61681
1166 ; CHECK-NEXT: addi a0, a0, -241
1167 ; CHECK-NEXT: vand.vx v8, v8, a0
1168 ; CHECK-NEXT: lui a0, 4112
1169 ; CHECK-NEXT: addi a0, a0, 257
1170 ; CHECK-NEXT: vmul.vx v8, v8, a0
1171 ; CHECK-NEXT: vsrl.vi v8, v8, 24
1174 ; CHECK-ZVBB-LABEL: vp_ctpop_nxv16i32_unmasked:
1175 ; CHECK-ZVBB: # %bb.0:
1176 ; CHECK-ZVBB-NEXT: vsetvli zero, a0, e32, m8, ta, ma
1177 ; CHECK-ZVBB-NEXT: vcpop.v v8, v8
1178 ; CHECK-ZVBB-NEXT: ret
1179 %v = call <vscale x 16 x i32> @llvm.vp.ctpop.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i1> splat (i1 true), i32 %evl)
1180 ret <vscale x 16 x i32> %v
1183 declare <vscale x 1 x i64> @llvm.vp.ctpop.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i1>, i32)
; Masked vp.ctpop, SEW=64, LMUL=m1.  RV32 cannot hold a 64-bit constant in a
; scalar register, so each SWAR mask is built by splatting its 32-bit half
; with vmv.v.x under a temporary e32 VLMAX vsetvli, then used via vand.vv /
; vmul.vv; RV64 materializes the full 64-bit constants with addiw/slli/add
; and uses vand.vx / vmul.vx.  The final shift is by 56 (vsrl.vx).  Zvbb:
; a single masked vcpop.v.
1185 define <vscale x 1 x i64> @vp_ctpop_nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i1> %m, i32 zeroext %evl) {
1186 ; RV32-LABEL: vp_ctpop_nxv1i64:
1188 ; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma
1189 ; RV32-NEXT: vsrl.vi v9, v8, 1, v0.t
1190 ; RV32-NEXT: lui a1, 349525
1191 ; RV32-NEXT: addi a1, a1, 1365
1192 ; RV32-NEXT: vsetvli a2, zero, e32, m1, ta, ma
1193 ; RV32-NEXT: vmv.v.x v10, a1
1194 ; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma
1195 ; RV32-NEXT: vand.vv v9, v9, v10, v0.t
1196 ; RV32-NEXT: vsub.vv v8, v8, v9, v0.t
1197 ; RV32-NEXT: lui a1, 209715
1198 ; RV32-NEXT: addi a1, a1, 819
1199 ; RV32-NEXT: vsetvli a2, zero, e32, m1, ta, ma
1200 ; RV32-NEXT: vmv.v.x v9, a1
1201 ; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma
1202 ; RV32-NEXT: vand.vv v10, v8, v9, v0.t
1203 ; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t
1204 ; RV32-NEXT: vand.vv v8, v8, v9, v0.t
1205 ; RV32-NEXT: vadd.vv v8, v10, v8, v0.t
1206 ; RV32-NEXT: vsrl.vi v9, v8, 4, v0.t
1207 ; RV32-NEXT: vadd.vv v8, v8, v9, v0.t
1208 ; RV32-NEXT: lui a1, 61681
1209 ; RV32-NEXT: addi a1, a1, -241
1210 ; RV32-NEXT: vsetvli a2, zero, e32, m1, ta, ma
1211 ; RV32-NEXT: vmv.v.x v9, a1
1212 ; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma
1213 ; RV32-NEXT: vand.vv v8, v8, v9, v0.t
1214 ; RV32-NEXT: lui a1, 4112
1215 ; RV32-NEXT: addi a1, a1, 257
1216 ; RV32-NEXT: vsetvli a2, zero, e32, m1, ta, ma
1217 ; RV32-NEXT: vmv.v.x v9, a1
1218 ; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma
1219 ; RV32-NEXT: vmul.vv v8, v8, v9, v0.t
1220 ; RV32-NEXT: li a0, 56
1221 ; RV32-NEXT: vsrl.vx v8, v8, a0, v0.t
1224 ; RV64-LABEL: vp_ctpop_nxv1i64:
1226 ; RV64-NEXT: vsetvli zero, a0, e64, m1, ta, ma
1227 ; RV64-NEXT: vsrl.vi v9, v8, 1, v0.t
1228 ; RV64-NEXT: lui a0, 349525
1229 ; RV64-NEXT: addiw a0, a0, 1365
1230 ; RV64-NEXT: slli a1, a0, 32
1231 ; RV64-NEXT: add a0, a0, a1
1232 ; RV64-NEXT: vand.vx v9, v9, a0, v0.t
1233 ; RV64-NEXT: vsub.vv v8, v8, v9, v0.t
1234 ; RV64-NEXT: lui a0, 209715
1235 ; RV64-NEXT: addiw a0, a0, 819
1236 ; RV64-NEXT: slli a1, a0, 32
1237 ; RV64-NEXT: add a0, a0, a1
1238 ; RV64-NEXT: vand.vx v9, v8, a0, v0.t
1239 ; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t
1240 ; RV64-NEXT: vand.vx v8, v8, a0, v0.t
1241 ; RV64-NEXT: vadd.vv v8, v9, v8, v0.t
1242 ; RV64-NEXT: vsrl.vi v9, v8, 4, v0.t
1243 ; RV64-NEXT: vadd.vv v8, v8, v9, v0.t
1244 ; RV64-NEXT: lui a0, 61681
1245 ; RV64-NEXT: addiw a0, a0, -241
1246 ; RV64-NEXT: slli a1, a0, 32
1247 ; RV64-NEXT: add a0, a0, a1
1248 ; RV64-NEXT: vand.vx v8, v8, a0, v0.t
1249 ; RV64-NEXT: lui a0, 4112
1250 ; RV64-NEXT: addiw a0, a0, 257
1251 ; RV64-NEXT: slli a1, a0, 32
1252 ; RV64-NEXT: add a0, a0, a1
1253 ; RV64-NEXT: vmul.vx v8, v8, a0, v0.t
1254 ; RV64-NEXT: li a0, 56
1255 ; RV64-NEXT: vsrl.vx v8, v8, a0, v0.t
1258 ; CHECK-ZVBB-LABEL: vp_ctpop_nxv1i64:
1259 ; CHECK-ZVBB: # %bb.0:
1260 ; CHECK-ZVBB-NEXT: vsetvli zero, a0, e64, m1, ta, ma
1261 ; CHECK-ZVBB-NEXT: vcpop.v v8, v8, v0.t
1262 ; CHECK-ZVBB-NEXT: ret
1263 %v = call <vscale x 1 x i64> @llvm.vp.ctpop.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i1> %m, i32 %evl)
1264 ret <vscale x 1 x i64> %v
; Unmasked i64 variant (all-true mask): same RV32 constant-splat / RV64
; 64-bit-constant lowering as the masked case, without v0.t predication;
; Zvbb emits an unmasked vcpop.v.
1267 define <vscale x 1 x i64> @vp_ctpop_nxv1i64_unmasked(<vscale x 1 x i64> %va, i32 zeroext %evl) {
1268 ; RV32-LABEL: vp_ctpop_nxv1i64_unmasked:
1270 ; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma
1271 ; RV32-NEXT: vsrl.vi v9, v8, 1
1272 ; RV32-NEXT: lui a1, 349525
1273 ; RV32-NEXT: addi a1, a1, 1365
1274 ; RV32-NEXT: vsetvli a2, zero, e32, m1, ta, ma
1275 ; RV32-NEXT: vmv.v.x v10, a1
1276 ; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma
1277 ; RV32-NEXT: vand.vv v9, v9, v10
1278 ; RV32-NEXT: vsub.vv v8, v8, v9
1279 ; RV32-NEXT: lui a1, 209715
1280 ; RV32-NEXT: addi a1, a1, 819
1281 ; RV32-NEXT: vsetvli a2, zero, e32, m1, ta, ma
1282 ; RV32-NEXT: vmv.v.x v9, a1
1283 ; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma
1284 ; RV32-NEXT: vand.vv v10, v8, v9
1285 ; RV32-NEXT: vsrl.vi v8, v8, 2
1286 ; RV32-NEXT: vand.vv v8, v8, v9
1287 ; RV32-NEXT: vadd.vv v8, v10, v8
1288 ; RV32-NEXT: vsrl.vi v9, v8, 4
1289 ; RV32-NEXT: vadd.vv v8, v8, v9
1290 ; RV32-NEXT: lui a1, 61681
1291 ; RV32-NEXT: addi a1, a1, -241
1292 ; RV32-NEXT: vsetvli a2, zero, e32, m1, ta, ma
1293 ; RV32-NEXT: vmv.v.x v9, a1
1294 ; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma
1295 ; RV32-NEXT: vand.vv v8, v8, v9
1296 ; RV32-NEXT: lui a1, 4112
1297 ; RV32-NEXT: addi a1, a1, 257
1298 ; RV32-NEXT: vsetvli a2, zero, e32, m1, ta, ma
1299 ; RV32-NEXT: vmv.v.x v9, a1
1300 ; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma
1301 ; RV32-NEXT: vmul.vv v8, v8, v9
1302 ; RV32-NEXT: li a0, 56
1303 ; RV32-NEXT: vsrl.vx v8, v8, a0
1306 ; RV64-LABEL: vp_ctpop_nxv1i64_unmasked:
1308 ; RV64-NEXT: vsetvli zero, a0, e64, m1, ta, ma
1309 ; RV64-NEXT: vsrl.vi v9, v8, 1
1310 ; RV64-NEXT: lui a0, 349525
1311 ; RV64-NEXT: addiw a0, a0, 1365
1312 ; RV64-NEXT: slli a1, a0, 32
1313 ; RV64-NEXT: add a0, a0, a1
1314 ; RV64-NEXT: vand.vx v9, v9, a0
1315 ; RV64-NEXT: vsub.vv v8, v8, v9
1316 ; RV64-NEXT: lui a0, 209715
1317 ; RV64-NEXT: addiw a0, a0, 819
1318 ; RV64-NEXT: slli a1, a0, 32
1319 ; RV64-NEXT: add a0, a0, a1
1320 ; RV64-NEXT: vand.vx v9, v8, a0
1321 ; RV64-NEXT: vsrl.vi v8, v8, 2
1322 ; RV64-NEXT: vand.vx v8, v8, a0
1323 ; RV64-NEXT: vadd.vv v8, v9, v8
1324 ; RV64-NEXT: vsrl.vi v9, v8, 4
1325 ; RV64-NEXT: vadd.vv v8, v8, v9
1326 ; RV64-NEXT: lui a0, 61681
1327 ; RV64-NEXT: addiw a0, a0, -241
1328 ; RV64-NEXT: slli a1, a0, 32
1329 ; RV64-NEXT: add a0, a0, a1
1330 ; RV64-NEXT: vand.vx v8, v8, a0
1331 ; RV64-NEXT: lui a0, 4112
1332 ; RV64-NEXT: addiw a0, a0, 257
1333 ; RV64-NEXT: slli a1, a0, 32
1334 ; RV64-NEXT: add a0, a0, a1
1335 ; RV64-NEXT: vmul.vx v8, v8, a0
1336 ; RV64-NEXT: li a0, 56
1337 ; RV64-NEXT: vsrl.vx v8, v8, a0
1340 ; CHECK-ZVBB-LABEL: vp_ctpop_nxv1i64_unmasked:
1341 ; CHECK-ZVBB: # %bb.0:
1342 ; CHECK-ZVBB-NEXT: vsetvli zero, a0, e64, m1, ta, ma
1343 ; CHECK-ZVBB-NEXT: vcpop.v v8, v8
1344 ; CHECK-ZVBB-NEXT: ret
1345 %v = call <vscale x 1 x i64> @llvm.vp.ctpop.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i1> splat (i1 true), i32 %evl)
1346 ret <vscale x 1 x i64> %v
1349 declare <vscale x 2 x i64> @llvm.vp.ctpop.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i1>, i32)
; Masked vp.ctpop, SEW=64, LMUL=m2: same RV32 constant-splat / RV64 64-bit
; constant lowering as the nxv1i64 case, with v10/v12 register groups as
; scratch; final shift by 56; Zvbb: single masked vcpop.v.
1351 define <vscale x 2 x i64> @vp_ctpop_nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
1352 ; RV32-LABEL: vp_ctpop_nxv2i64:
1354 ; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma
1355 ; RV32-NEXT: vsrl.vi v10, v8, 1, v0.t
1356 ; RV32-NEXT: lui a1, 349525
1357 ; RV32-NEXT: addi a1, a1, 1365
1358 ; RV32-NEXT: vsetvli a2, zero, e32, m2, ta, ma
1359 ; RV32-NEXT: vmv.v.x v12, a1
1360 ; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma
1361 ; RV32-NEXT: vand.vv v10, v10, v12, v0.t
1362 ; RV32-NEXT: vsub.vv v8, v8, v10, v0.t
1363 ; RV32-NEXT: lui a1, 209715
1364 ; RV32-NEXT: addi a1, a1, 819
1365 ; RV32-NEXT: vsetvli a2, zero, e32, m2, ta, ma
1366 ; RV32-NEXT: vmv.v.x v10, a1
1367 ; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma
1368 ; RV32-NEXT: vand.vv v12, v8, v10, v0.t
1369 ; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t
1370 ; RV32-NEXT: vand.vv v8, v8, v10, v0.t
1371 ; RV32-NEXT: vadd.vv v8, v12, v8, v0.t
1372 ; RV32-NEXT: vsrl.vi v10, v8, 4, v0.t
1373 ; RV32-NEXT: vadd.vv v8, v8, v10, v0.t
1374 ; RV32-NEXT: lui a1, 61681
1375 ; RV32-NEXT: addi a1, a1, -241
1376 ; RV32-NEXT: vsetvli a2, zero, e32, m2, ta, ma
1377 ; RV32-NEXT: vmv.v.x v10, a1
1378 ; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma
1379 ; RV32-NEXT: vand.vv v8, v8, v10, v0.t
1380 ; RV32-NEXT: lui a1, 4112
1381 ; RV32-NEXT: addi a1, a1, 257
1382 ; RV32-NEXT: vsetvli a2, zero, e32, m2, ta, ma
1383 ; RV32-NEXT: vmv.v.x v10, a1
1384 ; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma
1385 ; RV32-NEXT: vmul.vv v8, v8, v10, v0.t
1386 ; RV32-NEXT: li a0, 56
1387 ; RV32-NEXT: vsrl.vx v8, v8, a0, v0.t
1390 ; RV64-LABEL: vp_ctpop_nxv2i64:
1392 ; RV64-NEXT: vsetvli zero, a0, e64, m2, ta, ma
1393 ; RV64-NEXT: vsrl.vi v10, v8, 1, v0.t
1394 ; RV64-NEXT: lui a0, 349525
1395 ; RV64-NEXT: addiw a0, a0, 1365
1396 ; RV64-NEXT: slli a1, a0, 32
1397 ; RV64-NEXT: add a0, a0, a1
1398 ; RV64-NEXT: vand.vx v10, v10, a0, v0.t
1399 ; RV64-NEXT: vsub.vv v8, v8, v10, v0.t
1400 ; RV64-NEXT: lui a0, 209715
1401 ; RV64-NEXT: addiw a0, a0, 819
1402 ; RV64-NEXT: slli a1, a0, 32
1403 ; RV64-NEXT: add a0, a0, a1
1404 ; RV64-NEXT: vand.vx v10, v8, a0, v0.t
1405 ; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t
1406 ; RV64-NEXT: vand.vx v8, v8, a0, v0.t
1407 ; RV64-NEXT: vadd.vv v8, v10, v8, v0.t
1408 ; RV64-NEXT: vsrl.vi v10, v8, 4, v0.t
1409 ; RV64-NEXT: vadd.vv v8, v8, v10, v0.t
1410 ; RV64-NEXT: lui a0, 61681
1411 ; RV64-NEXT: addiw a0, a0, -241
1412 ; RV64-NEXT: slli a1, a0, 32
1413 ; RV64-NEXT: add a0, a0, a1
1414 ; RV64-NEXT: vand.vx v8, v8, a0, v0.t
1415 ; RV64-NEXT: lui a0, 4112
1416 ; RV64-NEXT: addiw a0, a0, 257
1417 ; RV64-NEXT: slli a1, a0, 32
1418 ; RV64-NEXT: add a0, a0, a1
1419 ; RV64-NEXT: vmul.vx v8, v8, a0, v0.t
1420 ; RV64-NEXT: li a0, 56
1421 ; RV64-NEXT: vsrl.vx v8, v8, a0, v0.t
1424 ; CHECK-ZVBB-LABEL: vp_ctpop_nxv2i64:
1425 ; CHECK-ZVBB: # %bb.0:
1426 ; CHECK-ZVBB-NEXT: vsetvli zero, a0, e64, m2, ta, ma
1427 ; CHECK-ZVBB-NEXT: vcpop.v v8, v8, v0.t
1428 ; CHECK-ZVBB-NEXT: ret
1429 %v = call <vscale x 2 x i64> @llvm.vp.ctpop.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i1> %m, i32 %evl)
1430 ret <vscale x 2 x i64> %v
; Unmasked i64/m2 variant (all-true mask): same lowering without v0.t; Zvbb
; emits an unmasked vcpop.v.
1433 define <vscale x 2 x i64> @vp_ctpop_nxv2i64_unmasked(<vscale x 2 x i64> %va, i32 zeroext %evl) {
1434 ; RV32-LABEL: vp_ctpop_nxv2i64_unmasked:
1436 ; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma
1437 ; RV32-NEXT: vsrl.vi v10, v8, 1
1438 ; RV32-NEXT: lui a1, 349525
1439 ; RV32-NEXT: addi a1, a1, 1365
1440 ; RV32-NEXT: vsetvli a2, zero, e32, m2, ta, ma
1441 ; RV32-NEXT: vmv.v.x v12, a1
1442 ; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma
1443 ; RV32-NEXT: vand.vv v10, v10, v12
1444 ; RV32-NEXT: vsub.vv v8, v8, v10
1445 ; RV32-NEXT: lui a1, 209715
1446 ; RV32-NEXT: addi a1, a1, 819
1447 ; RV32-NEXT: vsetvli a2, zero, e32, m2, ta, ma
1448 ; RV32-NEXT: vmv.v.x v10, a1
1449 ; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma
1450 ; RV32-NEXT: vand.vv v12, v8, v10
1451 ; RV32-NEXT: vsrl.vi v8, v8, 2
1452 ; RV32-NEXT: vand.vv v8, v8, v10
1453 ; RV32-NEXT: vadd.vv v8, v12, v8
1454 ; RV32-NEXT: vsrl.vi v10, v8, 4
1455 ; RV32-NEXT: vadd.vv v8, v8, v10
1456 ; RV32-NEXT: lui a1, 61681
1457 ; RV32-NEXT: addi a1, a1, -241
1458 ; RV32-NEXT: vsetvli a2, zero, e32, m2, ta, ma
1459 ; RV32-NEXT: vmv.v.x v10, a1
1460 ; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma
1461 ; RV32-NEXT: vand.vv v8, v8, v10
1462 ; RV32-NEXT: lui a1, 4112
1463 ; RV32-NEXT: addi a1, a1, 257
1464 ; RV32-NEXT: vsetvli a2, zero, e32, m2, ta, ma
1465 ; RV32-NEXT: vmv.v.x v10, a1
1466 ; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma
1467 ; RV32-NEXT: vmul.vv v8, v8, v10
1468 ; RV32-NEXT: li a0, 56
1469 ; RV32-NEXT: vsrl.vx v8, v8, a0
1472 ; RV64-LABEL: vp_ctpop_nxv2i64_unmasked:
1474 ; RV64-NEXT: vsetvli zero, a0, e64, m2, ta, ma
1475 ; RV64-NEXT: vsrl.vi v10, v8, 1
1476 ; RV64-NEXT: lui a0, 349525
1477 ; RV64-NEXT: addiw a0, a0, 1365
1478 ; RV64-NEXT: slli a1, a0, 32
1479 ; RV64-NEXT: add a0, a0, a1
1480 ; RV64-NEXT: vand.vx v10, v10, a0
1481 ; RV64-NEXT: vsub.vv v8, v8, v10
1482 ; RV64-NEXT: lui a0, 209715
1483 ; RV64-NEXT: addiw a0, a0, 819
1484 ; RV64-NEXT: slli a1, a0, 32
1485 ; RV64-NEXT: add a0, a0, a1
1486 ; RV64-NEXT: vand.vx v10, v8, a0
1487 ; RV64-NEXT: vsrl.vi v8, v8, 2
1488 ; RV64-NEXT: vand.vx v8, v8, a0
1489 ; RV64-NEXT: vadd.vv v8, v10, v8
1490 ; RV64-NEXT: vsrl.vi v10, v8, 4
1491 ; RV64-NEXT: vadd.vv v8, v8, v10
1492 ; RV64-NEXT: lui a0, 61681
1493 ; RV64-NEXT: addiw a0, a0, -241
1494 ; RV64-NEXT: slli a1, a0, 32
1495 ; RV64-NEXT: add a0, a0, a1
1496 ; RV64-NEXT: vand.vx v8, v8, a0
1497 ; RV64-NEXT: lui a0, 4112
1498 ; RV64-NEXT: addiw a0, a0, 257
1499 ; RV64-NEXT: slli a1, a0, 32
1500 ; RV64-NEXT: add a0, a0, a1
1501 ; RV64-NEXT: vmul.vx v8, v8, a0
1502 ; RV64-NEXT: li a0, 56
1503 ; RV64-NEXT: vsrl.vx v8, v8, a0
1506 ; CHECK-ZVBB-LABEL: vp_ctpop_nxv2i64_unmasked:
1507 ; CHECK-ZVBB: # %bb.0:
1508 ; CHECK-ZVBB-NEXT: vsetvli zero, a0, e64, m2, ta, ma
1509 ; CHECK-ZVBB-NEXT: vcpop.v v8, v8
1510 ; CHECK-ZVBB-NEXT: ret
1511 %v = call <vscale x 2 x i64> @llvm.vp.ctpop.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
1512 ret <vscale x 2 x i64> %v
1515 declare <vscale x 4 x i64> @llvm.vp.ctpop.nxv4i64(<vscale x 4 x i64>, <vscale x 4 x i1>, i32)
; Masked vp.ctpop, SEW=64, LMUL=m4: same RV32 constant-splat / RV64 64-bit
; constant lowering, with v12/v16 register groups as scratch; final shift by
; 56; Zvbb: single masked vcpop.v.
1517 define <vscale x 4 x i64> @vp_ctpop_nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
1518 ; RV32-LABEL: vp_ctpop_nxv4i64:
1520 ; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma
1521 ; RV32-NEXT: vsrl.vi v12, v8, 1, v0.t
1522 ; RV32-NEXT: lui a1, 349525
1523 ; RV32-NEXT: addi a1, a1, 1365
1524 ; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma
1525 ; RV32-NEXT: vmv.v.x v16, a1
1526 ; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma
1527 ; RV32-NEXT: vand.vv v12, v12, v16, v0.t
1528 ; RV32-NEXT: vsub.vv v8, v8, v12, v0.t
1529 ; RV32-NEXT: lui a1, 209715
1530 ; RV32-NEXT: addi a1, a1, 819
1531 ; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma
1532 ; RV32-NEXT: vmv.v.x v12, a1
1533 ; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma
1534 ; RV32-NEXT: vand.vv v16, v8, v12, v0.t
1535 ; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t
1536 ; RV32-NEXT: vand.vv v8, v8, v12, v0.t
1537 ; RV32-NEXT: vadd.vv v8, v16, v8, v0.t
1538 ; RV32-NEXT: vsrl.vi v12, v8, 4, v0.t
1539 ; RV32-NEXT: vadd.vv v8, v8, v12, v0.t
1540 ; RV32-NEXT: lui a1, 61681
1541 ; RV32-NEXT: addi a1, a1, -241
1542 ; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma
1543 ; RV32-NEXT: vmv.v.x v12, a1
1544 ; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma
1545 ; RV32-NEXT: vand.vv v8, v8, v12, v0.t
1546 ; RV32-NEXT: lui a1, 4112
1547 ; RV32-NEXT: addi a1, a1, 257
1548 ; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma
1549 ; RV32-NEXT: vmv.v.x v12, a1
1550 ; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma
1551 ; RV32-NEXT: vmul.vv v8, v8, v12, v0.t
1552 ; RV32-NEXT: li a0, 56
1553 ; RV32-NEXT: vsrl.vx v8, v8, a0, v0.t
1556 ; RV64-LABEL: vp_ctpop_nxv4i64:
1558 ; RV64-NEXT: vsetvli zero, a0, e64, m4, ta, ma
1559 ; RV64-NEXT: vsrl.vi v12, v8, 1, v0.t
1560 ; RV64-NEXT: lui a0, 349525
1561 ; RV64-NEXT: addiw a0, a0, 1365
1562 ; RV64-NEXT: slli a1, a0, 32
1563 ; RV64-NEXT: add a0, a0, a1
1564 ; RV64-NEXT: vand.vx v12, v12, a0, v0.t
1565 ; RV64-NEXT: vsub.vv v8, v8, v12, v0.t
1566 ; RV64-NEXT: lui a0, 209715
1567 ; RV64-NEXT: addiw a0, a0, 819
1568 ; RV64-NEXT: slli a1, a0, 32
1569 ; RV64-NEXT: add a0, a0, a1
1570 ; RV64-NEXT: vand.vx v12, v8, a0, v0.t
1571 ; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t
1572 ; RV64-NEXT: vand.vx v8, v8, a0, v0.t
1573 ; RV64-NEXT: vadd.vv v8, v12, v8, v0.t
1574 ; RV64-NEXT: vsrl.vi v12, v8, 4, v0.t
1575 ; RV64-NEXT: vadd.vv v8, v8, v12, v0.t
1576 ; RV64-NEXT: lui a0, 61681
1577 ; RV64-NEXT: addiw a0, a0, -241
1578 ; RV64-NEXT: slli a1, a0, 32
1579 ; RV64-NEXT: add a0, a0, a1
1580 ; RV64-NEXT: vand.vx v8, v8, a0, v0.t
1581 ; RV64-NEXT: lui a0, 4112
1582 ; RV64-NEXT: addiw a0, a0, 257
1583 ; RV64-NEXT: slli a1, a0, 32
1584 ; RV64-NEXT: add a0, a0, a1
1585 ; RV64-NEXT: vmul.vx v8, v8, a0, v0.t
1586 ; RV64-NEXT: li a0, 56
1587 ; RV64-NEXT: vsrl.vx v8, v8, a0, v0.t
1590 ; CHECK-ZVBB-LABEL: vp_ctpop_nxv4i64:
1591 ; CHECK-ZVBB: # %bb.0:
1592 ; CHECK-ZVBB-NEXT: vsetvli zero, a0, e64, m4, ta, ma
1593 ; CHECK-ZVBB-NEXT: vcpop.v v8, v8, v0.t
1594 ; CHECK-ZVBB-NEXT: ret
1595 %v = call <vscale x 4 x i64> @llvm.vp.ctpop.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i1> %m, i32 %evl)
1596 ret <vscale x 4 x i64> %v
; Unmasked i64/m4 variant (all-true mask): same lowering without v0.t; Zvbb
; emits an unmasked vcpop.v.
1599 define <vscale x 4 x i64> @vp_ctpop_nxv4i64_unmasked(<vscale x 4 x i64> %va, i32 zeroext %evl) {
1600 ; RV32-LABEL: vp_ctpop_nxv4i64_unmasked:
1602 ; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma
1603 ; RV32-NEXT: vsrl.vi v12, v8, 1
1604 ; RV32-NEXT: lui a1, 349525
1605 ; RV32-NEXT: addi a1, a1, 1365
1606 ; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma
1607 ; RV32-NEXT: vmv.v.x v16, a1
1608 ; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma
1609 ; RV32-NEXT: vand.vv v12, v12, v16
1610 ; RV32-NEXT: vsub.vv v8, v8, v12
1611 ; RV32-NEXT: lui a1, 209715
1612 ; RV32-NEXT: addi a1, a1, 819
1613 ; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma
1614 ; RV32-NEXT: vmv.v.x v12, a1
1615 ; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma
1616 ; RV32-NEXT: vand.vv v16, v8, v12
1617 ; RV32-NEXT: vsrl.vi v8, v8, 2
1618 ; RV32-NEXT: vand.vv v8, v8, v12
1619 ; RV32-NEXT: vadd.vv v8, v16, v8
1620 ; RV32-NEXT: vsrl.vi v12, v8, 4
1621 ; RV32-NEXT: vadd.vv v8, v8, v12
1622 ; RV32-NEXT: lui a1, 61681
1623 ; RV32-NEXT: addi a1, a1, -241
1624 ; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma
1625 ; RV32-NEXT: vmv.v.x v12, a1
1626 ; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma
1627 ; RV32-NEXT: vand.vv v8, v8, v12
1628 ; RV32-NEXT: lui a1, 4112
1629 ; RV32-NEXT: addi a1, a1, 257
1630 ; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma
1631 ; RV32-NEXT: vmv.v.x v12, a1
1632 ; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma
1633 ; RV32-NEXT: vmul.vv v8, v8, v12
1634 ; RV32-NEXT: li a0, 56
1635 ; RV32-NEXT: vsrl.vx v8, v8, a0
1638 ; RV64-LABEL: vp_ctpop_nxv4i64_unmasked:
1640 ; RV64-NEXT: vsetvli zero, a0, e64, m4, ta, ma
1641 ; RV64-NEXT: vsrl.vi v12, v8, 1
1642 ; RV64-NEXT: lui a0, 349525
1643 ; RV64-NEXT: addiw a0, a0, 1365
1644 ; RV64-NEXT: slli a1, a0, 32
1645 ; RV64-NEXT: add a0, a0, a1
1646 ; RV64-NEXT: vand.vx v12, v12, a0
1647 ; RV64-NEXT: vsub.vv v8, v8, v12
1648 ; RV64-NEXT: lui a0, 209715
1649 ; RV64-NEXT: addiw a0, a0, 819
1650 ; RV64-NEXT: slli a1, a0, 32
1651 ; RV64-NEXT: add a0, a0, a1
1652 ; RV64-NEXT: vand.vx v12, v8, a0
1653 ; RV64-NEXT: vsrl.vi v8, v8, 2
1654 ; RV64-NEXT: vand.vx v8, v8, a0
1655 ; RV64-NEXT: vadd.vv v8, v12, v8
1656 ; RV64-NEXT: vsrl.vi v12, v8, 4
1657 ; RV64-NEXT: vadd.vv v8, v8, v12
1658 ; RV64-NEXT: lui a0, 61681
1659 ; RV64-NEXT: addiw a0, a0, -241
1660 ; RV64-NEXT: slli a1, a0, 32
1661 ; RV64-NEXT: add a0, a0, a1
1662 ; RV64-NEXT: vand.vx v8, v8, a0
1663 ; RV64-NEXT: lui a0, 4112
1664 ; RV64-NEXT: addiw a0, a0, 257
1665 ; RV64-NEXT: slli a1, a0, 32
1666 ; RV64-NEXT: add a0, a0, a1
1667 ; RV64-NEXT: vmul.vx v8, v8, a0
1668 ; RV64-NEXT: li a0, 56
1669 ; RV64-NEXT: vsrl.vx v8, v8, a0
1672 ; CHECK-ZVBB-LABEL: vp_ctpop_nxv4i64_unmasked:
1673 ; CHECK-ZVBB: # %bb.0:
1674 ; CHECK-ZVBB-NEXT: vsetvli zero, a0, e64, m4, ta, ma
1675 ; CHECK-ZVBB-NEXT: vcpop.v v8, v8
1676 ; CHECK-ZVBB-NEXT: ret
1677 %v = call <vscale x 4 x i64> @llvm.vp.ctpop.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i1> splat (i1 true), i32 %evl)
1678 ret <vscale x 4 x i64> %v
1681 declare <vscale x 7 x i64> @llvm.vp.ctpop.nxv7i64(<vscale x 7 x i64>, <vscale x 7 x i1>, i32)
1683 define <vscale x 7 x i64> @vp_ctpop_nxv7i64(<vscale x 7 x i64> %va, <vscale x 7 x i1> %m, i32 zeroext %evl) {
; Masked vp.ctpop over <vscale x 7 x i64> — a non-power-of-two element count,
; legalized here at e64/m8 (same lowering shape as the nxv8i64 case below).
; Without Zvbb this is the SWAR popcount expansion (0x5555…, 0x3333…, 0x0f0f…,
; multiply by 0x0101… then shift right 56): RV32 splats each 64-bit constant
; from its 32-bit half at e32 (lui/addi + vmv.v.x), while RV64 materializes the
; full 64-bit constant in a scalar register (lui/addiw/slli/add) and uses .vx
; forms. With Zvbb the whole operation is a single masked vcpop.v.
; NOTE: CHECK lines are autogenerated by update_llc_test_checks.py — do not
; hand-edit; regenerate instead.
1684 ; RV32-LABEL: vp_ctpop_nxv7i64:
1686 ; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
1687 ; RV32-NEXT: vsrl.vi v16, v8, 1, v0.t
1688 ; RV32-NEXT: lui a1, 349525
1689 ; RV32-NEXT: addi a1, a1, 1365
1690 ; RV32-NEXT: vsetvli a2, zero, e32, m8, ta, ma
1691 ; RV32-NEXT: vmv.v.x v24, a1
1692 ; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
1693 ; RV32-NEXT: vand.vv v16, v16, v24, v0.t
1694 ; RV32-NEXT: vsub.vv v8, v8, v16, v0.t
1695 ; RV32-NEXT: lui a1, 209715
1696 ; RV32-NEXT: addi a1, a1, 819
1697 ; RV32-NEXT: vsetvli a2, zero, e32, m8, ta, ma
1698 ; RV32-NEXT: vmv.v.x v16, a1
1699 ; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
1700 ; RV32-NEXT: vand.vv v24, v8, v16, v0.t
1701 ; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t
1702 ; RV32-NEXT: vand.vv v8, v8, v16, v0.t
1703 ; RV32-NEXT: vadd.vv v8, v24, v8, v0.t
1704 ; RV32-NEXT: vsrl.vi v16, v8, 4, v0.t
1705 ; RV32-NEXT: vadd.vv v8, v8, v16, v0.t
1706 ; RV32-NEXT: lui a1, 61681
1707 ; RV32-NEXT: addi a1, a1, -241
1708 ; RV32-NEXT: vsetvli a2, zero, e32, m8, ta, ma
1709 ; RV32-NEXT: vmv.v.x v16, a1
1710 ; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
1711 ; RV32-NEXT: vand.vv v8, v8, v16, v0.t
1712 ; RV32-NEXT: lui a1, 4112
1713 ; RV32-NEXT: addi a1, a1, 257
1714 ; RV32-NEXT: vsetvli a2, zero, e32, m8, ta, ma
1715 ; RV32-NEXT: vmv.v.x v16, a1
1716 ; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
1717 ; RV32-NEXT: vmul.vv v8, v8, v16, v0.t
1718 ; RV32-NEXT: li a0, 56
1719 ; RV32-NEXT: vsrl.vx v8, v8, a0, v0.t
1722 ; RV64-LABEL: vp_ctpop_nxv7i64:
1724 ; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma
1725 ; RV64-NEXT: vsrl.vi v16, v8, 1, v0.t
1726 ; RV64-NEXT: lui a0, 349525
1727 ; RV64-NEXT: addiw a0, a0, 1365
1728 ; RV64-NEXT: slli a1, a0, 32
1729 ; RV64-NEXT: add a0, a0, a1
1730 ; RV64-NEXT: vand.vx v16, v16, a0, v0.t
1731 ; RV64-NEXT: vsub.vv v8, v8, v16, v0.t
1732 ; RV64-NEXT: lui a0, 209715
1733 ; RV64-NEXT: addiw a0, a0, 819
1734 ; RV64-NEXT: slli a1, a0, 32
1735 ; RV64-NEXT: add a0, a0, a1
1736 ; RV64-NEXT: vand.vx v16, v8, a0, v0.t
1737 ; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t
1738 ; RV64-NEXT: vand.vx v8, v8, a0, v0.t
1739 ; RV64-NEXT: vadd.vv v8, v16, v8, v0.t
1740 ; RV64-NEXT: vsrl.vi v16, v8, 4, v0.t
1741 ; RV64-NEXT: vadd.vv v8, v8, v16, v0.t
1742 ; RV64-NEXT: lui a0, 61681
1743 ; RV64-NEXT: addiw a0, a0, -241
1744 ; RV64-NEXT: slli a1, a0, 32
1745 ; RV64-NEXT: add a0, a0, a1
1746 ; RV64-NEXT: vand.vx v8, v8, a0, v0.t
1747 ; RV64-NEXT: lui a0, 4112
1748 ; RV64-NEXT: addiw a0, a0, 257
1749 ; RV64-NEXT: slli a1, a0, 32
1750 ; RV64-NEXT: add a0, a0, a1
1751 ; RV64-NEXT: vmul.vx v8, v8, a0, v0.t
1752 ; RV64-NEXT: li a0, 56
1753 ; RV64-NEXT: vsrl.vx v8, v8, a0, v0.t
1756 ; CHECK-ZVBB-LABEL: vp_ctpop_nxv7i64:
1757 ; CHECK-ZVBB: # %bb.0:
1758 ; CHECK-ZVBB-NEXT: vsetvli zero, a0, e64, m8, ta, ma
1759 ; CHECK-ZVBB-NEXT: vcpop.v v8, v8, v0.t
1760 ; CHECK-ZVBB-NEXT: ret
1761 %v = call <vscale x 7 x i64> @llvm.vp.ctpop.nxv7i64(<vscale x 7 x i64> %va, <vscale x 7 x i1> %m, i32 %evl)
1762 ret <vscale x 7 x i64> %v
1765 define <vscale x 7 x i64> @vp_ctpop_nxv7i64_unmasked(<vscale x 7 x i64> %va, i32 zeroext %evl) {
; Unmasked variant of vp_ctpop_nxv7i64: the all-true mask (splat (i1 true))
; folds away every ", v0.t" predication, otherwise the SWAR expansion and the
; RV32 e32-splat / RV64 scalar-constant strategies are identical to the masked
; case above. Zvbb: single unmasked vcpop.v.
; NOTE: CHECK lines are autogenerated by update_llc_test_checks.py.
1766 ; RV32-LABEL: vp_ctpop_nxv7i64_unmasked:
1768 ; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
1769 ; RV32-NEXT: vsrl.vi v16, v8, 1
1770 ; RV32-NEXT: lui a1, 349525
1771 ; RV32-NEXT: addi a1, a1, 1365
1772 ; RV32-NEXT: vsetvli a2, zero, e32, m8, ta, ma
1773 ; RV32-NEXT: vmv.v.x v24, a1
1774 ; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
1775 ; RV32-NEXT: vand.vv v16, v16, v24
1776 ; RV32-NEXT: vsub.vv v8, v8, v16
1777 ; RV32-NEXT: lui a1, 209715
1778 ; RV32-NEXT: addi a1, a1, 819
1779 ; RV32-NEXT: vsetvli a2, zero, e32, m8, ta, ma
1780 ; RV32-NEXT: vmv.v.x v16, a1
1781 ; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
1782 ; RV32-NEXT: vand.vv v24, v8, v16
1783 ; RV32-NEXT: vsrl.vi v8, v8, 2
1784 ; RV32-NEXT: vand.vv v8, v8, v16
1785 ; RV32-NEXT: vadd.vv v8, v24, v8
1786 ; RV32-NEXT: vsrl.vi v16, v8, 4
1787 ; RV32-NEXT: vadd.vv v8, v8, v16
1788 ; RV32-NEXT: lui a1, 61681
1789 ; RV32-NEXT: addi a1, a1, -241
1790 ; RV32-NEXT: vsetvli a2, zero, e32, m8, ta, ma
1791 ; RV32-NEXT: vmv.v.x v16, a1
1792 ; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
1793 ; RV32-NEXT: vand.vv v8, v8, v16
1794 ; RV32-NEXT: lui a1, 4112
1795 ; RV32-NEXT: addi a1, a1, 257
1796 ; RV32-NEXT: vsetvli a2, zero, e32, m8, ta, ma
1797 ; RV32-NEXT: vmv.v.x v16, a1
1798 ; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
1799 ; RV32-NEXT: vmul.vv v8, v8, v16
1800 ; RV32-NEXT: li a0, 56
1801 ; RV32-NEXT: vsrl.vx v8, v8, a0
1804 ; RV64-LABEL: vp_ctpop_nxv7i64_unmasked:
1806 ; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma
1807 ; RV64-NEXT: vsrl.vi v16, v8, 1
1808 ; RV64-NEXT: lui a0, 349525
1809 ; RV64-NEXT: addiw a0, a0, 1365
1810 ; RV64-NEXT: slli a1, a0, 32
1811 ; RV64-NEXT: add a0, a0, a1
1812 ; RV64-NEXT: vand.vx v16, v16, a0
1813 ; RV64-NEXT: vsub.vv v8, v8, v16
1814 ; RV64-NEXT: lui a0, 209715
1815 ; RV64-NEXT: addiw a0, a0, 819
1816 ; RV64-NEXT: slli a1, a0, 32
1817 ; RV64-NEXT: add a0, a0, a1
1818 ; RV64-NEXT: vand.vx v16, v8, a0
1819 ; RV64-NEXT: vsrl.vi v8, v8, 2
1820 ; RV64-NEXT: vand.vx v8, v8, a0
1821 ; RV64-NEXT: vadd.vv v8, v16, v8
1822 ; RV64-NEXT: vsrl.vi v16, v8, 4
1823 ; RV64-NEXT: vadd.vv v8, v8, v16
1824 ; RV64-NEXT: lui a0, 61681
1825 ; RV64-NEXT: addiw a0, a0, -241
1826 ; RV64-NEXT: slli a1, a0, 32
1827 ; RV64-NEXT: add a0, a0, a1
1828 ; RV64-NEXT: vand.vx v8, v8, a0
1829 ; RV64-NEXT: lui a0, 4112
1830 ; RV64-NEXT: addiw a0, a0, 257
1831 ; RV64-NEXT: slli a1, a0, 32
1832 ; RV64-NEXT: add a0, a0, a1
1833 ; RV64-NEXT: vmul.vx v8, v8, a0
1834 ; RV64-NEXT: li a0, 56
1835 ; RV64-NEXT: vsrl.vx v8, v8, a0
1838 ; CHECK-ZVBB-LABEL: vp_ctpop_nxv7i64_unmasked:
1839 ; CHECK-ZVBB: # %bb.0:
1840 ; CHECK-ZVBB-NEXT: vsetvli zero, a0, e64, m8, ta, ma
1841 ; CHECK-ZVBB-NEXT: vcpop.v v8, v8
1842 ; CHECK-ZVBB-NEXT: ret
1843 %v = call <vscale x 7 x i64> @llvm.vp.ctpop.nxv7i64(<vscale x 7 x i64> %va, <vscale x 7 x i1> splat (i1 true), i32 %evl)
1844 ret <vscale x 7 x i64> %v
1847 declare <vscale x 8 x i64> @llvm.vp.ctpop.nxv8i64(<vscale x 8 x i64>, <vscale x 8 x i1>, i32)
1849 define <vscale x 8 x i64> @vp_ctpop_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; Masked vp.ctpop over <vscale x 8 x i64> at e64/m8 — a full m8 register
; group, so the expected code is identical in shape to the nxv7i64 case:
; SWAR popcount without Zvbb (RV32 splatting 64-bit masks from their 32-bit
; halves at e32, RV64 using scalar-materialized 64-bit constants), and a
; single masked vcpop.v with Zvbb.
; NOTE: CHECK lines are autogenerated by update_llc_test_checks.py.
1850 ; RV32-LABEL: vp_ctpop_nxv8i64:
1852 ; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
1853 ; RV32-NEXT: vsrl.vi v16, v8, 1, v0.t
1854 ; RV32-NEXT: lui a1, 349525
1855 ; RV32-NEXT: addi a1, a1, 1365
1856 ; RV32-NEXT: vsetvli a2, zero, e32, m8, ta, ma
1857 ; RV32-NEXT: vmv.v.x v24, a1
1858 ; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
1859 ; RV32-NEXT: vand.vv v16, v16, v24, v0.t
1860 ; RV32-NEXT: vsub.vv v8, v8, v16, v0.t
1861 ; RV32-NEXT: lui a1, 209715
1862 ; RV32-NEXT: addi a1, a1, 819
1863 ; RV32-NEXT: vsetvli a2, zero, e32, m8, ta, ma
1864 ; RV32-NEXT: vmv.v.x v16, a1
1865 ; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
1866 ; RV32-NEXT: vand.vv v24, v8, v16, v0.t
1867 ; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t
1868 ; RV32-NEXT: vand.vv v8, v8, v16, v0.t
1869 ; RV32-NEXT: vadd.vv v8, v24, v8, v0.t
1870 ; RV32-NEXT: vsrl.vi v16, v8, 4, v0.t
1871 ; RV32-NEXT: vadd.vv v8, v8, v16, v0.t
1872 ; RV32-NEXT: lui a1, 61681
1873 ; RV32-NEXT: addi a1, a1, -241
1874 ; RV32-NEXT: vsetvli a2, zero, e32, m8, ta, ma
1875 ; RV32-NEXT: vmv.v.x v16, a1
1876 ; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
1877 ; RV32-NEXT: vand.vv v8, v8, v16, v0.t
1878 ; RV32-NEXT: lui a1, 4112
1879 ; RV32-NEXT: addi a1, a1, 257
1880 ; RV32-NEXT: vsetvli a2, zero, e32, m8, ta, ma
1881 ; RV32-NEXT: vmv.v.x v16, a1
1882 ; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
1883 ; RV32-NEXT: vmul.vv v8, v8, v16, v0.t
1884 ; RV32-NEXT: li a0, 56
1885 ; RV32-NEXT: vsrl.vx v8, v8, a0, v0.t
1888 ; RV64-LABEL: vp_ctpop_nxv8i64:
1890 ; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma
1891 ; RV64-NEXT: vsrl.vi v16, v8, 1, v0.t
1892 ; RV64-NEXT: lui a0, 349525
1893 ; RV64-NEXT: addiw a0, a0, 1365
1894 ; RV64-NEXT: slli a1, a0, 32
1895 ; RV64-NEXT: add a0, a0, a1
1896 ; RV64-NEXT: vand.vx v16, v16, a0, v0.t
1897 ; RV64-NEXT: vsub.vv v8, v8, v16, v0.t
1898 ; RV64-NEXT: lui a0, 209715
1899 ; RV64-NEXT: addiw a0, a0, 819
1900 ; RV64-NEXT: slli a1, a0, 32
1901 ; RV64-NEXT: add a0, a0, a1
1902 ; RV64-NEXT: vand.vx v16, v8, a0, v0.t
1903 ; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t
1904 ; RV64-NEXT: vand.vx v8, v8, a0, v0.t
1905 ; RV64-NEXT: vadd.vv v8, v16, v8, v0.t
1906 ; RV64-NEXT: vsrl.vi v16, v8, 4, v0.t
1907 ; RV64-NEXT: vadd.vv v8, v8, v16, v0.t
1908 ; RV64-NEXT: lui a0, 61681
1909 ; RV64-NEXT: addiw a0, a0, -241
1910 ; RV64-NEXT: slli a1, a0, 32
1911 ; RV64-NEXT: add a0, a0, a1
1912 ; RV64-NEXT: vand.vx v8, v8, a0, v0.t
1913 ; RV64-NEXT: lui a0, 4112
1914 ; RV64-NEXT: addiw a0, a0, 257
1915 ; RV64-NEXT: slli a1, a0, 32
1916 ; RV64-NEXT: add a0, a0, a1
1917 ; RV64-NEXT: vmul.vx v8, v8, a0, v0.t
1918 ; RV64-NEXT: li a0, 56
1919 ; RV64-NEXT: vsrl.vx v8, v8, a0, v0.t
1922 ; CHECK-ZVBB-LABEL: vp_ctpop_nxv8i64:
1923 ; CHECK-ZVBB: # %bb.0:
1924 ; CHECK-ZVBB-NEXT: vsetvli zero, a0, e64, m8, ta, ma
1925 ; CHECK-ZVBB-NEXT: vcpop.v v8, v8, v0.t
1926 ; CHECK-ZVBB-NEXT: ret
1927 %v = call <vscale x 8 x i64> @llvm.vp.ctpop.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i1> %m, i32 %evl)
1928 ret <vscale x 8 x i64> %v
1931 define <vscale x 8 x i64> @vp_ctpop_nxv8i64_unmasked(<vscale x 8 x i64> %va, i32 zeroext %evl) {
; Unmasked variant of vp_ctpop_nxv8i64: all-true mask drops the ", v0.t"
; predication from every vector op; otherwise the SWAR expansion (and the
; RV32/RV64 constant-materialization split) matches the masked case above.
; Zvbb: single unmasked vcpop.v.
; NOTE: CHECK lines are autogenerated by update_llc_test_checks.py.
1932 ; RV32-LABEL: vp_ctpop_nxv8i64_unmasked:
1934 ; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
1935 ; RV32-NEXT: vsrl.vi v16, v8, 1
1936 ; RV32-NEXT: lui a1, 349525
1937 ; RV32-NEXT: addi a1, a1, 1365
1938 ; RV32-NEXT: vsetvli a2, zero, e32, m8, ta, ma
1939 ; RV32-NEXT: vmv.v.x v24, a1
1940 ; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
1941 ; RV32-NEXT: vand.vv v16, v16, v24
1942 ; RV32-NEXT: vsub.vv v8, v8, v16
1943 ; RV32-NEXT: lui a1, 209715
1944 ; RV32-NEXT: addi a1, a1, 819
1945 ; RV32-NEXT: vsetvli a2, zero, e32, m8, ta, ma
1946 ; RV32-NEXT: vmv.v.x v16, a1
1947 ; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
1948 ; RV32-NEXT: vand.vv v24, v8, v16
1949 ; RV32-NEXT: vsrl.vi v8, v8, 2
1950 ; RV32-NEXT: vand.vv v8, v8, v16
1951 ; RV32-NEXT: vadd.vv v8, v24, v8
1952 ; RV32-NEXT: vsrl.vi v16, v8, 4
1953 ; RV32-NEXT: vadd.vv v8, v8, v16
1954 ; RV32-NEXT: lui a1, 61681
1955 ; RV32-NEXT: addi a1, a1, -241
1956 ; RV32-NEXT: vsetvli a2, zero, e32, m8, ta, ma
1957 ; RV32-NEXT: vmv.v.x v16, a1
1958 ; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
1959 ; RV32-NEXT: vand.vv v8, v8, v16
1960 ; RV32-NEXT: lui a1, 4112
1961 ; RV32-NEXT: addi a1, a1, 257
1962 ; RV32-NEXT: vsetvli a2, zero, e32, m8, ta, ma
1963 ; RV32-NEXT: vmv.v.x v16, a1
1964 ; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
1965 ; RV32-NEXT: vmul.vv v8, v8, v16
1966 ; RV32-NEXT: li a0, 56
1967 ; RV32-NEXT: vsrl.vx v8, v8, a0
1970 ; RV64-LABEL: vp_ctpop_nxv8i64_unmasked:
1972 ; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma
1973 ; RV64-NEXT: vsrl.vi v16, v8, 1
1974 ; RV64-NEXT: lui a0, 349525
1975 ; RV64-NEXT: addiw a0, a0, 1365
1976 ; RV64-NEXT: slli a1, a0, 32
1977 ; RV64-NEXT: add a0, a0, a1
1978 ; RV64-NEXT: vand.vx v16, v16, a0
1979 ; RV64-NEXT: vsub.vv v8, v8, v16
1980 ; RV64-NEXT: lui a0, 209715
1981 ; RV64-NEXT: addiw a0, a0, 819
1982 ; RV64-NEXT: slli a1, a0, 32
1983 ; RV64-NEXT: add a0, a0, a1
1984 ; RV64-NEXT: vand.vx v16, v8, a0
1985 ; RV64-NEXT: vsrl.vi v8, v8, 2
1986 ; RV64-NEXT: vand.vx v8, v8, a0
1987 ; RV64-NEXT: vadd.vv v8, v16, v8
1988 ; RV64-NEXT: vsrl.vi v16, v8, 4
1989 ; RV64-NEXT: vadd.vv v8, v8, v16
1990 ; RV64-NEXT: lui a0, 61681
1991 ; RV64-NEXT: addiw a0, a0, -241
1992 ; RV64-NEXT: slli a1, a0, 32
1993 ; RV64-NEXT: add a0, a0, a1
1994 ; RV64-NEXT: vand.vx v8, v8, a0
1995 ; RV64-NEXT: lui a0, 4112
1996 ; RV64-NEXT: addiw a0, a0, 257
1997 ; RV64-NEXT: slli a1, a0, 32
1998 ; RV64-NEXT: add a0, a0, a1
1999 ; RV64-NEXT: vmul.vx v8, v8, a0
2000 ; RV64-NEXT: li a0, 56
2001 ; RV64-NEXT: vsrl.vx v8, v8, a0
2004 ; CHECK-ZVBB-LABEL: vp_ctpop_nxv8i64_unmasked:
2005 ; CHECK-ZVBB: # %bb.0:
2006 ; CHECK-ZVBB-NEXT: vsetvli zero, a0, e64, m8, ta, ma
2007 ; CHECK-ZVBB-NEXT: vcpop.v v8, v8
2008 ; CHECK-ZVBB-NEXT: ret
2009 %v = call <vscale x 8 x i64> @llvm.vp.ctpop.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i1> splat (i1 true), i32 %evl)
2010 ret <vscale x 8 x i64> %v
2013 declare <vscale x 16 x i64> @llvm.vp.ctpop.nxv16i64(<vscale x 16 x i64>, <vscale x 16 x i1>, i32)
2015 define <vscale x 16 x i64> @vp_ctpop_nxv16i64(<vscale x 16 x i64> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; Masked vp.ctpop over <vscale x 16 x i64>, which does not fit a single
; register group: the operand is split across two m8 groups (v8 and v16) and
; processed as two halves. The EVL split is the usual pattern: the high half
; runs with min(evl - vlenb_elems, …) computed branchlessly via sub/sltu/and,
; the mask's high half is obtained with vslidedown.vx, and the low half's
; length is clamped with bltu/mv (.LBB46_2). On RV32 the expansion is
; register-starved (two live m8 halves plus four splatted 64-bit masks), so
; it reserves 56*vlenb bytes of stack and spills/reloads m8 groups throughout;
; RV64 keeps the masks in scalar registers (a2–a6) and only needs 16*vlenb of
; spill space. Zvbb needs no SWAR expansion: one vcpop.v per half.
; NOTE: CHECK lines are autogenerated by update_llc_test_checks.py — do not
; hand-edit; regenerate instead.
2016 ; RV32-LABEL: vp_ctpop_nxv16i64:
2018 ; RV32-NEXT: addi sp, sp, -16
2019 ; RV32-NEXT: .cfi_def_cfa_offset 16
2020 ; RV32-NEXT: csrr a1, vlenb
2021 ; RV32-NEXT: li a2, 56
2022 ; RV32-NEXT: mul a1, a1, a2
2023 ; RV32-NEXT: sub sp, sp, a1
2024 ; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x38, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 56 * vlenb
2025 ; RV32-NEXT: vmv1r.v v24, v0
2026 ; RV32-NEXT: csrr a1, vlenb
2027 ; RV32-NEXT: slli a1, a1, 5
2028 ; RV32-NEXT: add a1, sp, a1
2029 ; RV32-NEXT: addi a1, a1, 16
2030 ; RV32-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
2031 ; RV32-NEXT: csrr a1, vlenb
2032 ; RV32-NEXT: li a2, 48
2033 ; RV32-NEXT: mul a1, a1, a2
2034 ; RV32-NEXT: add a1, sp, a1
2035 ; RV32-NEXT: addi a1, a1, 16
2036 ; RV32-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill
2037 ; RV32-NEXT: csrr a1, vlenb
2038 ; RV32-NEXT: srli a2, a1, 3
2039 ; RV32-NEXT: vsetvli a3, zero, e8, mf4, ta, ma
2040 ; RV32-NEXT: vslidedown.vx v0, v0, a2
2041 ; RV32-NEXT: sub a2, a0, a1
2042 ; RV32-NEXT: sltu a3, a0, a2
2043 ; RV32-NEXT: addi a3, a3, -1
2044 ; RV32-NEXT: and a2, a3, a2
2045 ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
2046 ; RV32-NEXT: vsrl.vi v8, v16, 1, v0.t
2047 ; RV32-NEXT: csrr a3, vlenb
2048 ; RV32-NEXT: li a4, 40
2049 ; RV32-NEXT: mul a3, a3, a4
2050 ; RV32-NEXT: add a3, sp, a3
2051 ; RV32-NEXT: addi a3, a3, 16
2052 ; RV32-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill
2053 ; RV32-NEXT: lui a3, 349525
2054 ; RV32-NEXT: addi a3, a3, 1365
2055 ; RV32-NEXT: vsetvli a4, zero, e32, m8, ta, ma
2056 ; RV32-NEXT: vmv.v.x v16, a3
2057 ; RV32-NEXT: csrr a3, vlenb
2058 ; RV32-NEXT: li a4, 24
2059 ; RV32-NEXT: mul a3, a3, a4
2060 ; RV32-NEXT: add a3, sp, a3
2061 ; RV32-NEXT: addi a3, a3, 16
2062 ; RV32-NEXT: vs8r.v v16, (a3) # Unknown-size Folded Spill
2063 ; RV32-NEXT: csrr a3, vlenb
2064 ; RV32-NEXT: li a4, 40
2065 ; RV32-NEXT: mul a3, a3, a4
2066 ; RV32-NEXT: add a3, sp, a3
2067 ; RV32-NEXT: addi a3, a3, 16
2068 ; RV32-NEXT: vl8r.v v8, (a3) # Unknown-size Folded Reload
2069 ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
2070 ; RV32-NEXT: vand.vv v8, v8, v16, v0.t
2071 ; RV32-NEXT: csrr a3, vlenb
2072 ; RV32-NEXT: slli a3, a3, 5
2073 ; RV32-NEXT: add a3, sp, a3
2074 ; RV32-NEXT: addi a3, a3, 16
2075 ; RV32-NEXT: vl8r.v v16, (a3) # Unknown-size Folded Reload
2076 ; RV32-NEXT: vsub.vv v8, v16, v8, v0.t
2077 ; RV32-NEXT: csrr a3, vlenb
2078 ; RV32-NEXT: slli a3, a3, 5
2079 ; RV32-NEXT: add a3, sp, a3
2080 ; RV32-NEXT: addi a3, a3, 16
2081 ; RV32-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill
2082 ; RV32-NEXT: lui a3, 209715
2083 ; RV32-NEXT: addi a3, a3, 819
2084 ; RV32-NEXT: vsetvli a4, zero, e32, m8, ta, ma
2085 ; RV32-NEXT: vmv.v.x v16, a3
2086 ; RV32-NEXT: csrr a3, vlenb
2087 ; RV32-NEXT: slli a3, a3, 5
2088 ; RV32-NEXT: add a3, sp, a3
2089 ; RV32-NEXT: addi a3, a3, 16
2090 ; RV32-NEXT: vl8r.v v8, (a3) # Unknown-size Folded Reload
2091 ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
2092 ; RV32-NEXT: vand.vv v8, v8, v16, v0.t
2093 ; RV32-NEXT: csrr a3, vlenb
2094 ; RV32-NEXT: slli a3, a3, 4
2095 ; RV32-NEXT: add a3, sp, a3
2096 ; RV32-NEXT: addi a3, a3, 16
2097 ; RV32-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill
2098 ; RV32-NEXT: csrr a3, vlenb
2099 ; RV32-NEXT: slli a3, a3, 5
2100 ; RV32-NEXT: add a3, sp, a3
2101 ; RV32-NEXT: addi a3, a3, 16
2102 ; RV32-NEXT: vl8r.v v8, (a3) # Unknown-size Folded Reload
2103 ; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t
2104 ; RV32-NEXT: csrr a3, vlenb
2105 ; RV32-NEXT: li a4, 40
2106 ; RV32-NEXT: mul a3, a3, a4
2107 ; RV32-NEXT: add a3, sp, a3
2108 ; RV32-NEXT: addi a3, a3, 16
2109 ; RV32-NEXT: vs8r.v v16, (a3) # Unknown-size Folded Spill
2110 ; RV32-NEXT: vand.vv v8, v8, v16, v0.t
2111 ; RV32-NEXT: csrr a3, vlenb
2112 ; RV32-NEXT: slli a3, a3, 4
2113 ; RV32-NEXT: add a3, sp, a3
2114 ; RV32-NEXT: addi a3, a3, 16
2115 ; RV32-NEXT: vl8r.v v16, (a3) # Unknown-size Folded Reload
2116 ; RV32-NEXT: vadd.vv v8, v16, v8, v0.t
2117 ; RV32-NEXT: vsrl.vi v16, v8, 4, v0.t
2118 ; RV32-NEXT: vadd.vv v8, v8, v16, v0.t
2119 ; RV32-NEXT: lui a3, 61681
2120 ; RV32-NEXT: addi a3, a3, -241
2121 ; RV32-NEXT: vsetvli a4, zero, e32, m8, ta, ma
2122 ; RV32-NEXT: vmv.v.x v16, a3
2123 ; RV32-NEXT: csrr a3, vlenb
2124 ; RV32-NEXT: slli a3, a3, 5
2125 ; RV32-NEXT: add a3, sp, a3
2126 ; RV32-NEXT: addi a3, a3, 16
2127 ; RV32-NEXT: vs8r.v v16, (a3) # Unknown-size Folded Spill
2128 ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
2129 ; RV32-NEXT: vand.vv v16, v8, v16, v0.t
2130 ; RV32-NEXT: lui a3, 4112
2131 ; RV32-NEXT: addi a3, a3, 257
2132 ; RV32-NEXT: vsetvli a4, zero, e32, m8, ta, ma
2133 ; RV32-NEXT: vmv.v.x v8, a3
2134 ; RV32-NEXT: csrr a3, vlenb
2135 ; RV32-NEXT: slli a3, a3, 4
2136 ; RV32-NEXT: add a3, sp, a3
2137 ; RV32-NEXT: addi a3, a3, 16
2138 ; RV32-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill
2139 ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
2140 ; RV32-NEXT: vmul.vv v16, v16, v8, v0.t
2141 ; RV32-NEXT: li a2, 56
2142 ; RV32-NEXT: vsrl.vx v8, v16, a2, v0.t
2143 ; RV32-NEXT: csrr a3, vlenb
2144 ; RV32-NEXT: slli a3, a3, 3
2145 ; RV32-NEXT: add a3, sp, a3
2146 ; RV32-NEXT: addi a3, a3, 16
2147 ; RV32-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill
2148 ; RV32-NEXT: bltu a0, a1, .LBB46_2
2149 ; RV32-NEXT: # %bb.1:
2150 ; RV32-NEXT: mv a0, a1
2151 ; RV32-NEXT: .LBB46_2:
2152 ; RV32-NEXT: vmv1r.v v0, v24
2153 ; RV32-NEXT: li a3, 48
2154 ; RV32-NEXT: mul a1, a1, a3
2155 ; RV32-NEXT: add a1, sp, a1
2156 ; RV32-NEXT: addi a1, a1, 16
2157 ; RV32-NEXT: vl8r.v v8, (a1) # Unknown-size Folded Reload
2158 ; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
2159 ; RV32-NEXT: vsrl.vi v8, v8, 1, v0.t
2160 ; RV32-NEXT: addi a0, sp, 16
2161 ; RV32-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
2162 ; RV32-NEXT: csrr a0, vlenb
2163 ; RV32-NEXT: li a1, 24
2164 ; RV32-NEXT: mul a0, a0, a1
2165 ; RV32-NEXT: add a0, sp, a0
2166 ; RV32-NEXT: addi a0, a0, 16
2167 ; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
2168 ; RV32-NEXT: addi a0, sp, 16
2169 ; RV32-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
2170 ; RV32-NEXT: vand.vv v16, v8, v16, v0.t
2171 ; RV32-NEXT: csrr a0, vlenb
2172 ; RV32-NEXT: li a1, 48
2173 ; RV32-NEXT: mul a0, a0, a1
2174 ; RV32-NEXT: add a0, sp, a0
2175 ; RV32-NEXT: addi a0, a0, 16
2176 ; RV32-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
2177 ; RV32-NEXT: vsub.vv v8, v8, v16, v0.t
2178 ; RV32-NEXT: csrr a0, vlenb
2179 ; RV32-NEXT: li a1, 48
2180 ; RV32-NEXT: mul a0, a0, a1
2181 ; RV32-NEXT: add a0, sp, a0
2182 ; RV32-NEXT: addi a0, a0, 16
2183 ; RV32-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
2184 ; RV32-NEXT: csrr a0, vlenb
2185 ; RV32-NEXT: li a1, 40
2186 ; RV32-NEXT: mul a0, a0, a1
2187 ; RV32-NEXT: add a0, sp, a0
2188 ; RV32-NEXT: addi a0, a0, 16
2189 ; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
2190 ; RV32-NEXT: csrr a0, vlenb
2191 ; RV32-NEXT: li a1, 48
2192 ; RV32-NEXT: mul a0, a0, a1
2193 ; RV32-NEXT: add a0, sp, a0
2194 ; RV32-NEXT: addi a0, a0, 16
2195 ; RV32-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
2196 ; RV32-NEXT: vand.vv v8, v8, v16, v0.t
2197 ; RV32-NEXT: csrr a0, vlenb
2198 ; RV32-NEXT: li a1, 24
2199 ; RV32-NEXT: mul a0, a0, a1
2200 ; RV32-NEXT: add a0, sp, a0
2201 ; RV32-NEXT: addi a0, a0, 16
2202 ; RV32-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
2203 ; RV32-NEXT: csrr a0, vlenb
2204 ; RV32-NEXT: li a1, 48
2205 ; RV32-NEXT: mul a0, a0, a1
2206 ; RV32-NEXT: add a0, sp, a0
2207 ; RV32-NEXT: addi a0, a0, 16
2208 ; RV32-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
2209 ; RV32-NEXT: vsrl.vi v16, v8, 2, v0.t
2210 ; RV32-NEXT: csrr a0, vlenb
2211 ; RV32-NEXT: li a1, 40
2212 ; RV32-NEXT: mul a0, a0, a1
2213 ; RV32-NEXT: add a0, sp, a0
2214 ; RV32-NEXT: addi a0, a0, 16
2215 ; RV32-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
2216 ; RV32-NEXT: vand.vv v16, v16, v8, v0.t
2217 ; RV32-NEXT: csrr a0, vlenb
2218 ; RV32-NEXT: li a1, 24
2219 ; RV32-NEXT: mul a0, a0, a1
2220 ; RV32-NEXT: add a0, sp, a0
2221 ; RV32-NEXT: addi a0, a0, 16
2222 ; RV32-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
2223 ; RV32-NEXT: vadd.vv v8, v8, v16, v0.t
2224 ; RV32-NEXT: vsrl.vi v16, v8, 4, v0.t
2225 ; RV32-NEXT: vadd.vv v8, v8, v16, v0.t
2226 ; RV32-NEXT: csrr a0, vlenb
2227 ; RV32-NEXT: slli a0, a0, 5
2228 ; RV32-NEXT: add a0, sp, a0
2229 ; RV32-NEXT: addi a0, a0, 16
2230 ; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
2231 ; RV32-NEXT: vand.vv v8, v8, v16, v0.t
2232 ; RV32-NEXT: csrr a0, vlenb
2233 ; RV32-NEXT: slli a0, a0, 4
2234 ; RV32-NEXT: add a0, sp, a0
2235 ; RV32-NEXT: addi a0, a0, 16
2236 ; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
2237 ; RV32-NEXT: vmul.vv v8, v8, v16, v0.t
2238 ; RV32-NEXT: vsrl.vx v8, v8, a2, v0.t
2239 ; RV32-NEXT: csrr a0, vlenb
2240 ; RV32-NEXT: slli a0, a0, 3
2241 ; RV32-NEXT: add a0, sp, a0
2242 ; RV32-NEXT: addi a0, a0, 16
2243 ; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
2244 ; RV32-NEXT: csrr a0, vlenb
2245 ; RV32-NEXT: li a1, 56
2246 ; RV32-NEXT: mul a0, a0, a1
2247 ; RV32-NEXT: add sp, sp, a0
2248 ; RV32-NEXT: addi sp, sp, 16
2251 ; RV64-LABEL: vp_ctpop_nxv16i64:
2253 ; RV64-NEXT: addi sp, sp, -16
2254 ; RV64-NEXT: .cfi_def_cfa_offset 16
2255 ; RV64-NEXT: csrr a1, vlenb
2256 ; RV64-NEXT: slli a1, a1, 4
2257 ; RV64-NEXT: sub sp, sp, a1
2258 ; RV64-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
2259 ; RV64-NEXT: csrr a1, vlenb
2260 ; RV64-NEXT: slli a1, a1, 3
2261 ; RV64-NEXT: add a1, sp, a1
2262 ; RV64-NEXT: addi a1, a1, 16
2263 ; RV64-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
2264 ; RV64-NEXT: csrr a1, vlenb
2265 ; RV64-NEXT: srli a2, a1, 3
2266 ; RV64-NEXT: vsetvli a3, zero, e8, mf4, ta, ma
2267 ; RV64-NEXT: vslidedown.vx v24, v0, a2
2268 ; RV64-NEXT: mv a2, a0
2269 ; RV64-NEXT: bltu a0, a1, .LBB46_2
2270 ; RV64-NEXT: # %bb.1:
2271 ; RV64-NEXT: mv a2, a1
2272 ; RV64-NEXT: .LBB46_2:
2273 ; RV64-NEXT: vsetvli zero, a2, e64, m8, ta, ma
2274 ; RV64-NEXT: vsrl.vi v16, v8, 1, v0.t
2275 ; RV64-NEXT: lui a2, 349525
2276 ; RV64-NEXT: addiw a2, a2, 1365
2277 ; RV64-NEXT: slli a3, a2, 32
2278 ; RV64-NEXT: add a2, a2, a3
2279 ; RV64-NEXT: vand.vx v16, v16, a2, v0.t
2280 ; RV64-NEXT: vsub.vv v8, v8, v16, v0.t
2281 ; RV64-NEXT: lui a3, 209715
2282 ; RV64-NEXT: addiw a3, a3, 819
2283 ; RV64-NEXT: slli a4, a3, 32
2284 ; RV64-NEXT: add a3, a3, a4
2285 ; RV64-NEXT: vand.vx v16, v8, a3, v0.t
2286 ; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t
2287 ; RV64-NEXT: vand.vx v8, v8, a3, v0.t
2288 ; RV64-NEXT: vadd.vv v8, v16, v8, v0.t
2289 ; RV64-NEXT: vsrl.vi v16, v8, 4, v0.t
2290 ; RV64-NEXT: vadd.vv v8, v8, v16, v0.t
2291 ; RV64-NEXT: lui a4, 61681
2292 ; RV64-NEXT: addiw a4, a4, -241
2293 ; RV64-NEXT: slli a5, a4, 32
2294 ; RV64-NEXT: add a4, a4, a5
2295 ; RV64-NEXT: vand.vx v8, v8, a4, v0.t
2296 ; RV64-NEXT: lui a5, 4112
2297 ; RV64-NEXT: addiw a5, a5, 257
2298 ; RV64-NEXT: slli a6, a5, 32
2299 ; RV64-NEXT: add a5, a5, a6
2300 ; RV64-NEXT: vmul.vx v8, v8, a5, v0.t
2301 ; RV64-NEXT: li a6, 56
2302 ; RV64-NEXT: vsrl.vx v8, v8, a6, v0.t
2303 ; RV64-NEXT: addi a7, sp, 16
2304 ; RV64-NEXT: vs8r.v v8, (a7) # Unknown-size Folded Spill
2305 ; RV64-NEXT: sub a1, a0, a1
2306 ; RV64-NEXT: sltu a0, a0, a1
2307 ; RV64-NEXT: addi a0, a0, -1
2308 ; RV64-NEXT: and a0, a0, a1
2309 ; RV64-NEXT: vmv1r.v v0, v24
2310 ; RV64-NEXT: csrr a1, vlenb
2311 ; RV64-NEXT: slli a1, a1, 3
2312 ; RV64-NEXT: add a1, sp, a1
2313 ; RV64-NEXT: addi a1, a1, 16
2314 ; RV64-NEXT: vl8r.v v8, (a1) # Unknown-size Folded Reload
2315 ; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma
2316 ; RV64-NEXT: vsrl.vi v16, v8, 1, v0.t
2317 ; RV64-NEXT: vand.vx v16, v16, a2, v0.t
2318 ; RV64-NEXT: vsub.vv v16, v8, v16, v0.t
2319 ; RV64-NEXT: vand.vx v8, v16, a3, v0.t
2320 ; RV64-NEXT: vsrl.vi v16, v16, 2, v0.t
2321 ; RV64-NEXT: vand.vx v16, v16, a3, v0.t
2322 ; RV64-NEXT: vadd.vv v8, v8, v16, v0.t
2323 ; RV64-NEXT: vsrl.vi v16, v8, 4, v0.t
2324 ; RV64-NEXT: vadd.vv v8, v8, v16, v0.t
2325 ; RV64-NEXT: vand.vx v8, v8, a4, v0.t
2326 ; RV64-NEXT: vmul.vx v8, v8, a5, v0.t
2327 ; RV64-NEXT: vsrl.vx v16, v8, a6, v0.t
2328 ; RV64-NEXT: addi a0, sp, 16
2329 ; RV64-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
2330 ; RV64-NEXT: csrr a0, vlenb
2331 ; RV64-NEXT: slli a0, a0, 4
2332 ; RV64-NEXT: add sp, sp, a0
2333 ; RV64-NEXT: addi sp, sp, 16
2336 ; CHECK-ZVBB-LABEL: vp_ctpop_nxv16i64:
2337 ; CHECK-ZVBB: # %bb.0:
2338 ; CHECK-ZVBB-NEXT: vmv1r.v v24, v0
2339 ; CHECK-ZVBB-NEXT: csrr a1, vlenb
2340 ; CHECK-ZVBB-NEXT: srli a2, a1, 3
2341 ; CHECK-ZVBB-NEXT: vsetvli a3, zero, e8, mf4, ta, ma
2342 ; CHECK-ZVBB-NEXT: vslidedown.vx v0, v0, a2
2343 ; CHECK-ZVBB-NEXT: sub a2, a0, a1
2344 ; CHECK-ZVBB-NEXT: sltu a3, a0, a2
2345 ; CHECK-ZVBB-NEXT: addi a3, a3, -1
2346 ; CHECK-ZVBB-NEXT: and a2, a3, a2
2347 ; CHECK-ZVBB-NEXT: vsetvli zero, a2, e64, m8, ta, ma
2348 ; CHECK-ZVBB-NEXT: vcpop.v v16, v16, v0.t
2349 ; CHECK-ZVBB-NEXT: bltu a0, a1, .LBB46_2
2350 ; CHECK-ZVBB-NEXT: # %bb.1:
2351 ; CHECK-ZVBB-NEXT: mv a0, a1
2352 ; CHECK-ZVBB-NEXT: .LBB46_2:
2353 ; CHECK-ZVBB-NEXT: vmv1r.v v0, v24
2354 ; CHECK-ZVBB-NEXT: vsetvli zero, a0, e64, m8, ta, ma
2355 ; CHECK-ZVBB-NEXT: vcpop.v v8, v8, v0.t
2356 ; CHECK-ZVBB-NEXT: ret
2357 %v = call <vscale x 16 x i64> @llvm.vp.ctpop.nxv16i64(<vscale x 16 x i64> %va, <vscale x 16 x i1> %m, i32 %evl)
2358 ret <vscale x 16 x i64> %v
2361 define <vscale x 16 x i64> @vp_ctpop_nxv16i64_unmasked(<vscale x 16 x i64> %va, i32 zeroext %evl) {
; Unmasked split-type case: <vscale x 16 x i64> again processed as two m8
; halves (v8 low, v16 high) with the branchless sub/sltu/and EVL split and the
; bltu/mv clamp (.LBB47_2). With no mask to keep live, RV32 only needs
; 32*vlenb bytes of spill space (the four splatted 64-bit SWAR constants are
; spilled once and reloaded for the second half), and RV64 needs no stack at
; all since its constants live in scalar registers a2–a6 across both halves.
; Zvbb: one unmasked vcpop.v per half.
; NOTE: CHECK lines are autogenerated by update_llc_test_checks.py.
2362 ; RV32-LABEL: vp_ctpop_nxv16i64_unmasked:
2364 ; RV32-NEXT: addi sp, sp, -16
2365 ; RV32-NEXT: .cfi_def_cfa_offset 16
2366 ; RV32-NEXT: csrr a1, vlenb
2367 ; RV32-NEXT: slli a1, a1, 5
2368 ; RV32-NEXT: sub sp, sp, a1
2369 ; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb
2370 ; RV32-NEXT: csrr a1, vlenb
2371 ; RV32-NEXT: sub a2, a0, a1
2372 ; RV32-NEXT: sltu a3, a0, a2
2373 ; RV32-NEXT: addi a3, a3, -1
2374 ; RV32-NEXT: and a2, a3, a2
2375 ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
2376 ; RV32-NEXT: vsrl.vi v24, v16, 1
2377 ; RV32-NEXT: lui a3, 349525
2378 ; RV32-NEXT: addi a3, a3, 1365
2379 ; RV32-NEXT: vsetvli a4, zero, e32, m8, ta, ma
2380 ; RV32-NEXT: vmv.v.x v0, a3
2381 ; RV32-NEXT: csrr a3, vlenb
2382 ; RV32-NEXT: li a4, 24
2383 ; RV32-NEXT: mul a3, a3, a4
2384 ; RV32-NEXT: add a3, sp, a3
2385 ; RV32-NEXT: addi a3, a3, 16
2386 ; RV32-NEXT: vs8r.v v0, (a3) # Unknown-size Folded Spill
2387 ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
2388 ; RV32-NEXT: vand.vv v24, v24, v0
2389 ; RV32-NEXT: vsub.vv v24, v16, v24
2390 ; RV32-NEXT: lui a3, 209715
2391 ; RV32-NEXT: addi a3, a3, 819
2392 ; RV32-NEXT: vsetvli a4, zero, e32, m8, ta, ma
2393 ; RV32-NEXT: vmv.v.x v0, a3
2394 ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
2395 ; RV32-NEXT: vand.vv v16, v24, v0
2396 ; RV32-NEXT: vsrl.vi v24, v24, 2
2397 ; RV32-NEXT: csrr a3, vlenb
2398 ; RV32-NEXT: slli a3, a3, 4
2399 ; RV32-NEXT: add a3, sp, a3
2400 ; RV32-NEXT: addi a3, a3, 16
2401 ; RV32-NEXT: vs8r.v v0, (a3) # Unknown-size Folded Spill
2402 ; RV32-NEXT: vand.vv v24, v24, v0
2403 ; RV32-NEXT: vadd.vv v24, v16, v24
2404 ; RV32-NEXT: vsrl.vi v16, v24, 4
2405 ; RV32-NEXT: vadd.vv v16, v24, v16
2406 ; RV32-NEXT: lui a3, 61681
2407 ; RV32-NEXT: addi a3, a3, -241
2408 ; RV32-NEXT: vsetvli a4, zero, e32, m8, ta, ma
2409 ; RV32-NEXT: vmv.v.x v24, a3
2410 ; RV32-NEXT: csrr a3, vlenb
2411 ; RV32-NEXT: slli a3, a3, 3
2412 ; RV32-NEXT: add a3, sp, a3
2413 ; RV32-NEXT: addi a3, a3, 16
2414 ; RV32-NEXT: vs8r.v v24, (a3) # Unknown-size Folded Spill
2415 ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
2416 ; RV32-NEXT: vand.vv v16, v16, v24
2417 ; RV32-NEXT: lui a3, 4112
2418 ; RV32-NEXT: addi a3, a3, 257
2419 ; RV32-NEXT: vsetvli a4, zero, e32, m8, ta, ma
2420 ; RV32-NEXT: vmv.v.x v24, a3
2421 ; RV32-NEXT: addi a3, sp, 16
2422 ; RV32-NEXT: vs8r.v v24, (a3) # Unknown-size Folded Spill
2423 ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
2424 ; RV32-NEXT: vmul.vv v16, v16, v24
2425 ; RV32-NEXT: li a2, 56
2426 ; RV32-NEXT: vsrl.vx v16, v16, a2
2427 ; RV32-NEXT: bltu a0, a1, .LBB47_2
2428 ; RV32-NEXT: # %bb.1:
2429 ; RV32-NEXT: mv a0, a1
2430 ; RV32-NEXT: .LBB47_2:
2431 ; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
2432 ; RV32-NEXT: vsrl.vi v24, v8, 1
2433 ; RV32-NEXT: csrr a0, vlenb
2434 ; RV32-NEXT: li a1, 24
2435 ; RV32-NEXT: mul a0, a0, a1
2436 ; RV32-NEXT: add a0, sp, a0
2437 ; RV32-NEXT: addi a0, a0, 16
2438 ; RV32-NEXT: vl8r.v v0, (a0) # Unknown-size Folded Reload
2439 ; RV32-NEXT: vand.vv v24, v24, v0
2440 ; RV32-NEXT: vsub.vv v24, v8, v24
2441 ; RV32-NEXT: csrr a0, vlenb
2442 ; RV32-NEXT: slli a0, a0, 4
2443 ; RV32-NEXT: add a0, sp, a0
2444 ; RV32-NEXT: addi a0, a0, 16
2445 ; RV32-NEXT: vl8r.v v0, (a0) # Unknown-size Folded Reload
2446 ; RV32-NEXT: vand.vv v8, v24, v0
2447 ; RV32-NEXT: vsrl.vi v24, v24, 2
2448 ; RV32-NEXT: vand.vv v24, v24, v0
2449 ; RV32-NEXT: vadd.vv v8, v8, v24
2450 ; RV32-NEXT: vsrl.vi v24, v8, 4
2451 ; RV32-NEXT: vadd.vv v8, v8, v24
2452 ; RV32-NEXT: csrr a0, vlenb
2453 ; RV32-NEXT: slli a0, a0, 3
2454 ; RV32-NEXT: add a0, sp, a0
2455 ; RV32-NEXT: addi a0, a0, 16
2456 ; RV32-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
2457 ; RV32-NEXT: vand.vv v8, v8, v24
2458 ; RV32-NEXT: addi a0, sp, 16
2459 ; RV32-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
2460 ; RV32-NEXT: vmul.vv v8, v8, v24
2461 ; RV32-NEXT: vsrl.vx v8, v8, a2
2462 ; RV32-NEXT: csrr a0, vlenb
2463 ; RV32-NEXT: slli a0, a0, 5
2464 ; RV32-NEXT: add sp, sp, a0
2465 ; RV32-NEXT: addi sp, sp, 16
2468 ; RV64-LABEL: vp_ctpop_nxv16i64_unmasked:
2470 ; RV64-NEXT: csrr a1, vlenb
2471 ; RV64-NEXT: mv a2, a0
2472 ; RV64-NEXT: bltu a0, a1, .LBB47_2
2473 ; RV64-NEXT: # %bb.1:
2474 ; RV64-NEXT: mv a2, a1
2475 ; RV64-NEXT: .LBB47_2:
2476 ; RV64-NEXT: vsetvli zero, a2, e64, m8, ta, ma
2477 ; RV64-NEXT: vsrl.vi v24, v8, 1
2478 ; RV64-NEXT: lui a2, 349525
2479 ; RV64-NEXT: addiw a2, a2, 1365
2480 ; RV64-NEXT: slli a3, a2, 32
2481 ; RV64-NEXT: add a2, a2, a3
2482 ; RV64-NEXT: vand.vx v24, v24, a2
2483 ; RV64-NEXT: vsub.vv v8, v8, v24
2484 ; RV64-NEXT: lui a3, 209715
2485 ; RV64-NEXT: addiw a3, a3, 819
2486 ; RV64-NEXT: slli a4, a3, 32
2487 ; RV64-NEXT: add a3, a3, a4
2488 ; RV64-NEXT: vand.vx v24, v8, a3
2489 ; RV64-NEXT: vsrl.vi v8, v8, 2
2490 ; RV64-NEXT: vand.vx v8, v8, a3
2491 ; RV64-NEXT: vadd.vv v8, v24, v8
2492 ; RV64-NEXT: vsrl.vi v24, v8, 4
2493 ; RV64-NEXT: vadd.vv v8, v8, v24
2494 ; RV64-NEXT: lui a4, 61681
2495 ; RV64-NEXT: addiw a4, a4, -241
2496 ; RV64-NEXT: slli a5, a4, 32
2497 ; RV64-NEXT: add a4, a4, a5
2498 ; RV64-NEXT: vand.vx v8, v8, a4
2499 ; RV64-NEXT: lui a5, 4112
2500 ; RV64-NEXT: addiw a5, a5, 257
2501 ; RV64-NEXT: slli a6, a5, 32
2502 ; RV64-NEXT: add a5, a5, a6
2503 ; RV64-NEXT: vmul.vx v8, v8, a5
2504 ; RV64-NEXT: li a6, 56
2505 ; RV64-NEXT: vsrl.vx v8, v8, a6
2506 ; RV64-NEXT: sub a1, a0, a1
2507 ; RV64-NEXT: sltu a0, a0, a1
2508 ; RV64-NEXT: addi a0, a0, -1
2509 ; RV64-NEXT: and a0, a0, a1
2510 ; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma
2511 ; RV64-NEXT: vsrl.vi v24, v16, 1
2512 ; RV64-NEXT: vand.vx v24, v24, a2
2513 ; RV64-NEXT: vsub.vv v16, v16, v24
2514 ; RV64-NEXT: vand.vx v24, v16, a3
2515 ; RV64-NEXT: vsrl.vi v16, v16, 2
2516 ; RV64-NEXT: vand.vx v16, v16, a3
2517 ; RV64-NEXT: vadd.vv v16, v24, v16
2518 ; RV64-NEXT: vsrl.vi v24, v16, 4
2519 ; RV64-NEXT: vadd.vv v16, v16, v24
2520 ; RV64-NEXT: vand.vx v16, v16, a4
2521 ; RV64-NEXT: vmul.vx v16, v16, a5
2522 ; RV64-NEXT: vsrl.vx v16, v16, a6
2525 ; CHECK-ZVBB-LABEL: vp_ctpop_nxv16i64_unmasked:
2526 ; CHECK-ZVBB: # %bb.0:
2527 ; CHECK-ZVBB-NEXT: csrr a1, vlenb
2528 ; CHECK-ZVBB-NEXT: sub a2, a0, a1
2529 ; CHECK-ZVBB-NEXT: sltu a3, a0, a2
2530 ; CHECK-ZVBB-NEXT: addi a3, a3, -1
2531 ; CHECK-ZVBB-NEXT: and a2, a3, a2
2532 ; CHECK-ZVBB-NEXT: vsetvli zero, a2, e64, m8, ta, ma
2533 ; CHECK-ZVBB-NEXT: vcpop.v v16, v16
2534 ; CHECK-ZVBB-NEXT: bltu a0, a1, .LBB47_2
2535 ; CHECK-ZVBB-NEXT: # %bb.1:
2536 ; CHECK-ZVBB-NEXT: mv a0, a1
2537 ; CHECK-ZVBB-NEXT: .LBB47_2:
2538 ; CHECK-ZVBB-NEXT: vsetvli zero, a0, e64, m8, ta, ma
2539 ; CHECK-ZVBB-NEXT: vcpop.v v8, v8
2540 ; CHECK-ZVBB-NEXT: ret
2541 %v = call <vscale x 16 x i64> @llvm.vp.ctpop.nxv16i64(<vscale x 16 x i64> %va, <vscale x 16 x i1> splat (i1 true), i32 %evl)
2542 ret <vscale x 16 x i64> %v
2546 declare <vscale x 1 x i9> @llvm.vp.ctpop.nxv1i9(<vscale x 1 x i9>, <vscale x 1 x i1>, i32)
2548 define <vscale x 1 x i9> @vp_ctpop_nxv1i9(<vscale x 1 x i9> %va, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; vp.ctpop on the illegal element type i9, promoted to e16 (mf4). The source
; is first masked to 9 bits (vand.vx …, 511) so the popcount of the widened
; lanes equals the i9 popcount; then the 16-bit SWAR sequence runs with
; 16-bit constants (0x5555, 0x3333, 0x0f0f, multiply by 257 and shift right
; 8). Zvbb still requires the 511 pre-mask before its vcpop.v.
; NOTE: CHECK lines are autogenerated by update_llc_test_checks.py.
2549 ; CHECK-LABEL: vp_ctpop_nxv1i9:
2551 ; CHECK-NEXT: li a1, 511
2552 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
2553 ; CHECK-NEXT: vand.vx v8, v8, a1, v0.t
2554 ; CHECK-NEXT: vsrl.vi v9, v8, 1, v0.t
2555 ; CHECK-NEXT: lui a0, 5
2556 ; CHECK-NEXT: addi a0, a0, 1365
2557 ; CHECK-NEXT: vand.vx v9, v9, a0, v0.t
2558 ; CHECK-NEXT: vsub.vv v8, v8, v9, v0.t
2559 ; CHECK-NEXT: lui a0, 3
2560 ; CHECK-NEXT: addi a0, a0, 819
2561 ; CHECK-NEXT: vand.vx v9, v8, a0, v0.t
2562 ; CHECK-NEXT: vsrl.vi v8, v8, 2, v0.t
2563 ; CHECK-NEXT: vand.vx v8, v8, a0, v0.t
2564 ; CHECK-NEXT: vadd.vv v8, v9, v8, v0.t
2565 ; CHECK-NEXT: vsrl.vi v9, v8, 4, v0.t
2566 ; CHECK-NEXT: vadd.vv v8, v8, v9, v0.t
2567 ; CHECK-NEXT: lui a0, 1
2568 ; CHECK-NEXT: addi a0, a0, -241
2569 ; CHECK-NEXT: vand.vx v8, v8, a0, v0.t
2570 ; CHECK-NEXT: li a0, 257
2571 ; CHECK-NEXT: vmul.vx v8, v8, a0, v0.t
2572 ; CHECK-NEXT: vsrl.vi v8, v8, 8, v0.t
2575 ; CHECK-ZVBB-LABEL: vp_ctpop_nxv1i9:
2576 ; CHECK-ZVBB: # %bb.0:
2577 ; CHECK-ZVBB-NEXT: li a1, 511
2578 ; CHECK-ZVBB-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
2579 ; CHECK-ZVBB-NEXT: vand.vx v8, v8, a1, v0.t
2580 ; CHECK-ZVBB-NEXT: vcpop.v v8, v8, v0.t
2581 ; CHECK-ZVBB-NEXT: ret
2582 %v = call <vscale x 1 x i9> @llvm.vp.ctpop.nxv1i9(<vscale x 1 x i9> %va, <vscale x 1 x i1> %m, i32 %evl)
2583 ret <vscale x 1 x i9> %v