; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -S -instcombine | FileCheck %s

declare i32 @llvm.ctpop.i32(i32)
declare i64 @llvm.ctpop.i64(i64)
declare i8 @llvm.ctpop.i8(i8)
declare i7 @llvm.ctpop.i7(i7)
declare i1 @llvm.ctpop.i1(i1)
declare <2 x i32> @llvm.ctpop.v2i32(<2 x i32>)
declare void @llvm.assume(i1)
declare void @use(i32)

define i1 @test1(i32 %arg) {
; CHECK-LABEL: @test1(
; CHECK-NEXT:    ret i1 false
;
  %and = and i32 %arg, 15
  %cnt = call i32 @llvm.ctpop.i32(i32 %and)
  %res = icmp eq i32 %cnt, 9
  ret i1 %res
}

define i1 @test2(i32 %arg) {
; CHECK-LABEL: @test2(
; CHECK-NEXT:    ret i1 false
;
  %and = and i32 %arg, 1
  %cnt = call i32 @llvm.ctpop.i32(i32 %and)
  %res = icmp eq i32 %cnt, 2
  ret i1 %res
}

define i1 @test3(i32 %arg) {
; CHECK-LABEL: @test3(
; CHECK-NEXT:    [[ASSUME:%.*]] = icmp eq i32 [[ARG:%.*]], 0
; CHECK-NEXT:    call void @llvm.assume(i1 [[ASSUME]])
; CHECK-NEXT:    ret i1 false
;
  ;; Use an assume to make all the bits known without triggering constant
  ;; folding. This is trying to hit a corner case where we have to avoid
  ;; taking the log of 0.
  %assume = icmp eq i32 %arg, 0
  call void @llvm.assume(i1 %assume)
  %cnt = call i32 @llvm.ctpop.i32(i32 %arg)
  %res = icmp eq i32 %cnt, 2
  ret i1 %res
}

; Negative test for when we know nothing about the argument's bits.
define i1 @test4(i8 %arg) {
; CHECK-LABEL: @test4(
; CHECK-NEXT:    [[CNT:%.*]] = call i8 @llvm.ctpop.i8(i8 [[ARG:%.*]]), !range [[RNG0:![0-9]+]]
; CHECK-NEXT:    [[RES:%.*]] = icmp eq i8 [[CNT]], 2
; CHECK-NEXT:    ret i1 [[RES]]
;
  %cnt = call i8 @llvm.ctpop.i8(i8 %arg)
  %res = icmp eq i8 %cnt, 2
  ret i1 %res
}

; Test when the maximum possible pop count isn't one less than a power of 2
; and the compare constant is greater than that maximum but less than the
; next power of 2.
define i1 @test5(i32 %arg) {
; CHECK-LABEL: @test5(
; CHECK-NEXT:    ret i1 false
;
  %and = and i32 %arg, 3
  %cnt = call i32 @llvm.ctpop.i32(i32 %and)
  %res = icmp eq i32 %cnt, 3
  ret i1 %res
}

; Test when the maximum possible pop count isn't one less than a power of 2
; and the compare constant is greater than that maximum but less than the
; next power of 2.
; TODO: The icmp is unnecessary given the known bits of the input, but range
; metadata doesn't support vectors.
define <2 x i1> @test5vec(<2 x i32> %arg) {
; CHECK-LABEL: @test5vec(
; CHECK-NEXT:    [[AND:%.*]] = and <2 x i32> [[ARG:%.*]], <i32 3, i32 3>
; CHECK-NEXT:    [[CNT:%.*]] = call <2 x i32> @llvm.ctpop.v2i32(<2 x i32> [[AND]])
; CHECK-NEXT:    [[RES:%.*]] = icmp eq <2 x i32> [[CNT]], <i32 3, i32 3>
; CHECK-NEXT:    ret <2 x i1> [[RES]]
;
  %and = and <2 x i32> %arg, <i32 3, i32 3>
  %cnt = call <2 x i32> @llvm.ctpop.v2i32(<2 x i32> %and)
  %res = icmp eq <2 x i32> %cnt, <i32 3, i32 3>
  ret <2 x i1> %res
}

; No intrinsic or range metadata needed - ctpop of a bool bit is the bit itself.
define i1 @test6(i1 %arg) {
; CHECK-LABEL: @test6(
; CHECK-NEXT:    ret i1 [[ARG:%.*]]
;
  %cnt = call i1 @llvm.ctpop.i1(i1 %arg)
  ret i1 %cnt
}

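; ctpop of a value with at most one possibly-set bit is that bit shifted down
; to bit 0, so the intrinsic call is removed.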
define i8 @mask_one_bit(i8 %x) {
; CHECK-LABEL: @mask_one_bit(
; CHECK-NEXT:    [[A:%.*]] = lshr i8 [[X:%.*]], 4
; CHECK-NEXT:    [[R:%.*]] = and i8 [[A]], 1
; CHECK-NEXT:    ret i8 [[R]]
;
  %a = and i8 %x, 16
  %r = call i8 @llvm.ctpop.i8(i8 %a)
  ret i8 %r
}

define <2 x i32> @mask_one_bit_splat(<2 x i32> %x, <2 x i32>* %p) {
; CHECK-LABEL: @mask_one_bit_splat(
; CHECK-NEXT:    [[A:%.*]] = and <2 x i32> [[X:%.*]], <i32 2048, i32 2048>
; CHECK-NEXT:    store <2 x i32> [[A]], <2 x i32>* [[P:%.*]], align 8
; CHECK-NEXT:    [[R:%.*]] = lshr exact <2 x i32> [[A]], <i32 11, i32 11>
; CHECK-NEXT:    ret <2 x i32> [[R]]
;
  %a = and <2 x i32> %x, <i32 2048, i32 2048>
  store <2 x i32> %a, <2 x i32>* %p
  %r = call <2 x i32> @llvm.ctpop.v2i32(<2 x i32> %a)
  ret <2 x i32> %r
}

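; Parity: ctpop(~x) & 1 is the same as ctpop(x) & 1 when the type has an even
; number of bits, so the 'not' is dropped.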
define i32 @_parity_of_not(i32 %x) {
; CHECK-LABEL: @_parity_of_not(
; CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.ctpop.i32(i32 [[X:%.*]]), !range [[RNG1:![0-9]+]]
; CHECK-NEXT:    [[R:%.*]] = and i32 [[TMP1]], 1
; CHECK-NEXT:    ret i32 [[R]]
;
  %neg = xor i32 %x, -1
  %cnt = tail call i32 @llvm.ctpop.i32(i32 %neg)
  %r = and i32 %cnt, 1
  ret i32 %r
}

; Negative test - the parity-of-not fold needs an even number of bits in the type.
define i7 @_parity_of_not_odd_type(i7 %x) {
; CHECK-LABEL: @_parity_of_not_odd_type(
; CHECK-NEXT:    [[NEG:%.*]] = xor i7 [[X:%.*]], -1
; CHECK-NEXT:    [[CNT:%.*]] = tail call i7 @llvm.ctpop.i7(i7 [[NEG]]), !range [[RNG2:![0-9]+]]
; CHECK-NEXT:    [[R:%.*]] = and i7 [[CNT]], 1
; CHECK-NEXT:    ret i7 [[R]]
;
  %neg = xor i7 %x, -1
  %cnt = tail call i7 @llvm.ctpop.i7(i7 %neg)
  %r = and i7 %cnt, 1
  ret i7 %r
}

define <2 x i32> @_parity_of_not_vec(<2 x i32> %x) {
; CHECK-LABEL: @_parity_of_not_vec(
; CHECK-NEXT:    [[TMP1:%.*]] = call <2 x i32> @llvm.ctpop.v2i32(<2 x i32> [[X:%.*]])
; CHECK-NEXT:    [[R:%.*]] = and <2 x i32> [[TMP1]], <i32 1, i32 1>
; CHECK-NEXT:    ret <2 x i32> [[R]]
;
  %neg = xor <2 x i32> %x, <i32 -1, i32 -1>
  %cnt = tail call <2 x i32> @llvm.ctpop.v2i32(<2 x i32> %neg)
  %r = and <2 x i32> %cnt, <i32 1, i32 1>
  ret <2 x i32> %r
}

define <2 x i32> @_parity_of_not_undef(<2 x i32> %x) {
; CHECK-LABEL: @_parity_of_not_undef(
; CHECK-NEXT:    [[TMP1:%.*]] = call <2 x i32> @llvm.ctpop.v2i32(<2 x i32> [[X:%.*]])
; CHECK-NEXT:    [[R:%.*]] = and <2 x i32> [[TMP1]], <i32 1, i32 1>
; CHECK-NEXT:    ret <2 x i32> [[R]]
;
  %neg = xor <2 x i32> %x, <i32 undef, i32 -1>
  %cnt = tail call <2 x i32> @llvm.ctpop.v2i32(<2 x i32> %neg)
  %r = and <2 x i32> %cnt, <i32 1, i32 1>
  ret <2 x i32> %r
}

define <2 x i32> @_parity_of_not_undef2(<2 x i32> %x) {
; CHECK-LABEL: @_parity_of_not_undef2(
; CHECK-NEXT:    [[NEG:%.*]] = xor <2 x i32> [[X:%.*]], <i32 -1, i32 -1>
; CHECK-NEXT:    [[CNT:%.*]] = tail call <2 x i32> @llvm.ctpop.v2i32(<2 x i32> [[NEG]])
; CHECK-NEXT:    [[R:%.*]] = and <2 x i32> [[CNT]], <i32 1, i32 undef>
; CHECK-NEXT:    ret <2 x i32> [[R]]
;
  %neg = xor <2 x i32> %x, <i32 -1, i32 -1>
  %cnt = tail call <2 x i32> @llvm.ctpop.v2i32(<2 x i32> %neg)
  %r = and <2 x i32> %cnt, <i32 1, i32 undef>
  ret <2 x i32> %r
}

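; Each ctpop operand below has at most one set bit, so both calls reduce to a
; shifted bit and the add cannot overflow (nuw nsw).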
define i32 @ctpop_add(i32 %a, i32 %b) {
; CHECK-LABEL: @ctpop_add(
; CHECK-NEXT:    [[AND8:%.*]] = lshr i32 [[A:%.*]], 3
; CHECK-NEXT:    [[CTPOP1:%.*]] = and i32 [[AND8]], 1
; CHECK-NEXT:    [[AND2:%.*]] = lshr i32 [[B:%.*]], 1
; CHECK-NEXT:    [[CTPOP2:%.*]] = and i32 [[AND2]], 1
; CHECK-NEXT:    [[RES:%.*]] = add nuw nsw i32 [[CTPOP1]], [[CTPOP2]]
; CHECK-NEXT:    ret i32 [[RES]]
;
  %and8 = and i32 %a, 8
  %ctpop1 = tail call i32 @llvm.ctpop.i32(i32 %and8)
  %and2 = and i32 %b, 2
  %ctpop2 = tail call i32 @llvm.ctpop.i32(i32 %and2)
  %res = add i32 %ctpop1, %ctpop2
  ret i32 %res
}

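; When the two ctpop operands have no bits in common, ctpop(a) + ctpop(b) is
; folded to a single ctpop of the combined bits, assembled here with a funnel
; shift.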
define i32 @ctpop_add_no_common_bits(i32 %a, i32 %b) {
; CHECK-LABEL: @ctpop_add_no_common_bits(
; CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.fshl.i32(i32 [[A:%.*]], i32 [[B:%.*]], i32 16)
; CHECK-NEXT:    [[TMP2:%.*]] = call i32 @llvm.ctpop.i32(i32 [[TMP1]]), !range [[RNG1]]
; CHECK-NEXT:    ret i32 [[TMP2]]
;
  %shl16 = shl i32 %a, 16
  %ctpop1 = tail call i32 @llvm.ctpop.i32(i32 %shl16)
  %lshl16 = lshr i32 %b, 16
  %ctpop2 = tail call i32 @llvm.ctpop.i32(i32 %lshl16)
  %res = add i32 %ctpop1, %ctpop2
  ret i32 %res
}

define <2 x i32> @ctpop_add_no_common_bits_vec(<2 x i32> %a, <2 x i32> %b) {
; CHECK-LABEL: @ctpop_add_no_common_bits_vec(
; CHECK-NEXT:    [[TMP1:%.*]] = call <2 x i32> @llvm.fshl.v2i32(<2 x i32> [[A:%.*]], <2 x i32> [[B:%.*]], <2 x i32> <i32 16, i32 16>)
; CHECK-NEXT:    [[TMP2:%.*]] = call <2 x i32> @llvm.ctpop.v2i32(<2 x i32> [[TMP1]])
; CHECK-NEXT:    ret <2 x i32> [[TMP2]]
;
  %shl16 = shl <2 x i32> %a, <i32 16, i32 16>
  %ctpop1 = tail call <2 x i32> @llvm.ctpop.v2i32(<2 x i32> %shl16)
  %lshl16 = lshr <2 x i32> %b, <i32 16, i32 16>
  %ctpop2 = tail call <2 x i32> @llvm.ctpop.v2i32(<2 x i32> %lshl16)
  %res = add <2 x i32> %ctpop1, %ctpop2
  ret <2 x i32> %res
}

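; An extra use of one ctpop blocks the merge into a single ctpop, but the add
; still gets nuw/nsw flags.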
define <2 x i32> @ctpop_add_no_common_bits_vec_use(<2 x i32> %a, <2 x i32> %b, <2 x i32>* %p) {
; CHECK-LABEL: @ctpop_add_no_common_bits_vec_use(
; CHECK-NEXT:    [[SHL16:%.*]] = shl <2 x i32> [[A:%.*]], <i32 16, i32 16>
; CHECK-NEXT:    [[CTPOP1:%.*]] = tail call <2 x i32> @llvm.ctpop.v2i32(<2 x i32> [[SHL16]])
; CHECK-NEXT:    [[LSHL16:%.*]] = lshr <2 x i32> [[B:%.*]], <i32 16, i32 16>
; CHECK-NEXT:    [[CTPOP2:%.*]] = tail call <2 x i32> @llvm.ctpop.v2i32(<2 x i32> [[LSHL16]])
; CHECK-NEXT:    store <2 x i32> [[CTPOP2]], <2 x i32>* [[P:%.*]], align 8
; CHECK-NEXT:    [[RES:%.*]] = add nuw nsw <2 x i32> [[CTPOP1]], [[CTPOP2]]
; CHECK-NEXT:    ret <2 x i32> [[RES]]
;
  %shl16 = shl <2 x i32> %a, <i32 16, i32 16>
  %ctpop1 = tail call <2 x i32> @llvm.ctpop.v2i32(<2 x i32> %shl16)
  %lshl16 = lshr <2 x i32> %b, <i32 16, i32 16>
  %ctpop2 = tail call <2 x i32> @llvm.ctpop.v2i32(<2 x i32> %lshl16)
  store <2 x i32> %ctpop2, <2 x i32>* %p
  %res = add <2 x i32> %ctpop1, %ctpop2
  ret <2 x i32> %res
}

define <2 x i32> @ctpop_add_no_common_bits_vec_use2(<2 x i32> %a, <2 x i32> %b, <2 x i32>* %p) {
; CHECK-LABEL: @ctpop_add_no_common_bits_vec_use2(
; CHECK-NEXT:    [[SHL16:%.*]] = shl <2 x i32> [[A:%.*]], <i32 16, i32 16>
; CHECK-NEXT:    [[CTPOP1:%.*]] = tail call <2 x i32> @llvm.ctpop.v2i32(<2 x i32> [[SHL16]])
; CHECK-NEXT:    store <2 x i32> [[CTPOP1]], <2 x i32>* [[P:%.*]], align 8
; CHECK-NEXT:    [[LSHL16:%.*]] = lshr <2 x i32> [[B:%.*]], <i32 16, i32 16>
; CHECK-NEXT:    [[CTPOP2:%.*]] = tail call <2 x i32> @llvm.ctpop.v2i32(<2 x i32> [[LSHL16]])
; CHECK-NEXT:    [[RES:%.*]] = add nuw nsw <2 x i32> [[CTPOP1]], [[CTPOP2]]
; CHECK-NEXT:    ret <2 x i32> [[RES]]
;
  %shl16 = shl <2 x i32> %a, <i32 16, i32 16>
  %ctpop1 = tail call <2 x i32> @llvm.ctpop.v2i32(<2 x i32> %shl16)
  store <2 x i32> %ctpop1, <2 x i32>* %p
  %lshl16 = lshr <2 x i32> %b, <i32 16, i32 16>
  %ctpop2 = tail call <2 x i32> @llvm.ctpop.v2i32(<2 x i32> %lshl16)
  %res = add <2 x i32> %ctpop1, %ctpop2
  ret <2 x i32> %res
}

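; ctpop is invariant under rotation, so the rotate (a funnel shift with equal
; operands) is removed.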
define i8 @ctpop_rotate_left(i8 %a, i8 %amt) {
; CHECK-LABEL: @ctpop_rotate_left(
; CHECK-NEXT:    [[RES:%.*]] = tail call i8 @llvm.ctpop.i8(i8 [[A:%.*]]), !range [[RNG0]]
; CHECK-NEXT:    ret i8 [[RES]]
;
  %rotl = tail call i8 @llvm.fshl.i8(i8 %a, i8 %a, i8 %amt)
  %res = tail call i8 @llvm.ctpop.i8(i8 %rotl)
  ret i8 %res
}

define i8 @ctpop_rotate_right(i8 %a, i8 %amt) {
; CHECK-LABEL: @ctpop_rotate_right(
; CHECK-NEXT:    [[RES:%.*]] = tail call i8 @llvm.ctpop.i8(i8 [[A:%.*]]), !range [[RNG0]]
; CHECK-NEXT:    ret i8 [[RES]]
;
  %rotr = tail call i8 @llvm.fshr.i8(i8 %a, i8 %a, i8 %amt)
  %res = tail call i8 @llvm.ctpop.i8(i8 %rotr)
  ret i8 %res
}

declare i8 @llvm.fshl.i8(i8, i8, i8)
declare i8 @llvm.fshr.i8(i8, i8, i8)

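; bitwidth - ctpop(x) is folded to ctpop(~x).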
define i8 @sub_ctpop(i8 %a) {
; CHECK-LABEL: @sub_ctpop(
; CHECK-NEXT:    [[TMP1:%.*]] = xor i8 [[A:%.*]], -1
; CHECK-NEXT:    [[TMP2:%.*]] = call i8 @llvm.ctpop.i8(i8 [[TMP1]]), !range [[RNG0]]
; CHECK-NEXT:    ret i8 [[TMP2]]
;
  %cnt = tail call i8 @llvm.ctpop.i8(i8 %a)
  %res = sub i8 8, %cnt
  ret i8 %res
}

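; Negative test - the constant is not the bit width, so no fold.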
define i8 @sub_ctpop_wrong_cst(i8 %a) {
; CHECK-LABEL: @sub_ctpop_wrong_cst(
; CHECK-NEXT:    [[CNT:%.*]] = tail call i8 @llvm.ctpop.i8(i8 [[A:%.*]]), !range [[RNG0]]
; CHECK-NEXT:    [[RES:%.*]] = sub nsw i8 5, [[CNT]]
; CHECK-NEXT:    ret i8 [[RES]]
;
  %cnt = tail call i8 @llvm.ctpop.i8(i8 %a)
  %res = sub i8 5, %cnt
  ret i8 %res
}

define i8 @sub_ctpop_unknown(i8 %a, i8 %b) {
; CHECK-LABEL: @sub_ctpop_unknown(
; CHECK-NEXT:    [[CNT:%.*]] = tail call i8 @llvm.ctpop.i8(i8 [[A:%.*]]), !range [[RNG0]]
; CHECK-NEXT:    [[RES:%.*]] = sub i8 [[B:%.*]], [[CNT]]
; CHECK-NEXT:    ret i8 [[RES]]
;
  %cnt = tail call i8 @llvm.ctpop.i8(i8 %a)
  %res = sub i8 %b, %cnt
  ret i8 %res
}

define <2 x i32> @sub_ctpop_vec(<2 x i32> %a) {
; CHECK-LABEL: @sub_ctpop_vec(
; CHECK-NEXT:    [[TMP1:%.*]] = xor <2 x i32> [[A:%.*]], <i32 -1, i32 -1>
; CHECK-NEXT:    [[TMP2:%.*]] = call <2 x i32> @llvm.ctpop.v2i32(<2 x i32> [[TMP1]])
; CHECK-NEXT:    ret <2 x i32> [[TMP2]]
;
  %cnt = tail call <2 x i32> @llvm.ctpop.v2i32(<2 x i32> %a)
  %res = sub <2 x i32> <i32 32, i32 32>, %cnt
  ret <2 x i32> %res
}

define <2 x i32> @sub_ctpop_vec_extra_use(<2 x i32> %a, <2 x i32>* %p) {
; CHECK-LABEL: @sub_ctpop_vec_extra_use(
; CHECK-NEXT:    [[CNT:%.*]] = tail call <2 x i32> @llvm.ctpop.v2i32(<2 x i32> [[A:%.*]])
; CHECK-NEXT:    store <2 x i32> [[CNT]], <2 x i32>* [[P:%.*]], align 8
; CHECK-NEXT:    [[RES:%.*]] = sub nuw nsw <2 x i32> <i32 32, i32 32>, [[CNT]]
; CHECK-NEXT:    ret <2 x i32> [[RES]]
;
  %cnt = tail call <2 x i32> @llvm.ctpop.v2i32(<2 x i32> %a)
  store <2 x i32> %cnt, <2 x i32>* %p
  %res = sub <2 x i32> <i32 32, i32 32>, %cnt
  ret <2 x i32> %res
}

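; ctpop of a zero-extended value is computed on the narrow source type and then
; zero-extended.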
define i32 @zext_ctpop(i16 %x) {
; CHECK-LABEL: @zext_ctpop(
; CHECK-NEXT:    [[TMP1:%.*]] = call i16 @llvm.ctpop.i16(i16 [[X:%.*]]), !range [[RNG3:![0-9]+]]
; CHECK-NEXT:    [[P:%.*]] = zext i16 [[TMP1]] to i32
; CHECK-NEXT:    ret i32 [[P]]
;
  %z = zext i16 %x to i32
  %p = call i32 @llvm.ctpop.i32(i32 %z)
  ret i32 %p
}

define <2 x i32> @zext_ctpop_vec(<2 x i7> %x) {
; CHECK-LABEL: @zext_ctpop_vec(
; CHECK-NEXT:    [[TMP1:%.*]] = call <2 x i7> @llvm.ctpop.v2i7(<2 x i7> [[X:%.*]])
; CHECK-NEXT:    [[P:%.*]] = zext <2 x i7> [[TMP1]] to <2 x i32>
; CHECK-NEXT:    ret <2 x i32> [[P]]
;
  %z = zext <2 x i7> %x to <2 x i32>
  %p = call <2 x i32> @llvm.ctpop.v2i32(<2 x i32> %z)
  ret <2 x i32> %p
}

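; Extra use of the zext - the ctpop is not narrowed.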
define i32 @zext_ctpop_extra_use(i16 %x, i32* %q) {
; CHECK-LABEL: @zext_ctpop_extra_use(
; CHECK-NEXT:    [[Z:%.*]] = zext i16 [[X:%.*]] to i32
; CHECK-NEXT:    store i32 [[Z]], i32* [[Q:%.*]], align 4
; CHECK-NEXT:    [[P:%.*]] = call i32 @llvm.ctpop.i32(i32 [[Z]]), !range [[RNG4:![0-9]+]]
; CHECK-NEXT:    ret i32 [[P]]
;
  %z = zext i16 %x to i32
  store i32 %z, i32* %q
  %p = call i32 @llvm.ctpop.i32(i32 %z)
  ret i32 %p
}

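; parity(a) ^ parity(b) is folded to parity(a ^ b).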
define i32 @parity_xor(i32 %arg, i32 %arg1) {
; CHECK-LABEL: @parity_xor(
; CHECK-NEXT:    [[TMP1:%.*]] = xor i32 [[ARG1:%.*]], [[ARG:%.*]]
; CHECK-NEXT:    [[TMP2:%.*]] = call i32 @llvm.ctpop.i32(i32 [[TMP1]]), !range [[RNG1]]
; CHECK-NEXT:    [[I4:%.*]] = and i32 [[TMP2]], 1
; CHECK-NEXT:    ret i32 [[I4]]
;
  %i = tail call i32 @llvm.ctpop.i32(i32 %arg)
  %i2 = tail call i32 @llvm.ctpop.i32(i32 %arg1)
  %i3 = xor i32 %i2, %i
  %i4 = and i32 %i3, 1
  ret i32 %i4
}

define i32 @parity_xor_trunc(i64 %arg, i64 %arg1) {
; CHECK-LABEL: @parity_xor_trunc(
; CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[ARG1:%.*]], [[ARG:%.*]]
; CHECK-NEXT:    [[TMP2:%.*]] = call i64 @llvm.ctpop.i64(i64 [[TMP1]]), !range [[RNG5:![0-9]+]]
; CHECK-NEXT:    [[I4:%.*]] = trunc i64 [[TMP2]] to i32
; CHECK-NEXT:    [[I5:%.*]] = and i32 [[I4]], 1
; CHECK-NEXT:    ret i32 [[I5]]
;
  %i = tail call i64 @llvm.ctpop.i64(i64 %arg)
  %i2 = tail call i64 @llvm.ctpop.i64(i64 %arg1)
  %i3 = xor i64 %i2, %i
  %i4 = trunc i64 %i3 to i32
  %i5 = and i32 %i4, 1
  ret i32 %i5
}

define <2 x i32> @parity_xor_vec(<2 x i32> %arg, <2 x i32> %arg1) {
; CHECK-LABEL: @parity_xor_vec(
; CHECK-NEXT:    [[TMP1:%.*]] = xor <2 x i32> [[ARG1:%.*]], [[ARG:%.*]]
; CHECK-NEXT:    [[TMP2:%.*]] = call <2 x i32> @llvm.ctpop.v2i32(<2 x i32> [[TMP1]])
; CHECK-NEXT:    [[I4:%.*]] = and <2 x i32> [[TMP2]], <i32 1, i32 1>
; CHECK-NEXT:    ret <2 x i32> [[I4]]
;
  %i = tail call <2 x i32> @llvm.ctpop.v2i32(<2 x i32> %arg)
  %i2 = tail call <2 x i32> @llvm.ctpop.v2i32(<2 x i32> %arg1)
  %i3 = xor <2 x i32> %i2, %i
  %i4 = and <2 x i32> %i3, <i32 1, i32 1>
  ret <2 x i32> %i4
}

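; Negative test - the mask is not 1, so this is not a parity computation.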
define i32 @parity_xor_wrong_cst(i32 %arg, i32 %arg1) {
; CHECK-LABEL: @parity_xor_wrong_cst(
; CHECK-NEXT:    [[I:%.*]] = tail call i32 @llvm.ctpop.i32(i32 [[ARG:%.*]]), !range [[RNG1]]
; CHECK-NEXT:    [[I2:%.*]] = tail call i32 @llvm.ctpop.i32(i32 [[ARG1:%.*]]), !range [[RNG1]]
; CHECK-NEXT:    [[I3:%.*]] = xor i32 [[I2]], [[I]]
; CHECK-NEXT:    [[I4:%.*]] = and i32 [[I3]], 3
; CHECK-NEXT:    ret i32 [[I4]]
;
  %i = tail call i32 @llvm.ctpop.i32(i32 %arg)
  %i2 = tail call i32 @llvm.ctpop.i32(i32 %arg1)
  %i3 = xor i32 %i2, %i
  %i4 = and i32 %i3, 3
  ret i32 %i4
}

define i32 @parity_xor_extra_use(i32 %arg, i32 %arg1) {
; CHECK-LABEL: @parity_xor_extra_use(
; CHECK-NEXT:    [[I:%.*]] = tail call i32 @llvm.ctpop.i32(i32 [[ARG:%.*]]), !range [[RNG1]]
; CHECK-NEXT:    [[I2:%.*]] = and i32 [[I]], 1
; CHECK-NEXT:    tail call void @use(i32 [[I2]])
; CHECK-NEXT:    [[I3:%.*]] = tail call i32 @llvm.ctpop.i32(i32 [[ARG1:%.*]]), !range [[RNG1]]
; CHECK-NEXT:    [[I4:%.*]] = and i32 [[I3]], 1
; CHECK-NEXT:    [[I5:%.*]] = xor i32 [[I4]], [[I2]]
; CHECK-NEXT:    ret i32 [[I5]]
;
  %i = tail call i32 @llvm.ctpop.i32(i32 %arg)
  %i2 = and i32 %i, 1
  tail call void @use(i32 %i2)
  %i3 = tail call i32 @llvm.ctpop.i32(i32 %arg1)
  %i4 = and i32 %i3, 1
  %i5 = xor i32 %i4, %i2
  ret i32 %i5
}

define i32 @parity_xor_extra_use2(i32 %arg, i32 %arg1) {
; CHECK-LABEL: @parity_xor_extra_use2(
; CHECK-NEXT:    [[I:%.*]] = tail call i32 @llvm.ctpop.i32(i32 [[ARG1:%.*]]), !range [[RNG1]]
; CHECK-NEXT:    [[I2:%.*]] = and i32 [[I]], 1
; CHECK-NEXT:    tail call void @use(i32 [[I2]])
; CHECK-NEXT:    [[I3:%.*]] = tail call i32 @llvm.ctpop.i32(i32 [[ARG:%.*]]), !range [[RNG1]]
; CHECK-NEXT:    [[I4:%.*]] = and i32 [[I3]], 1
; CHECK-NEXT:    [[I5:%.*]] = xor i32 [[I2]], [[I4]]
; CHECK-NEXT:    ret i32 [[I5]]
;
  %i = tail call i32 @llvm.ctpop.i32(i32 %arg1)
  %i2 = and i32 %i, 1
  tail call void @use(i32 %i2)
  %i3 = tail call i32 @llvm.ctpop.i32(i32 %arg)
  %i4 = and i32 %i3, 1
  %i5 = xor i32 %i2, %i4
  ret i32 %i5
}