; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
; RUN: llc < %s -mtriple=x86_64-- -mcpu=btver2 | FileCheck %s
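; PR81136: compare a volatile <4 x i64> load against a constant vector, AND that
; with a mask built by shuffling a sign-extended <2 x i16> compare up to 4 lanes,
; and return the negated popcount of the resulting 4-bit mask.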

define i64 @PR81136(i32 %a0, i32 %a1, ptr %a2) {
; CHECK-LABEL: PR81136:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmovd %edi, %xmm0
; CHECK-NEXT:    vmovd %esi, %xmm1
; CHECK-NEXT:    vmovdqa (%rdx), %ymm2
; CHECK-NEXT:    vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT:    vpcmpgtw %xmm0, %xmm1, %xmm0
; CHECK-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
; CHECK-NEXT:    vpxor %xmm1, %xmm0, %xmm0
; CHECK-NEXT:    vpmovsxwq %xmm0, %xmm0
; CHECK-NEXT:    vpalignr {{.*#+}} xmm0 = mem[8,9,10,11,12,13,14,15],xmm0[0,1,2,3,4,5,6,7]
; CHECK-NEXT:    vpcmpeqq %xmm3, %xmm0, %xmm0
; CHECK-NEXT:    vpxor %xmm1, %xmm0, %xmm0
; CHECK-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
; CHECK-NEXT:    vpcmpeqq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm1
; CHECK-NEXT:    vextractf128 $1, %ymm2, %xmm2
; CHECK-NEXT:    vpcmpeqq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
; CHECK-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm1
; CHECK-NEXT:    vandnpd %ymm0, %ymm1, %ymm0
; CHECK-NEXT:    vmovmskpd %ymm0, %eax
; CHECK-NEXT:    popcntl %eax, %eax
; CHECK-NEXT:    negq %rax
; CHECK-NEXT:    retq
  %v0 = bitcast i32 %a0 to <2 x i16>
  %v1 = bitcast i32 %a1 to <2 x i16>
  %cmp15 = icmp sle <2 x i16> %v1, %v0
  %conv16 = sext <2 x i1> %cmp15 to <2 x i64>
  %shuffle29 = shufflevector <2 x i64> %conv16, <2 x i64> <i64 128, i64 1>, <4 x i32> <i32 2, i32 3, i32 3, i32 0>
  %data = load volatile <4 x i64>, ptr %a2, align 32
  %cmp65 = icmp ne <4 x i64> %data, <i64 -2071602529, i64 -1537047284, i64 717942021, i64 597457239>
  %cmp67 = icmp ne <4 x i64> %shuffle29, zeroinitializer
  %and = and <4 x i1> %cmp65, %cmp67
  %mask = bitcast <4 x i1> %and to i4
  %cnt = tail call i4 @llvm.ctpop.i4(i4 %mask)
  %cntz = zext i4 %cnt to i64
  %res = sub nsw i64 0, %cntz
  ret i64 %res
}
declare i4 @llvm.ctpop.i4(i4)