; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i686-unknown -mattr=+sse2 | FileCheck %s --check-prefix=X86
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx | FileCheck %s --check-prefix=X64

; These tests just check that the plumbing is in place for @llvm.bitreverse. The
; actual output is massive at the moment as llvm.bitreverse is not yet legal.

declare i32 @llvm.bitreverse.i32(i32) readnone
declare <4 x i32> @llvm.bitreverse.v4i32(<4 x i32>) readnone

; fold (bitreverse undef) -> undef
define i32 @test_undef() nounwind {
; X86-LABEL: test_undef:
; X86:       # %bb.0:
; X86-NEXT:    retl
;
; X64-LABEL: test_undef:
; X64:       # %bb.0:
; X64-NEXT:    retq
  %b = call i32 @llvm.bitreverse.i32(i32 undef)
  ret i32 %b
}

; fold (bitreverse (bitreverse x)) -> x
define i32 @test_bitreverse_bitreverse(i32 %a0) nounwind {
; X86-LABEL: test_bitreverse_bitreverse:
; X86:       # %bb.0:
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    retl
;
; X64-LABEL: test_bitreverse_bitreverse:
; X64:       # %bb.0:
; X64-NEXT:    movl %edi, %eax
; X64-NEXT:    retq
  %b = call i32 @llvm.bitreverse.i32(i32 %a0)
  %c = call i32 @llvm.bitreverse.i32(i32 %b)
  ret i32 %c
}
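
; fold (and (bitreverse (or x, 0x80000000)), -2) -> (and (bitreverse x), -2)
; (the 'or' only sets bit 31, which bitreverse moves into bit 0, and the
; trailing 'and' with -2 clears bit 0 again, so the 'or' contributes nothing
; and only the bitreverse expansion itself should be emitted)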
define <4 x i32> @test_demandedbits_bitreverse(<4 x i32> %a0) nounwind {
; X86-LABEL: test_demandedbits_bitreverse:
; X86:       # %bb.0:
; X86-NEXT:    pxor %xmm1, %xmm1
; X86-NEXT:    movdqa %xmm0, %xmm2
; X86-NEXT:    punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm1[8],xmm2[9],xmm1[9],xmm2[10],xmm1[10],xmm2[11],xmm1[11],xmm2[12],xmm1[12],xmm2[13],xmm1[13],xmm2[14],xmm1[14],xmm2[15],xmm1[15]
; X86-NEXT:    pshuflw {{.*#+}} xmm2 = xmm2[3,2,1,0,4,5,6,7]
; X86-NEXT:    pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,7,6,5,4]
; X86-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; X86-NEXT:    pshuflw {{.*#+}} xmm0 = xmm0[3,2,1,0,4,5,6,7]
; X86-NEXT:    pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,7,6,5,4]
; X86-NEXT:    packuswb %xmm2, %xmm0
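; (the unpack/shuffle/pack sequence above reverses the byte order of each i32
; element; the shift/mask/or ladder below then reverses the bits within each
; byte, swapping nibbles, then bit pairs, then adjacent bits)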
; X86-NEXT:    movdqa %xmm0, %xmm1
; X86-NEXT:    psllw $4, %xmm1
; X86-NEXT:    pand {{\.LCPI.*}}, %xmm1
; X86-NEXT:    psrlw $4, %xmm0
; X86-NEXT:    pand {{\.LCPI.*}}, %xmm0
; X86-NEXT:    por %xmm1, %xmm0
; X86-NEXT:    movdqa {{.*#+}} xmm1 = [51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51]
; X86-NEXT:    pand %xmm0, %xmm1
; X86-NEXT:    psllw $2, %xmm1
; X86-NEXT:    pand {{\.LCPI.*}}, %xmm0
; X86-NEXT:    psrlw $2, %xmm0
; X86-NEXT:    por %xmm1, %xmm0
; X86-NEXT:    movdqa {{.*#+}} xmm1 = [85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85]
; X86-NEXT:    pand %xmm0, %xmm1
; X86-NEXT:    paddb %xmm1, %xmm1
; X86-NEXT:    pand {{\.LCPI.*}}, %xmm0
; X86-NEXT:    psrlw $1, %xmm0
; X86-NEXT:    por %xmm1, %xmm0
; X86-NEXT:    pand {{\.LCPI.*}}, %xmm0
; X86-NEXT:    retl
;
; X64-LABEL: test_demandedbits_bitreverse:
; X64:       # %bb.0:
; X64-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12]
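; (the vpshufb above reverses the bytes of each i32 element; the two table
; lookups below reverse the bits of the low and high nibble of every byte)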
; X64-NEXT:    vmovdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; X64-NEXT:    vpand %xmm1, %xmm0, %xmm2
; X64-NEXT:    vmovdqa {{.*#+}} xmm3 = [0,128,64,192,32,160,96,224,16,144,80,208,48,176,112,240]
; X64-NEXT:    vpshufb %xmm2, %xmm3, %xmm2
; X64-NEXT:    vpsrlw $4, %xmm0, %xmm0
; X64-NEXT:    vpand %xmm1, %xmm0, %xmm0
; X64-NEXT:    vmovdqa {{.*#+}} xmm1 = [0,8,4,12,2,10,6,14,1,9,5,13,3,11,7,15]
; X64-NEXT:    vpshufb %xmm0, %xmm1, %xmm0
; X64-NEXT:    vpor %xmm0, %xmm2, %xmm0
; X64-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
; X64-NEXT:    retq
  %b = or <4 x i32> %a0, <i32 2147483648, i32 2147483648, i32 2147483648, i32 2147483648>
  %c = call <4 x i32> @llvm.bitreverse.v4i32(<4 x i32> %b)
  %d = and <4 x i32> %c, <i32 -2, i32 -2, i32 -2, i32 -2>
  ret <4 x i32> %d
}