; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=SSE
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=AVX,AVX1
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX2

declare <16 x i8> @llvm.x86.sse2.pavg.b(<16 x i8>, <16 x i8>) nounwind readnone
declare <8 x i16> @llvm.x86.sse2.pavg.w(<8 x i16>, <8 x i16>) nounwind readnone
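
; pavgb computes the unsigned rounding average (a + b + 1) >> 1, so averaging a
; value with itself returns the value unchanged and the intrinsic should fold
; away completely.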
define <16 x i8> @combine_pavgb_self(<16 x i8> %a0) {
; SSE-LABEL: combine_pavgb_self:
; SSE:       # %bb.0:
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_pavgb_self:
; AVX:       # %bb.0:
; AVX-NEXT:    retq
  %1 = call <16 x i8> @llvm.x86.sse2.pavg.b(<16 x i8> %a0, <16 x i8> %a0)
  ret <16 x i8> %1
}
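
; With every input masked to 5 bits, known-bits analysis can prove the 16-bit
; averages already fit in 8 bits, so truncating the concatenated results should
; lower to a single packuswb rather than extra masking and shuffles.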
define <16 x i8> @combine_pavgw_knownbits(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> %a2, <8 x i16> %a3) {
; SSE-LABEL: combine_pavgw_knownbits:
; SSE:       # %bb.0:
; SSE-NEXT:    movdqa {{.*#+}} xmm4 = [31,31,31,31,31,31,31,31]
; SSE-NEXT:    pand %xmm4, %xmm0
; SSE-NEXT:    pand %xmm4, %xmm1
; SSE-NEXT:    pavgw %xmm1, %xmm0
; SSE-NEXT:    pand %xmm4, %xmm2
; SSE-NEXT:    pand %xmm4, %xmm3
; SSE-NEXT:    pavgw %xmm2, %xmm3
; SSE-NEXT:    packuswb %xmm3, %xmm0
; SSE-NEXT:    retq
;
; AVX1-LABEL: combine_pavgw_knownbits:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vbroadcastss {{.*#+}} xmm4 = [31,31,31,31,31,31,31,31]
; AVX1-NEXT:    vpand %xmm4, %xmm0, %xmm0
; AVX1-NEXT:    vpand %xmm4, %xmm1, %xmm1
; AVX1-NEXT:    vpavgw %xmm1, %xmm0, %xmm0
; AVX1-NEXT:    vpand %xmm4, %xmm2, %xmm1
; AVX1-NEXT:    vpand %xmm4, %xmm3, %xmm2
; AVX1-NEXT:    vpavgw %xmm2, %xmm1, %xmm1
; AVX1-NEXT:    vpackuswb %xmm1, %xmm0, %xmm0
; AVX1-NEXT:    retq
;
; AVX2-LABEL: combine_pavgw_knownbits:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vpbroadcastw {{.*#+}} xmm4 = [31,31,31,31,31,31,31,31]
; AVX2-NEXT:    vpand %xmm4, %xmm0, %xmm0
; AVX2-NEXT:    vpand %xmm4, %xmm1, %xmm1
; AVX2-NEXT:    vpavgw %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    vpand %xmm4, %xmm2, %xmm1
; AVX2-NEXT:    vpand %xmm4, %xmm3, %xmm2
; AVX2-NEXT:    vpavgw %xmm2, %xmm1, %xmm1
; AVX2-NEXT:    vpackuswb %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    retq
  %m0 = and <8 x i16> %a0, <i16 31, i16 31, i16 31, i16 31, i16 31, i16 31, i16 31, i16 31>
  %m1 = and <8 x i16> %a1, <i16 31, i16 31, i16 31, i16 31, i16 31, i16 31, i16 31, i16 31>
  %m2 = and <8 x i16> %a2, <i16 31, i16 31, i16 31, i16 31, i16 31, i16 31, i16 31, i16 31>
  %m3 = and <8 x i16> %a3, <i16 31, i16 31, i16 31, i16 31, i16 31, i16 31, i16 31, i16 31>
  %avg01 = tail call <8 x i16> @llvm.x86.sse2.pavg.w(<8 x i16> %m0, <8 x i16> %m1)
  %avg23 = tail call <8 x i16> @llvm.x86.sse2.pavg.w(<8 x i16> %m2, <8 x i16> %m3)
  %shuffle = shufflevector <8 x i16> %avg01, <8 x i16> %avg23, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
  %trunc = trunc <16 x i16> %shuffle to <16 x i8>
  ret <16 x i8> %trunc
}