; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+ssse3 | FileCheck %s --check-prefixes=CHECK,SSE,SSSE3
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+sse4.1 | FileCheck %s --check-prefixes=CHECK,SSE,SSE41
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx | FileCheck %s --check-prefixes=CHECK,AVX,AVX1
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=CHECK,AVX,AVX2
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx512f | FileCheck %s --check-prefixes=CHECK,AVX,AVX512F
; Combine tests involving SSE3/SSSE3 target shuffles (MOVDDUP, MOVSHDUP, MOVSLDUP, PSHUFB)
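; PSHUFB mask reference for the tests below: each mask byte selects a source
; byte by its low 4 bits, and any mask byte with its top bit set (written as
; 128 or -1 here) forces the corresponding result byte to zero. A mask such as
; <i8 0, i8 -1, i8 1, i8 -1, ...> therefore interleaves the low source bytes
; with zeros.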
declare <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8>, <16 x i8>)
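; A chain of PSHUFBs in which every byte is ultimately routed through a
; zeroing mask lane should constant-fold to an all-zeros vector, i.e. a single
; register clear.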
define <16 x i8> @combine_vpshufb_as_zero(<16 x i8> %a0) {
; SSE-LABEL: combine_vpshufb_as_zero:
; SSE-NEXT: xorps %xmm0, %xmm0
; AVX-LABEL: combine_vpshufb_as_zero:
; AVX-NEXT: vxorps %xmm0, %xmm0, %xmm0
  %res0 = call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %a0, <16 x i8> <i8 128, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>)
  %res1 = call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %res0, <16 x i8> <i8 0, i8 128, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>)
  %res2 = call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %res1, <16 x i8> <i8 0, i8 1, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128>)
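; Two PSHUFBs that compose to "keep the low 8 bytes, zero the upper 8 bytes"
; are expected to collapse into a single MOVQ.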
define <16 x i8> @combine_vpshufb_as_movq(<16 x i8> %a0) {
; SSE-LABEL: combine_vpshufb_as_movq:
; SSE-NEXT: movq {{.*#+}} xmm0 = xmm0[0],zero
; AVX-LABEL: combine_vpshufb_as_movq:
; AVX-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero
  %res0 = call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %a0, <16 x i8> <i8 0, i8 128, i8 1, i8 128, i8 2, i8 128, i8 3, i8 128, i8 4, i8 128, i8 5, i8 128, i8 6, i8 128, i8 7, i8 128>)
  %res1 = call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %res0, <16 x i8> <i8 0, i8 2, i8 4, i8 6, i8 8, i8 10, i8 12, i8 14, i8 1, i8 3, i8 5, i8 7, i8 9, i8 11, i8 13, i8 15>)
define <2 x double> @combine_pshufb_as_movsd(<2 x double> %a0, <2 x double> %a1) {
; SSSE3-LABEL: combine_pshufb_as_movsd:
; SSSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3]
; SSE41-LABEL: combine_pshufb_as_movsd:
; SSE41-NEXT: blendps {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3]
; AVX-LABEL: combine_pshufb_as_movsd:
; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3]
  %1 = shufflevector <2 x double> %a0, <2 x double> %a1, <2 x i32> <i32 3, i32 0>
  %2 = bitcast <2 x double> %1 to <16 x i8>
  %3 = tail call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %2, <16 x i8> <i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7>)
  %4 = bitcast <16 x i8> %3 to <2 x double>
define <4 x float> @combine_pshufb_as_movss(<4 x float> %a0, <4 x float> %a1) {
; SSSE3-LABEL: combine_pshufb_as_movss:
; SSSE3-NEXT: movss {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
; SSE41-LABEL: combine_pshufb_as_movss:
; SSE41-NEXT: blendps {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
; AVX-LABEL: combine_pshufb_as_movss:
; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
  %1 = shufflevector <4 x float> %a0, <4 x float> %a1, <4 x i32> <i32 4, i32 3, i32 2, i32 1>
  %2 = bitcast <4 x float> %1 to <16 x i8>
  %3 = tail call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %2, <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 12, i8 13, i8 14, i8 15, i8 8, i8 9, i8 10, i8 11, i8 4, i8 5, i8 6, i8 7>)
  %4 = bitcast <16 x i8> %3 to <4 x float>
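; Spreading the low four bytes into zero-filled 32-bit lanes is a byte-to-dword
; zero extension; with SSE4.1/AVX this should become (V)PMOVZXBD, while plain
; SSSE3 keeps the PSHUFB.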
define <4 x i32> @combine_pshufb_as_zext(<16 x i8> %a0) {
; SSSE3-LABEL: combine_pshufb_as_zext:
; SSSE3-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
; SSE41-LABEL: combine_pshufb_as_zext:
; SSE41-NEXT: pmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
; AVX-LABEL: combine_pshufb_as_zext:
; AVX-NEXT: vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
  %1 = call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %a0, <16 x i8> <i8 0, i8 -1, i8 -1, i8 -1, i8 1, i8 -1, i8 -1, i8 -1, i8 2, i8 -1, i8 -1, i8 -1, i8 3, i8 -1, i8 -1, i8 -1>)
  %2 = bitcast <16 x i8> %1 to <4 x i32>
define <2 x double> @combine_pshufb_as_vzmovl_64(<2 x double> %a0) {
; SSE-LABEL: combine_pshufb_as_vzmovl_64:
; SSE-NEXT: movq {{.*#+}} xmm0 = xmm0[0],zero
; AVX-LABEL: combine_pshufb_as_vzmovl_64:
; AVX-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero
  %1 = bitcast <2 x double> %a0 to <16 x i8>
  %2 = call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %1, <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>)
  %3 = bitcast <16 x i8> %2 to <2 x double>
define <4 x float> @combine_pshufb_as_vzmovl_32(<4 x float> %a0) {
; SSSE3-LABEL: combine_pshufb_as_vzmovl_32:
; SSSE3-NEXT: xorps %xmm1, %xmm1
; SSSE3-NEXT: movss {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
; SSSE3-NEXT: movaps %xmm1, %xmm0
; SSE41-LABEL: combine_pshufb_as_vzmovl_32:
; SSE41-NEXT: xorps %xmm1, %xmm1
; SSE41-NEXT: blendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
; AVX-LABEL: combine_pshufb_as_vzmovl_32:
; AVX-NEXT: vxorps %xmm1, %xmm1, %xmm1
; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
  %1 = bitcast <4 x float> %a0 to <16 x i8>
  %2 = call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %1, <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>)
  %3 = bitcast <16 x i8> %2 to <4 x float>
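; The next three tests feed a PSHUFB into MOVDDUP/MOVSHDUP/MOVSLDUP-style
; shufflevector masks; the two shuffles are expected to merge into a single
; PSHUFB whose mask is the composition of both.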
define <4 x float> @combine_pshufb_movddup(<4 x float> %a0) {
; SSE-LABEL: combine_pshufb_movddup:
; SSE-NEXT: pshufb {{.*#+}} xmm0 = xmm0[5,5,5,5,7,7,7,7,5,5,5,5,7,7,7,7]
; AVX-LABEL: combine_pshufb_movddup:
; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[5,5,5,5,7,7,7,7,5,5,5,5,7,7,7,7]
  %1 = bitcast <4 x float> %a0 to <16 x i8>
  %2 = tail call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %1, <16 x i8> <i8 5, i8 5, i8 5, i8 5, i8 7, i8 7, i8 7, i8 7, i8 1, i8 1, i8 1, i8 1, i8 3, i8 3, i8 3, i8 3>)
  %3 = bitcast <16 x i8> %2 to <4 x float>
  %4 = shufflevector <4 x float> %3, <4 x float> undef, <4 x i32> <i32 0, i32 1, i32 0, i32 1>
define <4 x float> @combine_pshufb_movshdup(<4 x float> %a0) {
; SSE-LABEL: combine_pshufb_movshdup:
; SSE-NEXT: pshufb {{.*#+}} xmm0 = xmm0[7,7,7,7,7,7,7,7,3,3,3,3,3,3,3,3]
; AVX-LABEL: combine_pshufb_movshdup:
; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[7,7,7,7,7,7,7,7,3,3,3,3,3,3,3,3]
  %1 = bitcast <4 x float> %a0 to <16 x i8>
  %2 = tail call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %1, <16 x i8> <i8 5, i8 5, i8 5, i8 5, i8 7, i8 7, i8 7, i8 7, i8 1, i8 1, i8 1, i8 1, i8 3, i8 3, i8 3, i8 3>)
  %3 = bitcast <16 x i8> %2 to <4 x float>
  %4 = shufflevector <4 x float> %3, <4 x float> undef, <4 x i32> <i32 1, i32 1, i32 3, i32 3>
define <4 x float> @combine_pshufb_movsldup(<4 x float> %a0) {
; SSE-LABEL: combine_pshufb_movsldup:
; SSE-NEXT: pshufb {{.*#+}} xmm0 = xmm0[5,5,5,5,5,5,5,5,1,1,1,1,1,1,1,1]
; AVX-LABEL: combine_pshufb_movsldup:
; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[5,5,5,5,5,5,5,5,1,1,1,1,1,1,1,1]
  %1 = bitcast <4 x float> %a0 to <16 x i8>
  %2 = tail call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %1, <16 x i8> <i8 5, i8 5, i8 5, i8 5, i8 7, i8 7, i8 7, i8 7, i8 1, i8 1, i8 1, i8 1, i8 3, i8 3, i8 3, i8 3>)
  %3 = bitcast <16 x i8> %2 to <4 x float>
  %4 = shufflevector <4 x float> %3, <4 x float> undef, <4 x i32> <i32 0, i32 0, i32 2, i32 2>
define <16 x i8> @combine_pshufb_palignr(<16 x i8> %a0, <16 x i8> %a1) {
; SSE-LABEL: combine_pshufb_palignr:
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
; AVX-LABEL: combine_pshufb_palignr:
; AVX-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[2,3,2,3]
  %1 = shufflevector <16 x i8> %a0, <16 x i8> %a1, <16 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23>
  %2 = tail call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %1, <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7>)
define <16 x i8> @combine_pshufb_pslldq(<16 x i8> %a0) {
; SSE-LABEL: combine_pshufb_pslldq:
; SSE-NEXT: xorps %xmm0, %xmm0
; AVX-LABEL: combine_pshufb_pslldq:
; AVX-NEXT: vxorps %xmm0, %xmm0, %xmm0
  %1 = tail call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %a0, <16 x i8> <i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7>)
  %2 = shufflevector <16 x i8> %1, <16 x i8> zeroinitializer, <16 x i32> <i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
define <16 x i8> @combine_pshufb_psrldq(<16 x i8> %a0) {
; SSE-LABEL: combine_pshufb_psrldq:
; SSE-NEXT: xorps %xmm0, %xmm0
; AVX-LABEL: combine_pshufb_psrldq:
; AVX-NEXT: vxorps %xmm0, %xmm0, %xmm0
  %1 = tail call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %a0, <16 x i8> <i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128>)
  %2 = shufflevector <16 x i8> %1, <16 x i8> zeroinitializer, <16 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16>
define <16 x i8> @combine_and_pshufb(<16 x i8> %a0) {
; SSSE3-LABEL: combine_and_pshufb:
; SSSE3-NEXT: andps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE41-LABEL: combine_and_pshufb:
; SSE41-NEXT: pxor %xmm1, %xmm1
; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4],xmm1[5,6,7]
; AVX-LABEL: combine_and_pshufb:
; AVX-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4],xmm1[5,6,7]
  %1 = shufflevector <16 x i8> %a0, <16 x i8> zeroinitializer, <16 x i32> <i32 16, i32 16, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
  %2 = call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %1, <16 x i8> <i8 0, i8 1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 8, i8 9, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>)
define <16 x i8> @combine_pshufb_and(<16 x i8> %a0) {
; SSSE3-LABEL: combine_pshufb_and:
; SSSE3-NEXT: andps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE41-LABEL: combine_pshufb_and:
; SSE41-NEXT: pxor %xmm1, %xmm1
; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4],xmm1[5,6,7]
; AVX-LABEL: combine_pshufb_and:
; AVX-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4],xmm1[5,6,7]
  %1 = call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %a0, <16 x i8> <i8 0, i8 1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 8, i8 9, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>)
  %2 = shufflevector <16 x i8> %1, <16 x i8> zeroinitializer, <16 x i32> <i32 16, i32 16, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
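; PSHUFB masks that read a contiguous run of bytes at a fixed offset (padded
; with zeroing lanes) should be matched to PALIGNR/PSLLDQ/PSRLDQ, and masks
; that move whole elements by a multiple of 8 bits should become the
; corresponding PSRLW/PSLLD/PSRLQ immediate shifts.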
define <16 x i8> @combine_pshufb_as_palignr(<16 x i8> %a0) {
; SSE-LABEL: combine_pshufb_as_palignr:
; SSE-NEXT: palignr {{.*#+}} xmm0 = xmm0[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,0]
; AVX-LABEL: combine_pshufb_as_palignr:
; AVX-NEXT: vpalignr {{.*#+}} xmm0 = xmm0[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,0]
  %res0 = call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %a0, <16 x i8> <i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 undef, i8 undef, i8 0>)
define <16 x i8> @combine_pshufb_as_pslldq(<16 x i8> %a0) {
; SSE-LABEL: combine_pshufb_as_pslldq:
; SSE-NEXT: pslldq {{.*#+}} xmm0 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm0[0,1,2,3,4,5]
; AVX-LABEL: combine_pshufb_as_pslldq:
; AVX-NEXT: vpslldq {{.*#+}} xmm0 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm0[0,1,2,3,4,5]
  %res0 = call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %a0, <16 x i8> <i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5>)
define <16 x i8> @combine_pshufb_as_psrldq(<16 x i8> %a0) {
; SSE-LABEL: combine_pshufb_as_psrldq:
; SSE-NEXT: psrldq {{.*#+}} xmm0 = xmm0[15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; AVX-LABEL: combine_pshufb_as_psrldq:
; AVX-NEXT: vpsrldq {{.*#+}} xmm0 = xmm0[15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
  %res0 = call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %a0, <16 x i8> <i8 15, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128>)
define <16 x i8> @combine_pshufb_as_psrlw(<16 x i8> %a0) {
; SSE-LABEL: combine_pshufb_as_psrlw:
; SSE-NEXT: psrlw $8, %xmm0
; AVX-LABEL: combine_pshufb_as_psrlw:
; AVX-NEXT: vpsrlw $8, %xmm0, %xmm0
  %res0 = call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %a0, <16 x i8> <i8 1, i8 128, i8 3, i8 128, i8 5, i8 128, i8 7, i8 128, i8 9, i8 128, i8 11, i8 128, i8 13, i8 128, i8 15, i8 128>)
define <16 x i8> @combine_pshufb_as_pslld(<16 x i8> %a0) {
; SSE-LABEL: combine_pshufb_as_pslld:
; SSE-NEXT: pslld $24, %xmm0
; AVX-LABEL: combine_pshufb_as_pslld:
; AVX-NEXT: vpslld $24, %xmm0, %xmm0
  %res0 = call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %a0, <16 x i8> <i8 128, i8 128, i8 128, i8 0, i8 128, i8 128, i8 128, i8 4, i8 128, i8 128, i8 128, i8 8, i8 128, i8 128, i8 128, i8 12>)
define <16 x i8> @combine_pshufb_as_psrlq(<16 x i8> %a0) {
; SSE-LABEL: combine_pshufb_as_psrlq:
; SSE-NEXT: psrlq $40, %xmm0
; AVX-LABEL: combine_pshufb_as_psrlq:
; AVX-NEXT: vpsrlq $40, %xmm0, %xmm0
  %res0 = call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %a0, <16 x i8> <i8 5, i8 6, i8 7, i8 128, i8 128, i8 128, i8 128, i8 128, i8 13, i8 14, i8 15, i8 128, i8 128, i8 128, i8 128, i8 128>)
define <16 x i8> @combine_pshufb_as_pshuflw(<16 x i8> %a0) {
; SSE-LABEL: combine_pshufb_as_pshuflw:
; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[1,0,3,2,4,5,6,7]
; AVX-LABEL: combine_pshufb_as_pshuflw:
; AVX-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[1,0,3,2,4,5,6,7]
  %res0 = call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %a0, <16 x i8> <i8 2, i8 3, i8 0, i8 1, i8 6, i8 7, i8 4, i8 5, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>)
define <16 x i8> @combine_pshufb_as_pshufhw(<16 x i8> %a0) {
; SSE-LABEL: combine_pshufb_as_pshufhw:
; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,5,4,7,6]
; AVX-LABEL: combine_pshufb_as_pshufhw:
; AVX-NEXT: vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,5,4,7,6]
  %res0 = call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %a0, <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 10, i8 11, i8 8, i8 9, i8 14, i8 15, i8 12, i8 13>)
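; The composed mask below swaps the two words in every dword, which would need
; both PSHUFLW and PSHUFHW; the single PSHUFB is expected to be kept on
; SSE/AVX, while AVX512F can use a 16-bit rotate (VPROLD $16) instead.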
define <16 x i8> @combine_pshufb_not_as_pshufw(<16 x i8> %a0) {
; SSE-LABEL: combine_pshufb_not_as_pshufw:
; SSE-NEXT: pshufb {{.*#+}} xmm0 = xmm0[2,3,0,1,6,7,4,5,10,11,8,9,14,15,12,13]
; AVX1-LABEL: combine_pshufb_not_as_pshufw:
; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[2,3,0,1,6,7,4,5,10,11,8,9,14,15,12,13]
; AVX2-LABEL: combine_pshufb_not_as_pshufw:
; AVX2-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[2,3,0,1,6,7,4,5,10,11,8,9,14,15,12,13]
; AVX512F-LABEL: combine_pshufb_not_as_pshufw:
; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512F-NEXT: vprold $16, %zmm0, %zmm0
; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512F-NEXT: vzeroupper
  %res0 = call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %a0, <16 x i8> <i8 2, i8 3, i8 0, i8 1, i8 6, i8 7, i8 4, i8 5, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>)
  %res1 = call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %res0, <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 10, i8 11, i8 8, i8 9, i8 14, i8 15, i8 12, i8 13>)
define <16 x i8> @combine_vpshufb_as_pshuflw_not_pslld(ptr %a0) {
; SSE-LABEL: combine_vpshufb_as_pshuflw_not_pslld:
; SSE-NEXT: pshuflw {{.*#+}} xmm0 = mem[0,0,2,2,4,5,6,7]
; AVX-LABEL: combine_vpshufb_as_pshuflw_not_pslld:
; AVX-NEXT: vpshuflw {{.*#+}} xmm0 = mem[0,0,2,2,4,5,6,7]
  %res0 = load <16 x i8>, ptr %a0, align 16
  %res1 = call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %res0, <16 x i8> <i8 undef, i8 undef, i8 0, i8 1, i8 undef, i8 undef, i8 4, i8 5, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef>)
define <16 x i8> @combine_pshufb_as_unary_unpcklbw(<16 x i8> %a0) {
; SSE-LABEL: combine_pshufb_as_unary_unpcklbw:
; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; AVX-LABEL: combine_pshufb_as_unary_unpcklbw:
; AVX-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
  %1 = tail call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %a0, <16 x i8> <i8 0, i8 undef, i8 undef, i8 1, i8 2, i8 2, i8 3, i8 3, i8 4, i8 4, i8 5, i8 5, i8 6, i8 6, i8 7, i8 7>)
define <16 x i8> @combine_pshufb_as_unary_unpckhwd(<16 x i8> %a0) {
; SSE-LABEL: combine_pshufb_as_unary_unpckhwd:
; SSE-NEXT: punpckhwd {{.*#+}} xmm0 = xmm0[4,4,5,5,6,6,7,7]
; AVX-LABEL: combine_pshufb_as_unary_unpckhwd:
; AVX-NEXT: vpunpckhwd {{.*#+}} xmm0 = xmm0[4,4,5,5,6,6,7,7]
  %1 = tail call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %a0, <16 x i8> <i8 8, i8 9, i8 8, i8 9, i8 10, i8 11, i8 10, i8 11, i8 12, i8 13, i8 12, i8 13, i8 14, i8 15, i8 undef, i8 undef>)
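; In the next two tests the final shuffle only demands lanes that the PSHUFB
; mask leaves undef, so the whole sequence is expected to fold away and the
; functions should lower to a bare return.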
define <8 x i16> @combine_pshufb_as_unpacklo_undef(<16 x i8> %a0) {
; CHECK-LABEL: combine_pshufb_as_unpacklo_undef:
  %1 = call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %a0, <16 x i8> <i8 undef, i8 undef, i8 0, i8 1, i8 undef, i8 undef, i8 2, i8 3, i8 undef, i8 undef, i8 4, i8 5, i8 undef, i8 undef, i8 6, i8 7>)
  %2 = bitcast <16 x i8> %1 to <8 x i16>
  %3 = shufflevector <8 x i16> %2, <8 x i16> undef, <8 x i32> <i32 0, i32 0, i32 2, i32 2, i32 4, i32 4, i32 6, i32 6>
define <16 x i8> @combine_pshufb_as_unpackhi_undef(<16 x i8> %a0) {
; CHECK-LABEL: combine_pshufb_as_unpackhi_undef:
  %1 = call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %a0, <16 x i8> <i8 8, i8 undef, i8 9, i8 undef, i8 10, i8 undef, i8 11, i8 undef, i8 12, i8 undef, i8 13, i8 undef, i8 14, i8 undef, i8 15, i8 undef>)
  %2 = shufflevector <16 x i8> %1, <16 x i8> undef, <16 x i32> <i32 1, i32 1, i32 3, i32 3, i32 5, i32 5, i32 7, i32 7, i32 9, i32 9, i32 11, i32 11, i32 13, i32 13, i32 15, i32 15>
define <16 x i8> @combine_pshufb_as_unpacklo_zero(<16 x i8> %a0) {
; SSE-LABEL: combine_pshufb_as_unpacklo_zero:
; SSE-NEXT: xorps %xmm1, %xmm1
; SSE-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; SSE-NEXT: movaps %xmm1, %xmm0
; AVX-LABEL: combine_pshufb_as_unpacklo_zero:
; AVX-NEXT: vxorps %xmm1, %xmm1, %xmm1
; AVX-NEXT: vunpcklps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
  %1 = call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %a0, <16 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 0, i8 1, i8 2, i8 3, i8 -1, i8 -1, i8 -1, i8 -1, i8 4, i8 5, i8 6, i8 7>)
define <16 x i8> @combine_pshufb_as_unpackhi_zero(<16 x i8> %a0) {
; SSE-LABEL: combine_pshufb_as_unpackhi_zero:
; SSE-NEXT: pxor %xmm1, %xmm1
; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
; AVX-LABEL: combine_pshufb_as_unpackhi_zero:
; AVX-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX-NEXT: vpunpckhbw {{.*#+}} xmm0 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
  %1 = call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %a0, <16 x i8> <i8 8, i8 -1, i8 9, i8 -1, i8 10, i8 -1, i8 11, i8 -1, i8 12, i8 -1, i8 13, i8 -1, i8 14, i8 -1, i8 15, i8 -1>)
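; A vector shift followed by a PSHUFB only rearranges bytes whose values are
; already known (original lanes or zeros), so the pair should fold into a
; single PSHUFB that indexes the shifted byte positions directly.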
define <16 x i8> @combine_psrlw_pshufb(<8 x i16> %a0) {
; SSE-LABEL: combine_psrlw_pshufb:
; SSE-NEXT: pshufb {{.*#+}} xmm0 = xmm0[1],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[1],zero,zero,zero
; AVX-LABEL: combine_psrlw_pshufb:
; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[1],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[1],zero,zero,zero
  %1 = lshr <8 x i16> %a0, <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>
  %2 = bitcast <8 x i16> %1 to <16 x i8>
  %3 = tail call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %2, <16 x i8> <i8 0, i8 -1, i8 -1, i8 -1, i8 0, i8 -1, i8 -1, i8 -1, i8 0, i8 -1, i8 -1, i8 -1, i8 0, i8 -1, i8 -1, i8 -1>)
define <16 x i8> @combine_pslld_pshufb(<4 x i32> %a0) {
; SSE-LABEL: combine_pslld_pshufb:
; SSE-NEXT: pshufb {{.*#+}} xmm0 = xmm0[2,1,0],zero,xmm0[6,5,4],zero,xmm0[10,9,8],zero,xmm0[14,13,12],zero
; AVX-LABEL: combine_pslld_pshufb:
; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[2,1,0],zero,xmm0[6,5,4],zero,xmm0[10,9,8],zero,xmm0[14,13,12],zero
  %1 = shl <4 x i32> %a0, <i32 8, i32 8, i32 8, i32 8>
  %2 = bitcast <4 x i32> %1 to <16 x i8>
  %3 = tail call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %2, <16 x i8> <i8 3, i8 2, i8 1, i8 0, i8 7, i8 6, i8 5, i8 4, i8 11, i8 10, i8 9, i8 8, i8 15, i8 14, i8 13, i8 12>)
define <16 x i8> @combine_psrlq_pshufb(<2 x i64> %a0) {
; SSE-LABEL: combine_psrlq_pshufb:
; SSE-NEXT: pshufb {{.*#+}} xmm0 = zero,zero,zero,zero,zero,zero,xmm0[7,6],zero,zero,zero,zero,zero,zero,xmm0[15,14]
; AVX-LABEL: combine_psrlq_pshufb:
; AVX-NEXT: vpshufb {{.*#+}} xmm0 = zero,zero,zero,zero,zero,zero,xmm0[7,6],zero,zero,zero,zero,zero,zero,xmm0[15,14]
  %1 = lshr <2 x i64> %a0, <i64 48, i64 48>
  %2 = bitcast <2 x i64> %1 to <16 x i8>
  %3 = tail call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %2, <16 x i8> <i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0, i8 15, i8 14, i8 13, i8 12, i8 11, i8 10, i8 9, i8 8>)
define <16 x i8> @combine_unpckl_arg0_pshufb(<16 x i8> %a0, <16 x i8> %a1) {
; SSE-LABEL: combine_unpckl_arg0_pshufb:
; SSE-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[0],zero,zero,zero,xmm0[0],zero,zero,zero,xmm0[0],zero,zero,zero
; AVX-LABEL: combine_unpckl_arg0_pshufb:
; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[0],zero,zero,zero,xmm0[0],zero,zero,zero,xmm0[0],zero,zero,zero
  %1 = shufflevector <16 x i8> %a0, <16 x i8> %a1, <16 x i32> <i32 0, i32 16, i32 1, i32 17, i32 2, i32 18, i32 3, i32 19, i32 4, i32 20, i32 5, i32 21, i32 6, i32 22, i32 7, i32 23>
  %2 = tail call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %1, <16 x i8> <i8 0, i8 -1, i8 -1, i8 -1, i8 0, i8 -1, i8 -1, i8 -1, i8 0, i8 -1, i8 -1, i8 -1, i8 0, i8 -1, i8 -1, i8 -1>)
define <16 x i8> @combine_unpckl_arg1_pshufb(<16 x i8> %a0, <16 x i8> %a1) {
; SSE-LABEL: combine_unpckl_arg1_pshufb:
; SSE-NEXT: movdqa %xmm1, %xmm0
; SSE-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[0],zero,zero,zero,xmm0[0],zero,zero,zero,xmm0[0],zero,zero,zero
; AVX-LABEL: combine_unpckl_arg1_pshufb:
; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm1[0],zero,zero,zero,xmm1[0],zero,zero,zero,xmm1[0],zero,zero,zero,xmm1[0],zero,zero,zero
  %1 = shufflevector <16 x i8> %a0, <16 x i8> %a1, <16 x i32> <i32 0, i32 16, i32 1, i32 17, i32 2, i32 18, i32 3, i32 19, i32 4, i32 20, i32 5, i32 21, i32 6, i32 22, i32 7, i32 23>
  %2 = tail call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %1, <16 x i8> <i8 1, i8 -1, i8 -1, i8 -1, i8 1, i8 -1, i8 -1, i8 -1, i8 1, i8 -1, i8 -1, i8 -1, i8 1, i8 -1, i8 -1, i8 -1>)
define <8 x i16> @shuffle_combine_unpack_insert(<8 x i16> %a0) {
; SSE-LABEL: shuffle_combine_unpack_insert:
; SSE-NEXT: pshufb {{.*#+}} xmm0 = xmm0[4,5,4,5,4,5,8,9,8,9,8,9,10,11,10,11]
; AVX-LABEL: shuffle_combine_unpack_insert:
; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[4,5,4,5,4,5,8,9,8,9,8,9,10,11,10,11]
  %1 = extractelement <8 x i16> %a0, i32 2
  %2 = extractelement <8 x i16> %a0, i32 4
  %3 = insertelement <8 x i16> %a0, i16 %1, i32 4
  %4 = insertelement <8 x i16> %a0, i16 %2, i32 2
  %5 = shufflevector <8 x i16> %3, <8 x i16> %4, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11>
  %6 = shufflevector <8 x i16> %5, <8 x i16> %3, <8 x i32> <i32 4, i32 12, i32 5, i32 13, i32 undef, i32 undef, i32 undef, i32 undef>
  %7 = shufflevector <8 x i16> %5, <8 x i16> %a0, <8 x i32> <i32 4, i32 12, i32 5, i32 13, i32 undef, i32 undef, i32 undef, i32 undef>
  %8 = shufflevector <8 x i16> %6, <8 x i16> %7, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11>
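; When the PACKSS/PACKUS inputs are sign or shift patterns whose packed bytes
; are plain copies of the original bytes, the trailing PSHUFB should be
; rewritten to read those bytes straight from the un-packed value.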
define <16 x i8> @shuffle_combine_packssdw_pshufb(<4 x i32> %a0) {
; SSE-LABEL: shuffle_combine_packssdw_pshufb:
; SSE-NEXT: psrad $31, %xmm0
; SSE-NEXT: pshufb {{.*#+}} xmm0 = xmm0[13,12,9,8,5,4,1,0,13,12,9,8,5,4,1,0]
; AVX-LABEL: shuffle_combine_packssdw_pshufb:
; AVX-NEXT: vpsrad $31, %xmm0, %xmm0
; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[13,12,9,8,5,4,1,0,13,12,9,8,5,4,1,0]
  %1 = ashr <4 x i32> %a0, <i32 31, i32 31, i32 31, i32 31>
  %2 = tail call <8 x i16> @llvm.x86.sse2.packssdw.128(<4 x i32> %1, <4 x i32> %1)
  %3 = bitcast <8 x i16> %2 to <16 x i8>
  %4 = tail call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %3, <16 x i8> <i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0, i8 15, i8 14, i8 13, i8 12, i8 11, i8 10, i8 9, i8 8>)
declare <8 x i16> @llvm.x86.sse2.packssdw.128(<4 x i32>, <4 x i32>) nounwind readnone
define <16 x i8> @shuffle_combine_packsswb_pshufb(<8 x i16> %a0, <8 x i16> %a1) {
; SSE-LABEL: shuffle_combine_packsswb_pshufb:
; SSE-NEXT: psraw $15, %xmm0
; SSE-NEXT: pshufb {{.*#+}} xmm0 = xmm0[14,12,10,8,6,4,2,0,14,12,10,8,6,4,2,0]
; AVX-LABEL: shuffle_combine_packsswb_pshufb:
; AVX-NEXT: vpsraw $15, %xmm0, %xmm0
; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[14,12,10,8,6,4,2,0,14,12,10,8,6,4,2,0]
  %1 = ashr <8 x i16> %a0, <i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15>
  %2 = ashr <8 x i16> %a1, <i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15>
  %3 = tail call <16 x i8> @llvm.x86.sse2.packsswb.128(<8 x i16> %1, <8 x i16> %2)
  %4 = tail call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %3, <16 x i8> <i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0>)
declare <16 x i8> @llvm.x86.sse2.packsswb.128(<8 x i16>, <8 x i16>) nounwind readnone
define <16 x i8> @shuffle_combine_packuswb_pshufb(<8 x i16> %a0, <8 x i16> %a1) {
; SSE-LABEL: shuffle_combine_packuswb_pshufb:
; SSE-NEXT: pshufb {{.*#+}} xmm0 = xmm0[15,13,11,9,7,5,3,1,15,13,11,9,7,5,3,1]
; AVX-LABEL: shuffle_combine_packuswb_pshufb:
; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[15,13,11,9,7,5,3,1,15,13,11,9,7,5,3,1]
  %1 = lshr <8 x i16> %a0, <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>
  %2 = lshr <8 x i16> %a1, <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>
  %3 = tail call <16 x i8> @llvm.x86.sse2.packuswb.128(<8 x i16> %1, <8 x i16> %2)
  %4 = tail call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %3, <16 x i8> <i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0>)
declare <16 x i8> @llvm.x86.sse2.packuswb.128(<8 x i16>, <8 x i16>) nounwind readnone
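; ORing two PSHUFBs whose zeroing masks cover disjoint byte positions acts as
; a two-input shuffle; depending on the masks this is expected to become a
; blend, an unpack, or a single combined shuffle.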
define <16 x i8> @combine_pshufb_pshufb_or_as_blend(<16 x i8> %a0, <16 x i8> %a1) {
; SSSE3-LABEL: combine_pshufb_pshufb_or_as_blend:
; SSSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3]
; SSE41-LABEL: combine_pshufb_pshufb_or_as_blend:
; SSE41-NEXT: blendps {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3]
; AVX-LABEL: combine_pshufb_pshufb_or_as_blend:
; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3]
  %1 = call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %a0, <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>)
  %2 = call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %a1, <16 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>)
  %3 = or <16 x i8> %1, %2
define <16 x i8> @combine_pshufb_pshufb_or_as_unpcklbw(<16 x i8> %a0, <16 x i8> %a1) {
; SSE-LABEL: combine_pshufb_pshufb_or_as_unpcklbw:
; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; AVX-LABEL: combine_pshufb_pshufb_or_as_unpcklbw:
; AVX-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
  %1 = call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %a0, <16 x i8> <i8 0, i8 -1, i8 1, i8 -1, i8 2, i8 -1, i8 3, i8 -1, i8 4, i8 -1, i8 5, i8 -1, i8 6, i8 -1, i8 7, i8 -1>)
  %2 = call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %a1, <16 x i8> <i8 -1, i8 0, i8 -1, i8 1, i8 -1, i8 2, i8 -1, i8 3, i8 -1, i8 4, i8 -1, i8 5, i8 -1, i8 6, i8 -1, i8 7>)
  %3 = or <16 x i8> %1, %2
define <16 x i8> @combine_pshufb_pshufb_or_pshufb(<16 x i8> %a0) {
; SSE-LABEL: combine_pshufb_pshufb_or_pshufb:
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
; AVX1-LABEL: combine_pshufb_pshufb_or_pshufb:
; AVX1-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,0,0,0]
; AVX2-LABEL: combine_pshufb_pshufb_or_pshufb:
; AVX2-NEXT: vbroadcastss %xmm0, %xmm0
; AVX512F-LABEL: combine_pshufb_pshufb_or_pshufb:
; AVX512F-NEXT: vbroadcastss %xmm0, %xmm0
  %1 = call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %a0, <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 -1, i8 -1, i8 -1, i8 -1, i8 0, i8 1, i8 2, i8 3, i8 -1, i8 -1, i8 -1, i8 -1>)
  %2 = call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %a0, <16 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 0, i8 1, i8 2, i8 3, i8 -1, i8 -1, i8 -1, i8 -1, i8 0, i8 1, i8 2, i8 3>)
  %3 = or <16 x i8> %1, %2
  %4 = call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %3, <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 0, i8 1, i8 2, i8 3, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7>)
define <16 x i8> @combine_and_pshufb_or_pshufb(<16 x i8> %a0, <16 x i8> %a1) {
; SSE-LABEL: combine_and_pshufb_or_pshufb:
; SSE-NEXT: pshufb {{.*#+}} xmm0 = zero,zero,zero,xmm0[15],zero,xmm0[1],zero,xmm0[14],zero,xmm0[2],zero,xmm0[13],zero,xmm0[3],zero,zero
; SSE-NEXT: pshufb {{.*#+}} xmm1 = xmm1[7],zero,xmm1[0],zero,xmm1[8],zero,xmm1[1],zero,xmm1[9],zero,xmm1[10],zero,xmm1[7],zero,xmm1[7],zero
; SSE-NEXT: por %xmm1, %xmm0
; AVX-LABEL: combine_and_pshufb_or_pshufb:
; AVX-NEXT: vpshufb {{.*#+}} xmm0 = zero,zero,zero,xmm0[15],zero,xmm0[1],zero,xmm0[14],zero,xmm0[2],zero,xmm0[13],zero,xmm0[3],zero,zero
; AVX-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[7],zero,xmm1[0],zero,xmm1[8],zero,xmm1[1],zero,xmm1[9],zero,xmm1[10],zero,xmm1[7],zero,xmm1[7],zero
; AVX-NEXT: vpor %xmm0, %xmm1, %xmm0
  %1 = call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %a0, <16 x i8> <i8 -1, i8 -1, i8 -1, i8 15, i8 -1, i8 1, i8 -1, i8 14, i8 -1, i8 2, i8 -1, i8 13, i8 -1, i8 3, i8 -1, i8 -1>)
  %2 = call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %a1, <16 x i8> <i8 7, i8 -1, i8 0, i8 -1, i8 8, i8 -1, i8 1, i8 -1, i8 9, i8 -1, i8 10, i8 -1, i8 7, i8 -1, i8 7, i8 -1>)
  %3 = or <16 x i8> %1, %2
  %4 = and <16 x i8> %3, <i8 -1, i8 0, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
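; With constant source vectors the PSHUFB result is known at compile time, so
; these should constant-fold to a load of the shuffled constant, or to a
; simple scalar move when only one lane is non-zero.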
define <16 x i8> @constant_fold_pshufb() {
; SSE-LABEL: constant_fold_pshufb:
; SSE-NEXT: movaps {{.*#+}} xmm0 = <14,0,0,0,u,u,0,0,0,0,0,0,0,0,8,9>
; AVX-LABEL: constant_fold_pshufb:
; AVX-NEXT: vmovaps {{.*#+}} xmm0 = <14,0,0,0,u,u,0,0,0,0,0,0,0,0,8,9>
  %1 = tail call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> <i8 15, i8 14, i8 13, i8 12, i8 11, i8 10, i8 9, i8 8, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0>, <16 x i8> <i8 1, i8 -1, i8 -1, i8 -1, i8 undef, i8 undef, i8 -1, i8 -1, i8 15, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 7, i8 6>)
define <16 x i8> @constant_fold_pshufb_2() {
; SSE-LABEL: constant_fold_pshufb_2:
; SSE-NEXT: movl $2, %eax
; SSE-NEXT: movd %eax, %xmm0
; AVX-LABEL: constant_fold_pshufb_2:
; AVX-NEXT: movl $2, %eax
; AVX-NEXT: vmovd %eax, %xmm0
  %1 = tail call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> <i8 2, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, <16 x i8> <i8 0, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef>)
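; The extractelement/and users below demand only a few bytes of the shuffle
; result, so the PSHUFB is expected to be narrowed to produce just those
; bytes (e.g. via PSRLDQ or PSLLW, or a mask that is mostly undef lanes).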
define i32 @mask_zzz3_v16i8(<16 x i8> %a0) {
; SSSE3-LABEL: mask_zzz3_v16i8:
; SSSE3-NEXT: psrldq {{.*#+}} xmm0 = xmm0[11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; SSSE3-NEXT: movd %xmm0, %eax
; SSSE3-NEXT: andl $-16777216, %eax # imm = 0xFF000000
; SSE41-LABEL: mask_zzz3_v16i8:
; SSE41-NEXT: psllw $8, %xmm0
; SSE41-NEXT: pextrd $3, %xmm0, %eax
; SSE41-NEXT: andl $-16777216, %eax # imm = 0xFF000000
; AVX-LABEL: mask_zzz3_v16i8:
; AVX-NEXT: vpsllw $8, %xmm0, %xmm0
; AVX-NEXT: vpextrd $3, %xmm0, %eax
; AVX-NEXT: andl $-16777216, %eax # imm = 0xFF000000
  %1 = call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %a0, <16 x i8> <i8 0, i8 2, i8 4, i8 6, i8 8, i8 10, i8 12, i8 14, i8 0, i8 2, i8 4, i8 6, i8 8, i8 10, i8 12, i8 14>)
  %2 = bitcast <16 x i8> %1 to <4 x i32>
  %3 = extractelement <4 x i32> %2, i32 3
  %4 = and i32 %3, 4278190080
define i32 @mask_z1z3_v16i8(<16 x i8> %a0) {
; SSSE3-LABEL: mask_z1z3_v16i8:
; SSSE3-NEXT: pshufb {{.*#+}} xmm0 = zero,xmm0[10],zero,xmm0[14,u,u,u,u,u,u,u,u,u,u,u,u]
; SSSE3-NEXT: movd %xmm0, %eax
; SSE41-LABEL: mask_z1z3_v16i8:
; SSE41-NEXT: pshufb {{.*#+}} xmm0 = xmm0[u,u,u,u,u,u,u,u,u,u,u,u],zero,xmm0[10],zero,xmm0[14]
; SSE41-NEXT: pextrd $3, %xmm0, %eax
; AVX-LABEL: mask_z1z3_v16i8:
; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[u,u,u,u,u,u,u,u,u,u,u,u],zero,xmm0[10],zero,xmm0[14]
; AVX-NEXT: vpextrd $3, %xmm0, %eax
  %1 = call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %a0, <16 x i8> <i8 0, i8 2, i8 4, i8 6, i8 8, i8 10, i8 12, i8 14, i8 0, i8 2, i8 4, i8 6, i8 8, i8 10, i8 12, i8 14>)
  %2 = bitcast <16 x i8> %1 to <4 x i32>
  %3 = extractelement <4 x i32> %2, i32 3
  %4 = and i32 %3, 4278255360
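; Reduced from PR22415: a double bitcast to <8 x i8>, with bytes 0, 2 and 4
; extracted and zero-extended to i32, is expected to lower to a single PSHUFB
; feeding a scalar MOVD.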
define i32 @PR22415(double %a0) {
; SSE-LABEL: PR22415:
; SSE-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,2,4],zero,xmm0[u,u,u,u,u,u,u,u,u,u,u,u]
; SSE-NEXT: movd %xmm0, %eax
; AVX-LABEL: PR22415:
; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,2,4],zero,xmm0[u,u,u,u,u,u,u,u,u,u,u,u]
; AVX-NEXT: vmovd %xmm0, %eax
  %1 = bitcast double %a0 to <8 x i8>
  %2 = shufflevector <8 x i8> %1, <8 x i8> undef, <4 x i32> <i32 0, i32 2, i32 4, i32 undef>
  %3 = shufflevector <4 x i8> %2, <4 x i8> undef, <3 x i32> <i32 0, i32 1, i32 2>
  %4 = bitcast <3 x i8> %3 to i24
  %5 = zext i24 %4 to i32