; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+ssse3 | FileCheck %s --check-prefixes=CHECK,SSE,SSSE3
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+sse4.1 | FileCheck %s --check-prefixes=CHECK,SSE,SSE41
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx | FileCheck %s --check-prefixes=CHECK,AVX,AVX1
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=CHECK,AVX,AVX2
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx512f | FileCheck %s --check-prefixes=CHECK,AVX,AVX512F

; Combine tests involving SSE3/SSSE3 target shuffles (MOVDDUP, MOVSHDUP, MOVSLDUP, PSHUFB)

declare <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8>, <16 x i8>)

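; A PSHUFB control byte with its most significant bit set zeroes the corresponding
; result byte, so shuffles built entirely from such masks should combine away
; (e.g. into a zeroed register or a simple MOVQ).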
define <16 x i8> @combine_vpshufb_as_zero(<16 x i8> %a0) {
; SSE-LABEL: combine_vpshufb_as_zero:
; SSE-NEXT: xorps %xmm0, %xmm0
; AVX-LABEL: combine_vpshufb_as_zero:
; AVX-NEXT: vxorps %xmm0, %xmm0, %xmm0
  %res0 = call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %a0, <16 x i8> <i8 128, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>)
  %res1 = call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %res0, <16 x i8> <i8 0, i8 128, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>)
  %res2 = call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %res1, <16 x i8> <i8 0, i8 1, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128>)
  ret <16 x i8> %res2
}

define <16 x i8> @combine_vpshufb_as_movq(<16 x i8> %a0) {
; SSE-LABEL: combine_vpshufb_as_movq:
; SSE-NEXT: movq {{.*#+}} xmm0 = xmm0[0],zero
; AVX-LABEL: combine_vpshufb_as_movq:
; AVX-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero
  %res0 = call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %a0, <16 x i8> <i8 0, i8 128, i8 1, i8 128, i8 2, i8 128, i8 3, i8 128, i8 4, i8 128, i8 5, i8 128, i8 6, i8 128, i8 7, i8 128>)
  %res1 = call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %res0, <16 x i8> <i8 0, i8 2, i8 4, i8 6, i8 8, i8 10, i8 12, i8 14, i8 1, i8 3, i8 5, i8 7, i8 9, i8 11, i8 13, i8 15>)
  ret <16 x i8> %res1
}

define <2 x double> @combine_pshufb_as_movsd(<2 x double> %a0, <2 x double> %a1) {
; SSSE3-LABEL: combine_pshufb_as_movsd:
; SSSE3-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
; SSSE3-NEXT: movapd %xmm1, %xmm0
; SSE41-LABEL: combine_pshufb_as_movsd:
; SSE41-NEXT: blendps {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3]
; AVX-LABEL: combine_pshufb_as_movsd:
; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3]
  %1 = shufflevector <2 x double> %a0, <2 x double> %a1, <2 x i32> <i32 3, i32 0>
  %2 = bitcast <2 x double> %1 to <16 x i8>
  %3 = tail call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %2, <16 x i8> <i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7>)
  %4 = bitcast <16 x i8> %3 to <2 x double>
  ret <2 x double> %4
}

define <4 x float> @combine_pshufb_as_movss(<4 x float> %a0, <4 x float> %a1) {
; SSSE3-LABEL: combine_pshufb_as_movss:
; SSSE3-NEXT: movss {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
; SSE41-LABEL: combine_pshufb_as_movss:
; SSE41-NEXT: blendps {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
; AVX-LABEL: combine_pshufb_as_movss:
; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
  %1 = shufflevector <4 x float> %a0, <4 x float> %a1, <4 x i32> <i32 4, i32 3, i32 2, i32 1>
  %2 = bitcast <4 x float> %1 to <16 x i8>
  %3 = tail call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %2, <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 12, i8 13, i8 14, i8 15, i8 8, i8 9, i8 10, i8 11, i8 4, i8 5, i8 6, i8 7>)
  %4 = bitcast <16 x i8> %3 to <4 x float>
  ret <4 x float> %4
}

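; A PSHUFB that spreads the low source bytes into zero-filled dwords matches a
; byte-to-dword zero extension: SSE4.1 and AVX should select PMOVZXBD, while
; plain SSSE3 has to keep the PSHUFB.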
define <4 x i32> @combine_pshufb_as_zext(<16 x i8> %a0) {
; SSSE3-LABEL: combine_pshufb_as_zext:
; SSSE3-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
; SSE41-LABEL: combine_pshufb_as_zext:
; SSE41-NEXT: pmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
; AVX-LABEL: combine_pshufb_as_zext:
; AVX-NEXT: vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
  %1 = call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %a0, <16 x i8> <i8 0, i8 -1, i8 -1, i8 -1, i8 1, i8 -1, i8 -1, i8 -1, i8 2, i8 -1, i8 -1, i8 -1, i8 3, i8 -1, i8 -1, i8 -1>)
  %2 = bitcast <16 x i8> %1 to <4 x i32>
  ret <4 x i32> %2
}

define <2 x double> @combine_pshufb_as_vzmovl_64(<2 x double> %a0) {
; SSE-LABEL: combine_pshufb_as_vzmovl_64:
; SSE-NEXT: movq {{.*#+}} xmm0 = xmm0[0],zero
; AVX-LABEL: combine_pshufb_as_vzmovl_64:
; AVX-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero
  %1 = bitcast <2 x double> %a0 to <16 x i8>
  %2 = call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %1, <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>)
  %3 = bitcast <16 x i8> %2 to <2 x double>
  ret <2 x double> %3
}

define <4 x float> @combine_pshufb_as_vzmovl_32(<4 x float> %a0) {
; SSSE3-LABEL: combine_pshufb_as_vzmovl_32:
; SSSE3-NEXT: xorps %xmm1, %xmm1
; SSSE3-NEXT: movss {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
; SSSE3-NEXT: movaps %xmm1, %xmm0
; SSE41-LABEL: combine_pshufb_as_vzmovl_32:
; SSE41-NEXT: xorps %xmm1, %xmm1
; SSE41-NEXT: blendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
; AVX-LABEL: combine_pshufb_as_vzmovl_32:
; AVX-NEXT: vxorps %xmm1, %xmm1, %xmm1
; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
  %1 = bitcast <4 x float> %a0 to <16 x i8>
  %2 = call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %1, <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>)
  %3 = bitcast <16 x i8> %2 to <4 x float>
  ret <4 x float> %3
}

define <4 x float> @combine_pshufb_movddup(<4 x float> %a0) {
; SSE-LABEL: combine_pshufb_movddup:
; SSE-NEXT: pshufb {{.*#+}} xmm0 = xmm0[5,5,5,5,7,7,7,7,5,5,5,5,7,7,7,7]
; AVX-LABEL: combine_pshufb_movddup:
; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[5,5,5,5,7,7,7,7,5,5,5,5,7,7,7,7]
  %1 = bitcast <4 x float> %a0 to <16 x i8>
  %2 = tail call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %1, <16 x i8> <i8 5, i8 5, i8 5, i8 5, i8 7, i8 7, i8 7, i8 7, i8 1, i8 1, i8 1, i8 1, i8 3, i8 3, i8 3, i8 3>)
  %3 = bitcast <16 x i8> %2 to <4 x float>
  %4 = shufflevector <4 x float> %3, <4 x float> undef, <4 x i32> <i32 0, i32 1, i32 0, i32 1>
  ret <4 x float> %4
}

define <4 x float> @combine_pshufb_movshdup(<4 x float> %a0) {
; SSE-LABEL: combine_pshufb_movshdup:
; SSE-NEXT: pshufb {{.*#+}} xmm0 = xmm0[7,7,7,7,7,7,7,7,3,3,3,3,3,3,3,3]
; AVX-LABEL: combine_pshufb_movshdup:
; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[7,7,7,7,7,7,7,7,3,3,3,3,3,3,3,3]
  %1 = bitcast <4 x float> %a0 to <16 x i8>
  %2 = tail call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %1, <16 x i8> <i8 5, i8 5, i8 5, i8 5, i8 7, i8 7, i8 7, i8 7, i8 1, i8 1, i8 1, i8 1, i8 3, i8 3, i8 3, i8 3>)
  %3 = bitcast <16 x i8> %2 to <4 x float>
  %4 = shufflevector <4 x float> %3, <4 x float> undef, <4 x i32> <i32 1, i32 1, i32 3, i32 3>
  ret <4 x float> %4
}

define <4 x float> @combine_pshufb_movsldup(<4 x float> %a0) {
; SSE-LABEL: combine_pshufb_movsldup:
; SSE-NEXT: pshufb {{.*#+}} xmm0 = xmm0[5,5,5,5,5,5,5,5,1,1,1,1,1,1,1,1]
; AVX-LABEL: combine_pshufb_movsldup:
; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[5,5,5,5,5,5,5,5,1,1,1,1,1,1,1,1]
  %1 = bitcast <4 x float> %a0 to <16 x i8>
  %2 = tail call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %1, <16 x i8> <i8 5, i8 5, i8 5, i8 5, i8 7, i8 7, i8 7, i8 7, i8 1, i8 1, i8 1, i8 1, i8 3, i8 3, i8 3, i8 3>)
  %3 = bitcast <16 x i8> %2 to <4 x float>
  %4 = shufflevector <4 x float> %3, <4 x float> undef, <4 x i32> <i32 0, i32 0, i32 2, i32 2>
  ret <4 x float> %4
}

define <16 x i8> @combine_pshufb_palignr(<16 x i8> %a0, <16 x i8> %a1) {
; SSE-LABEL: combine_pshufb_palignr:
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
; AVX-LABEL: combine_pshufb_palignr:
; AVX-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[2,3,2,3]
  %1 = shufflevector <16 x i8> %a0, <16 x i8> %a1, <16 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23>
  %2 = tail call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %1, <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7>)
  ret <16 x i8> %2
}

define <16 x i8> @combine_pshufb_pslldq(<16 x i8> %a0) {
; SSE-LABEL: combine_pshufb_pslldq:
; SSE-NEXT: xorps %xmm0, %xmm0
; AVX-LABEL: combine_pshufb_pslldq:
; AVX-NEXT: vxorps %xmm0, %xmm0, %xmm0
  %1 = tail call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %a0, <16 x i8> <i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7>)
  %2 = shufflevector <16 x i8> %1, <16 x i8> zeroinitializer, <16 x i32> <i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
  ret <16 x i8> %2
}

define <16 x i8> @combine_pshufb_psrldq(<16 x i8> %a0) {
; SSE-LABEL: combine_pshufb_psrldq:
; SSE-NEXT: xorps %xmm0, %xmm0
; AVX-LABEL: combine_pshufb_psrldq:
; AVX-NEXT: vxorps %xmm0, %xmm0, %xmm0
  %1 = tail call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %a0, <16 x i8> <i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128>)
  %2 = shufflevector <16 x i8> %1, <16 x i8> zeroinitializer, <16 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16>
  ret <16 x i8> %2
}

define <16 x i8> @combine_and_pshufb(<16 x i8> %a0) {
; SSSE3-LABEL: combine_and_pshufb:
; SSSE3-NEXT: andps {{.*}}(%rip), %xmm0
; SSE41-LABEL: combine_and_pshufb:
; SSE41-NEXT: pxor %xmm1, %xmm1
; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4],xmm1[5,6,7]
; AVX-LABEL: combine_and_pshufb:
; AVX-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4],xmm1[5,6,7]
  %1 = shufflevector <16 x i8> %a0, <16 x i8> zeroinitializer, <16 x i32> <i32 16, i32 16, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
  %2 = call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %1, <16 x i8> <i8 0, i8 1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 8, i8 9, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>)
  ret <16 x i8> %2
}

define <16 x i8> @combine_pshufb_and(<16 x i8> %a0) {
; SSSE3-LABEL: combine_pshufb_and:
; SSSE3-NEXT: andps {{.*}}(%rip), %xmm0
; SSE41-LABEL: combine_pshufb_and:
; SSE41-NEXT: pxor %xmm1, %xmm1
; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4],xmm1[5,6,7]
; AVX-LABEL: combine_pshufb_and:
; AVX-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4],xmm1[5,6,7]
  %1 = call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %a0, <16 x i8> <i8 0, i8 1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 8, i8 9, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>)
  %2 = shufflevector <16 x i8> %1, <16 x i8> zeroinitializer, <16 x i32> <i32 16, i32 16, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
  ret <16 x i8> %2
}

define <16 x i8> @combine_pshufb_as_palignr(<16 x i8> %a0) {
; SSE-LABEL: combine_pshufb_as_palignr:
; SSE-NEXT: palignr {{.*#+}} xmm0 = xmm0[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,0]
; AVX-LABEL: combine_pshufb_as_palignr:
; AVX-NEXT: vpalignr {{.*#+}} xmm0 = xmm0[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,0]
  %res0 = call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %a0, <16 x i8> <i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 undef, i8 undef, i8 0>)
  ret <16 x i8> %res0
}

define <16 x i8> @combine_pshufb_as_pslldq(<16 x i8> %a0) {
; SSE-LABEL: combine_pshufb_as_pslldq:
; SSE-NEXT: pslldq {{.*#+}} xmm0 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm0[0,1,2,3,4,5]
; AVX-LABEL: combine_pshufb_as_pslldq:
; AVX-NEXT: vpslldq {{.*#+}} xmm0 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm0[0,1,2,3,4,5]
  %res0 = call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %a0, <16 x i8> <i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5>)
  ret <16 x i8> %res0
}

define <16 x i8> @combine_pshufb_as_psrldq(<16 x i8> %a0) {
; SSE-LABEL: combine_pshufb_as_psrldq:
; SSE-NEXT: psrldq {{.*#+}} xmm0 = xmm0[15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; AVX-LABEL: combine_pshufb_as_psrldq:
; AVX-NEXT: vpsrldq {{.*#+}} xmm0 = xmm0[15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
  %res0 = call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %a0, <16 x i8> <i8 15, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128>)
  ret <16 x i8> %res0
}

define <16 x i8> @combine_pshufb_as_psrlw(<16 x i8> %a0) {
; SSE-LABEL: combine_pshufb_as_psrlw:
; SSE-NEXT: psrlw $8, %xmm0
; AVX-LABEL: combine_pshufb_as_psrlw:
; AVX-NEXT: vpsrlw $8, %xmm0, %xmm0
  %res0 = call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %a0, <16 x i8> <i8 1, i8 128, i8 3, i8 128, i8 5, i8 128, i8 7, i8 128, i8 9, i8 128, i8 11, i8 128, i8 13, i8 128, i8 15, i8 128>)
  ret <16 x i8> %res0
}

define <16 x i8> @combine_pshufb_as_pslld(<16 x i8> %a0) {
; SSE-LABEL: combine_pshufb_as_pslld:
; SSE-NEXT: pslld $24, %xmm0
; AVX-LABEL: combine_pshufb_as_pslld:
; AVX-NEXT: vpslld $24, %xmm0, %xmm0
  %res0 = call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %a0, <16 x i8> <i8 128, i8 128, i8 128, i8 0, i8 128, i8 128, i8 128, i8 4, i8 128, i8 128, i8 128, i8 8, i8 128, i8 128, i8 128, i8 12>)
  ret <16 x i8> %res0
}

define <16 x i8> @combine_pshufb_as_psrlq(<16 x i8> %a0) {
; SSE-LABEL: combine_pshufb_as_psrlq:
; SSE-NEXT: psrlq $40, %xmm0
; AVX-LABEL: combine_pshufb_as_psrlq:
; AVX-NEXT: vpsrlq $40, %xmm0, %xmm0
  %res0 = call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %a0, <16 x i8> <i8 5, i8 6, i8 7, i8 128, i8 128, i8 128, i8 128, i8 128, i8 13, i8 14, i8 15, i8 128, i8 128, i8 128, i8 128, i8 128>)
  ret <16 x i8> %res0
}

define <16 x i8> @combine_pshufb_as_pshuflw(<16 x i8> %a0) {
; SSE-LABEL: combine_pshufb_as_pshuflw:
; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[1,0,3,2,4,5,6,7]
; AVX-LABEL: combine_pshufb_as_pshuflw:
; AVX-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[1,0,3,2,4,5,6,7]
  %res0 = call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %a0, <16 x i8> <i8 2, i8 3, i8 0, i8 1, i8 6, i8 7, i8 4, i8 5, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>)
  ret <16 x i8> %res0
}

define <16 x i8> @combine_pshufb_as_pshufhw(<16 x i8> %a0) {
; SSE-LABEL: combine_pshufb_as_pshufhw:
; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,5,4,7,6]
; AVX-LABEL: combine_pshufb_as_pshufhw:
; AVX-NEXT: vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,5,4,7,6]
  %res0 = call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %a0, <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 10, i8 11, i8 8, i8 9, i8 14, i8 15, i8 12, i8 13>)
  ret <16 x i8> %res0
}

define <16 x i8> @combine_pshufb_not_as_pshufw(<16 x i8> %a0) {
; SSE-LABEL: combine_pshufb_not_as_pshufw:
; SSE-NEXT: pshufb {{.*#+}} xmm0 = xmm0[2,3,0,1,6,7,4,5,10,11,8,9,14,15,12,13]
; AVX-LABEL: combine_pshufb_not_as_pshufw:
; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[2,3,0,1,6,7,4,5,10,11,8,9,14,15,12,13]
  %res0 = call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %a0, <16 x i8> <i8 2, i8 3, i8 0, i8 1, i8 6, i8 7, i8 4, i8 5, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>)
  %res1 = call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %res0, <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 10, i8 11, i8 8, i8 9, i8 14, i8 15, i8 12, i8 13>)
  ret <16 x i8> %res1
}

define <16 x i8> @combine_vpshufb_as_pshuflw_not_pslld(<16 x i8> *%a0) {
; SSE-LABEL: combine_vpshufb_as_pshuflw_not_pslld:
; SSE-NEXT: pshuflw {{.*#+}} xmm0 = mem[0,0,2,2,4,5,6,7]
; AVX-LABEL: combine_vpshufb_as_pshuflw_not_pslld:
; AVX-NEXT: vpshuflw {{.*#+}} xmm0 = mem[0,0,2,2,4,5,6,7]
  %res0 = load <16 x i8>, <16 x i8> *%a0, align 16
  %res1 = call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %res0, <16 x i8> <i8 undef, i8 undef, i8 0, i8 1, i8 undef, i8 undef, i8 4, i8 5, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef>)
  ret <16 x i8> %res1
}

define <16 x i8> @combine_pshufb_as_unary_unpcklbw(<16 x i8> %a0) {
; SSE-LABEL: combine_pshufb_as_unary_unpcklbw:
; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; AVX-LABEL: combine_pshufb_as_unary_unpcklbw:
; AVX-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
  %1 = tail call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %a0, <16 x i8> <i8 0, i8 undef, i8 undef, i8 1, i8 2, i8 2, i8 3, i8 3, i8 4, i8 4, i8 5, i8 5, i8 6, i8 6, i8 7, i8 7>)
  ret <16 x i8> %1
}

define <16 x i8> @combine_pshufb_as_unary_unpckhwd(<16 x i8> %a0) {
; SSE-LABEL: combine_pshufb_as_unary_unpckhwd:
; SSE-NEXT: punpckhwd {{.*#+}} xmm0 = xmm0[4,4,5,5,6,6,7,7]
; AVX-LABEL: combine_pshufb_as_unary_unpckhwd:
; AVX-NEXT: vpunpckhwd {{.*#+}} xmm0 = xmm0[4,4,5,5,6,6,7,7]
  %1 = tail call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %a0, <16 x i8> <i8 8, i8 9, i8 8, i8 9, i8 10, i8 11, i8 10, i8 11, i8 12, i8 13, i8 12, i8 13, i8 14, i8 15, i8 undef, i8 undef>)
  ret <16 x i8> %1
}

define <8 x i16> @combine_pshufb_as_unpacklo_undef(<16 x i8> %a0) {
; CHECK-LABEL: combine_pshufb_as_unpacklo_undef:
  %1 = call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %a0, <16 x i8> <i8 undef, i8 undef, i8 0, i8 1, i8 undef, i8 undef, i8 2, i8 3, i8 undef, i8 undef, i8 4, i8 5, i8 undef, i8 undef, i8 6, i8 7>)
  %2 = bitcast <16 x i8> %1 to <8 x i16>
  %3 = shufflevector <8 x i16> %2, <8 x i16> undef, <8 x i32> <i32 0, i32 0, i32 2, i32 2, i32 4, i32 4, i32 6, i32 6>
  ret <8 x i16> %3
}

define <16 x i8> @combine_pshufb_as_unpackhi_undef(<16 x i8> %a0) {
; CHECK-LABEL: combine_pshufb_as_unpackhi_undef:
  %1 = call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %a0, <16 x i8> <i8 8, i8 undef, i8 9, i8 undef, i8 10, i8 undef, i8 11, i8 undef, i8 12, i8 undef, i8 13, i8 undef, i8 14, i8 undef, i8 15, i8 undef>)
  %2 = shufflevector <16 x i8> %1, <16 x i8> undef, <16 x i32> <i32 1, i32 1, i32 3, i32 3, i32 5, i32 5, i32 7, i32 7, i32 9, i32 9, i32 11, i32 11, i32 13, i32 13, i32 15, i32 15>
  ret <16 x i8> %2
}

define <16 x i8> @combine_pshufb_as_unpacklo_zero(<16 x i8> %a0) {
; SSE-LABEL: combine_pshufb_as_unpacklo_zero:
; SSE-NEXT: xorps %xmm1, %xmm1
; SSE-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; SSE-NEXT: movaps %xmm1, %xmm0
; AVX-LABEL: combine_pshufb_as_unpacklo_zero:
; AVX-NEXT: vxorps %xmm1, %xmm1, %xmm1
; AVX-NEXT: vunpcklps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
  %1 = call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %a0, <16 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 0, i8 1, i8 2, i8 3, i8 -1, i8 -1, i8 -1, i8 -1, i8 4, i8 5, i8 6, i8 7>)
  ret <16 x i8> %1
}

define <16 x i8> @combine_pshufb_as_unpackhi_zero(<16 x i8> %a0) {
; SSE-LABEL: combine_pshufb_as_unpackhi_zero:
; SSE-NEXT: pxor %xmm1, %xmm1
; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
; AVX-LABEL: combine_pshufb_as_unpackhi_zero:
; AVX-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX-NEXT: vpunpckhbw {{.*#+}} xmm0 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
  %1 = call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %a0, <16 x i8> <i8 8, i8 -1, i8 9, i8 -1, i8 10, i8 -1, i8 11, i8 -1, i8 12, i8 -1, i8 13, i8 -1, i8 14, i8 -1, i8 15, i8 -1>)
  ret <16 x i8> %1
}

define <16 x i8> @combine_psrlw_pshufb(<8 x i16> %a0) {
; SSE-LABEL: combine_psrlw_pshufb:
; SSE-NEXT: pshufb {{.*#+}} xmm0 = xmm0[1],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[1],zero,zero,zero
; AVX-LABEL: combine_psrlw_pshufb:
; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[1],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[1],zero,zero,zero
  %1 = lshr <8 x i16> %a0, <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>
  %2 = bitcast <8 x i16> %1 to <16 x i8>
  %3 = tail call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %2, <16 x i8> <i8 0, i8 -1, i8 -1, i8 -1, i8 0, i8 -1, i8 -1, i8 -1, i8 0, i8 -1, i8 -1, i8 -1, i8 0, i8 -1, i8 -1, i8 -1>)
  ret <16 x i8> %3
}

define <16 x i8> @combine_pslld_pshufb(<4 x i32> %a0) {
; SSE-LABEL: combine_pslld_pshufb:
; SSE-NEXT: pshufb {{.*#+}} xmm0 = xmm0[2,1,0],zero,xmm0[6,5,4],zero,xmm0[10,9,8],zero,xmm0[14,13,12],zero
; AVX-LABEL: combine_pslld_pshufb:
; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[2,1,0],zero,xmm0[6,5,4],zero,xmm0[10,9,8],zero,xmm0[14,13,12],zero
  %1 = shl <4 x i32> %a0, <i32 8, i32 8, i32 8, i32 8>
  %2 = bitcast <4 x i32> %1 to <16 x i8>
  %3 = tail call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %2, <16 x i8> <i8 3, i8 2, i8 1, i8 0, i8 7, i8 6, i8 5, i8 4, i8 11, i8 10, i8 9, i8 8, i8 15, i8 14, i8 13, i8 12>)
  ret <16 x i8> %3
}

define <16 x i8> @combine_psrlq_pshufb(<2 x i64> %a0) {
; SSE-LABEL: combine_psrlq_pshufb:
; SSE-NEXT: pshufb {{.*#+}} xmm0 = zero,zero,zero,zero,zero,zero,xmm0[7,6],zero,zero,zero,zero,zero,zero,xmm0[15,14]
; AVX-LABEL: combine_psrlq_pshufb:
; AVX-NEXT: vpshufb {{.*#+}} xmm0 = zero,zero,zero,zero,zero,zero,xmm0[7,6],zero,zero,zero,zero,zero,zero,xmm0[15,14]
  %1 = lshr <2 x i64> %a0, <i64 48, i64 48>
  %2 = bitcast <2 x i64> %1 to <16 x i8>
  %3 = tail call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %2, <16 x i8> <i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0, i8 15, i8 14, i8 13, i8 12, i8 11, i8 10, i8 9, i8 8>)
  ret <16 x i8> %3
}

define <16 x i8> @combine_unpckl_arg0_pshufb(<16 x i8> %a0, <16 x i8> %a1) {
; SSE-LABEL: combine_unpckl_arg0_pshufb:
; SSE-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[0],zero,zero,zero,xmm0[0],zero,zero,zero,xmm0[0],zero,zero,zero
; AVX-LABEL: combine_unpckl_arg0_pshufb:
; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[0],zero,zero,zero,xmm0[0],zero,zero,zero,xmm0[0],zero,zero,zero
  %1 = shufflevector <16 x i8> %a0, <16 x i8> %a1, <16 x i32> <i32 0, i32 16, i32 1, i32 17, i32 2, i32 18, i32 3, i32 19, i32 4, i32 20, i32 5, i32 21, i32 6, i32 22, i32 7, i32 23>
  %2 = tail call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %1, <16 x i8> <i8 0, i8 -1, i8 -1, i8 -1, i8 0, i8 -1, i8 -1, i8 -1, i8 0, i8 -1, i8 -1, i8 -1, i8 0, i8 -1, i8 -1, i8 -1>)
  ret <16 x i8> %2
}

define <16 x i8> @combine_unpckl_arg1_pshufb(<16 x i8> %a0, <16 x i8> %a1) {
; SSE-LABEL: combine_unpckl_arg1_pshufb:
; SSE-NEXT: movdqa %xmm1, %xmm0
; SSE-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[0],zero,zero,zero,xmm0[0],zero,zero,zero,xmm0[0],zero,zero,zero
; AVX-LABEL: combine_unpckl_arg1_pshufb:
; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm1[0],zero,zero,zero,xmm1[0],zero,zero,zero,xmm1[0],zero,zero,zero,xmm1[0],zero,zero,zero
  %1 = shufflevector <16 x i8> %a0, <16 x i8> %a1, <16 x i32> <i32 0, i32 16, i32 1, i32 17, i32 2, i32 18, i32 3, i32 19, i32 4, i32 20, i32 5, i32 21, i32 6, i32 22, i32 7, i32 23>
  %2 = tail call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %1, <16 x i8> <i8 1, i8 -1, i8 -1, i8 -1, i8 1, i8 -1, i8 -1, i8 -1, i8 1, i8 -1, i8 -1, i8 -1, i8 1, i8 -1, i8 -1, i8 -1>)
  ret <16 x i8> %2
}

define <8 x i16> @shuffle_combine_unpack_insert(<8 x i16> %a0) {
; SSE-LABEL: shuffle_combine_unpack_insert:
; SSE-NEXT: pshufb {{.*#+}} xmm0 = xmm0[4,5,4,5,4,5,8,9,8,9,8,9,10,11,10,11]
; AVX-LABEL: shuffle_combine_unpack_insert:
; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[4,5,4,5,4,5,8,9,8,9,8,9,10,11,10,11]
  %1 = extractelement <8 x i16> %a0, i32 2
  %2 = extractelement <8 x i16> %a0, i32 4
  %3 = insertelement <8 x i16> %a0, i16 %1, i32 4
  %4 = insertelement <8 x i16> %a0, i16 %2, i32 2
  %5 = shufflevector <8 x i16> %3, <8 x i16> %4, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11>
  %6 = shufflevector <8 x i16> %5, <8 x i16> %3, <8 x i32> <i32 4, i32 12, i32 5, i32 13, i32 undef, i32 undef, i32 undef, i32 undef>
  %7 = shufflevector <8 x i16> %5, <8 x i16> %a0, <8 x i32> <i32 4, i32 12, i32 5, i32 13, i32 undef, i32 undef, i32 undef, i32 undef>
  %8 = shufflevector <8 x i16> %6, <8 x i16> %7, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11>
  ret <8 x i16> %8
}

define <16 x i8> @shuffle_combine_packssdw_pshufb(<4 x i32> %a0) {
; SSE-LABEL: shuffle_combine_packssdw_pshufb:
; SSE-NEXT: psrad $31, %xmm0
; SSE-NEXT: pshufb {{.*#+}} xmm0 = xmm0[13,12,9,8,5,4,1,0,13,12,9,8,5,4,1,0]
; AVX-LABEL: shuffle_combine_packssdw_pshufb:
; AVX-NEXT: vpsrad $31, %xmm0, %xmm0
; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[13,12,9,8,5,4,1,0,13,12,9,8,5,4,1,0]
  %1 = ashr <4 x i32> %a0, <i32 31, i32 31, i32 31, i32 31>
  %2 = tail call <8 x i16> @llvm.x86.sse2.packssdw.128(<4 x i32> %1, <4 x i32> %1)
  %3 = bitcast <8 x i16> %2 to <16 x i8>
  %4 = tail call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %3, <16 x i8> <i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0, i8 15, i8 14, i8 13, i8 12, i8 11, i8 10, i8 9, i8 8>)
  ret <16 x i8> %4
}
declare <8 x i16> @llvm.x86.sse2.packssdw.128(<4 x i32>, <4 x i32>) nounwind readnone

define <16 x i8> @shuffle_combine_packsswb_pshufb(<8 x i16> %a0, <8 x i16> %a1) {
; SSE-LABEL: shuffle_combine_packsswb_pshufb:
; SSE-NEXT: psraw $15, %xmm0
; SSE-NEXT: pshufb {{.*#+}} xmm0 = xmm0[14,12,10,8,6,4,2,0,14,12,10,8,6,4,2,0]
; AVX-LABEL: shuffle_combine_packsswb_pshufb:
; AVX-NEXT: vpsraw $15, %xmm0, %xmm0
; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[14,12,10,8,6,4,2,0,14,12,10,8,6,4,2,0]
  %1 = ashr <8 x i16> %a0, <i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15>
  %2 = ashr <8 x i16> %a1, <i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15>
  %3 = tail call <16 x i8> @llvm.x86.sse2.packsswb.128(<8 x i16> %1, <8 x i16> %2)
  %4 = tail call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %3, <16 x i8> <i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0>)
  ret <16 x i8> %4
}
declare <16 x i8> @llvm.x86.sse2.packsswb.128(<8 x i16>, <8 x i16>) nounwind readnone

define <16 x i8> @shuffle_combine_packuswb_pshufb(<8 x i16> %a0, <8 x i16> %a1) {
; SSE-LABEL: shuffle_combine_packuswb_pshufb:
; SSE-NEXT: pshufb {{.*#+}} xmm0 = xmm0[15,13,11,9,7,5,3,1,15,13,11,9,7,5,3,1]
; AVX-LABEL: shuffle_combine_packuswb_pshufb:
; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[15,13,11,9,7,5,3,1,15,13,11,9,7,5,3,1]
  %1 = lshr <8 x i16> %a0, <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>
  %2 = lshr <8 x i16> %a1, <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>
  %3 = tail call <16 x i8> @llvm.x86.sse2.packuswb.128(<8 x i16> %1, <8 x i16> %2)
  %4 = tail call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %3, <16 x i8> <i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0>)
  ret <16 x i8> %4
}
declare <16 x i8> @llvm.x86.sse2.packuswb.128(<8 x i16>, <8 x i16>) nounwind readnone

define <16 x i8> @combine_pshufb_pshufb_or_as_blend(<16 x i8> %a0, <16 x i8> %a1) {
; SSSE3-LABEL: combine_pshufb_pshufb_or_as_blend:
; SSSE3-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
; SSSE3-NEXT: movapd %xmm1, %xmm0
; SSE41-LABEL: combine_pshufb_pshufb_or_as_blend:
; SSE41-NEXT: blendps {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3]
; AVX-LABEL: combine_pshufb_pshufb_or_as_blend:
; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3]
  %1 = call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %a0, <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>)
  %2 = call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %a1, <16 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>)
  %3 = or <16 x i8> %1, %2
  ret <16 x i8> %3
}

define <16 x i8> @combine_pshufb_pshufb_or_as_unpcklbw(<16 x i8> %a0, <16 x i8> %a1) {
; SSE-LABEL: combine_pshufb_pshufb_or_as_unpcklbw:
; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; AVX-LABEL: combine_pshufb_pshufb_or_as_unpcklbw:
; AVX-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
  %1 = call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %a0, <16 x i8> <i8 0, i8 -1, i8 1, i8 -1, i8 2, i8 -1, i8 3, i8 -1, i8 4, i8 -1, i8 5, i8 -1, i8 6, i8 -1, i8 7, i8 -1>)
  %2 = call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %a1, <16 x i8> <i8 -1, i8 0, i8 -1, i8 1, i8 -1, i8 2, i8 -1, i8 3, i8 -1, i8 4, i8 -1, i8 5, i8 -1, i8 6, i8 -1, i8 7>)
  %3 = or <16 x i8> %1, %2
  ret <16 x i8> %3
}

define <16 x i8> @combine_pshufb_pshufb_or_pshufb(<16 x i8> %a0) {
; SSE-LABEL: combine_pshufb_pshufb_or_pshufb:
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
; AVX1-LABEL: combine_pshufb_pshufb_or_pshufb:
; AVX1-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,0,0,0]
; AVX2-LABEL: combine_pshufb_pshufb_or_pshufb:
; AVX2-NEXT: vbroadcastss %xmm0, %xmm0
; AVX512F-LABEL: combine_pshufb_pshufb_or_pshufb:
; AVX512F-NEXT: vbroadcastss %xmm0, %xmm0
  %1 = call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %a0, <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 -1, i8 -1, i8 -1, i8 -1, i8 0, i8 1, i8 2, i8 3, i8 -1, i8 -1, i8 -1, i8 -1>)
  %2 = call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %a0, <16 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 0, i8 1, i8 2, i8 3, i8 -1, i8 -1, i8 -1, i8 -1, i8 0, i8 1, i8 2, i8 3>)
  %3 = or <16 x i8> %1, %2
  %4 = call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %3, <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 0, i8 1, i8 2, i8 3, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7>)
  ret <16 x i8> %4
}

define <16 x i8> @constant_fold_pshufb() {
; SSE-LABEL: constant_fold_pshufb:
; SSE-NEXT: movaps {{.*#+}} xmm0 = <14,0,0,0,u,u,0,0,0,0,0,0,0,0,8,9>
; AVX-LABEL: constant_fold_pshufb:
; AVX-NEXT: vmovaps {{.*#+}} xmm0 = <14,0,0,0,u,u,0,0,0,0,0,0,0,0,8,9>
  %1 = tail call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> <i8 15, i8 14, i8 13, i8 12, i8 11, i8 10, i8 9, i8 8, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0>, <16 x i8> <i8 1, i8 -1, i8 -1, i8 -1, i8 undef, i8 undef, i8 -1, i8 -1, i8 15, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 7, i8 6>)
  ret <16 x i8> %1
}

; FIXME - unnecessary pshufb/broadcast being used - the pshufb mask only needs the lowest byte.
define <16 x i8> @constant_fold_pshufb_2() {
; SSE-LABEL: constant_fold_pshufb_2:
; SSE-NEXT: movl $2, %eax
; SSE-NEXT: movd %eax, %xmm0
; SSE-NEXT: pxor %xmm1, %xmm1
; SSE-NEXT: pshufb %xmm1, %xmm0
; AVX1-LABEL: constant_fold_pshufb_2:
; AVX1-NEXT: movl $2, %eax
; AVX1-NEXT: vmovd %eax, %xmm0
; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vpshufb %xmm1, %xmm0, %xmm0
; AVX2-LABEL: constant_fold_pshufb_2:
; AVX2-NEXT: movl $2, %eax
; AVX2-NEXT: vmovd %eax, %xmm0
; AVX2-NEXT: vpbroadcastb %xmm0, %xmm0
; AVX512F-LABEL: constant_fold_pshufb_2:
; AVX512F-NEXT: movl $2, %eax
; AVX512F-NEXT: vmovd %eax, %xmm0
; AVX512F-NEXT: vpbroadcastb %xmm0, %xmm0
  %1 = tail call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> <i8 2, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, <16 x i8> <i8 0, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef>)
  ret <16 x i8> %1
}

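; The masked extracts below only demand part of element 3, so the PSHUFB mask
; should be simplified to zero (or leave undef) the lanes that are never read.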
define i32 @mask_zzz3_v16i8(<16 x i8> %a0) {
; SSSE3-LABEL: mask_zzz3_v16i8:
; SSSE3-NEXT: pshufb {{.*#+}} xmm0 = zero,zero,zero,xmm0[14,u,u,u,u,u,u,u,u,u,u,u,u]
; SSSE3-NEXT: movd %xmm0, %eax
; SSE41-LABEL: mask_zzz3_v16i8:
; SSE41-NEXT: pshufb {{.*#+}} xmm0 = xmm0[u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm0[14]
; SSE41-NEXT: pextrd $3, %xmm0, %eax
; AVX-LABEL: mask_zzz3_v16i8:
; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm0[14]
; AVX-NEXT: vpextrd $3, %xmm0, %eax
  %1 = call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %a0, <16 x i8> <i8 0, i8 2, i8 4, i8 6, i8 8, i8 10, i8 12, i8 14, i8 0, i8 2, i8 4, i8 6, i8 8, i8 10, i8 12, i8 14>)
  %2 = bitcast <16 x i8> %1 to <4 x i32>
  %3 = extractelement <4 x i32> %2, i32 3
  %4 = and i32 %3, 4278190080
  ret i32 %4
}

define i32 @mask_z1z3_v16i8(<16 x i8> %a0) {
; SSSE3-LABEL: mask_z1z3_v16i8:
; SSSE3-NEXT: pshufb {{.*#+}} xmm0 = zero,xmm0[10],zero,xmm0[14,u,u,u,u,u,u,u,u,u,u,u,u]
; SSSE3-NEXT: movd %xmm0, %eax
; SSE41-LABEL: mask_z1z3_v16i8:
; SSE41-NEXT: pshufb {{.*#+}} xmm0 = xmm0[u,u,u,u,u,u,u,u,u,u,u,u],zero,xmm0[10],zero,xmm0[14]
; SSE41-NEXT: pextrd $3, %xmm0, %eax
; AVX-LABEL: mask_z1z3_v16i8:
; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[u,u,u,u,u,u,u,u,u,u,u,u],zero,xmm0[10],zero,xmm0[14]
; AVX-NEXT: vpextrd $3, %xmm0, %eax
  %1 = call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %a0, <16 x i8> <i8 0, i8 2, i8 4, i8 6, i8 8, i8 10, i8 12, i8 14, i8 0, i8 2, i8 4, i8 6, i8 8, i8 10, i8 12, i8 14>)
  %2 = bitcast <16 x i8> %1 to <4 x i32>
  %3 = extractelement <4 x i32> %2, i32 3
  %4 = and i32 %3, 4278255360
  ret i32 %4
}

define i32 @PR22415(double %a0) {
; SSE-LABEL: PR22415:
; SSE-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,2,4],zero,xmm0[u,u,u,u,u,u,u,u,u,u,u,u]
; SSE-NEXT: movd %xmm0, %eax
; AVX-LABEL: PR22415:
; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,2,4],zero,xmm0[u,u,u,u,u,u,u,u,u,u,u,u]
; AVX-NEXT: vmovd %xmm0, %eax
  %1 = bitcast double %a0 to <8 x i8>
  %2 = shufflevector <8 x i8> %1, <8 x i8> undef, <4 x i32> <i32 0, i32 2, i32 4, i32 undef>
  %3 = shufflevector <4 x i8> %2, <4 x i8> undef, <3 x i32> <i32 0, i32 1, i32 2>
  %4 = bitcast <3 x i8> %3 to i24
  %5 = zext i24 %4 to i32
  ret i32 %5
}