; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+ssse3 | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSSE3
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSE41
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX1
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx2 | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX2
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx512f | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX512F

; Combine tests involving SSE3/SSSE3 target shuffles (MOVDDUP, MOVSHDUP, MOVSLDUP, PSHUFB)

declare <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8>, <16 x i8>)

define <16 x i8> @combine_vpshufb_as_zero(<16 x i8> %a0) {
; SSE-LABEL: combine_vpshufb_as_zero:
; SSE-NEXT:    xorps %xmm0, %xmm0
; AVX-LABEL: combine_vpshufb_as_zero:
; AVX-NEXT:    vxorps %xmm0, %xmm0, %xmm0
  %res0 = call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %a0, <16 x i8> <i8 128, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>)
  %res1 = call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %res0, <16 x i8> <i8 0, i8 128, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>)
  %res2 = call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %res1, <16 x i8> <i8 0, i8 1, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128>)
  ret <16 x i8> %res2
}

define <16 x i8> @combine_vpshufb_as_movq(<16 x i8> %a0) {
; SSE-LABEL: combine_vpshufb_as_movq:
; SSE-NEXT:    movq {{.*#+}} xmm0 = xmm0[0],zero
; AVX-LABEL: combine_vpshufb_as_movq:
; AVX-NEXT:    vmovq {{.*#+}} xmm0 = xmm0[0],zero
  %res0 = call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %a0, <16 x i8> <i8 0, i8 128, i8 1, i8 128, i8 2, i8 128, i8 3, i8 128, i8 4, i8 128, i8 5, i8 128, i8 6, i8 128, i8 7, i8 128>)
  %res1 = call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %res0, <16 x i8> <i8 0, i8 2, i8 4, i8 6, i8 8, i8 10, i8 12, i8 14, i8 1, i8 3, i8 5, i8 7, i8 9, i8 11, i8 13, i8 15>)
  ret <16 x i8> %res1
}

define <2 x double> @combine_pshufb_as_movsd(<2 x double> %a0, <2 x double> %a1) {
; SSSE3-LABEL: combine_pshufb_as_movsd:
; SSSE3-NEXT:    movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
; SSSE3-NEXT:    movapd %xmm1, %xmm0
; SSE41-LABEL: combine_pshufb_as_movsd:
; SSE41-NEXT:    blendpd {{.*#+}} xmm0 = xmm0[0],xmm1[1]
; AVX1-LABEL: combine_pshufb_as_movsd:
; AVX1-NEXT:    vblendpd {{.*#+}} xmm0 = xmm0[0],xmm1[1]
; AVX2-LABEL: combine_pshufb_as_movsd:
; AVX2-NEXT:    vblendpd {{.*#+}} xmm0 = xmm0[0],xmm1[1]
; AVX512F-LABEL: combine_pshufb_as_movsd:
; AVX512F-NEXT:    vmovsd {{.*#+}} xmm0 = xmm0[0],xmm1[1]
  %1 = shufflevector <2 x double> %a0, <2 x double> %a1, <2 x i32> <i32 3, i32 0>
  %2 = bitcast <2 x double> %1 to <16 x i8>
  %3 = tail call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %2, <16 x i8> <i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7>)
  %4 = bitcast <16 x i8> %3 to <2 x double>
  ret <2 x double> %4
}

define <4 x float> @combine_pshufb_as_movss(<4 x float> %a0, <4 x float> %a1) {
; SSSE3-LABEL: combine_pshufb_as_movss:
; SSSE3-NEXT:    movss {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
; SSE41-LABEL: combine_pshufb_as_movss:
; SSE41-NEXT:    blendps {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
; AVX1-LABEL: combine_pshufb_as_movss:
; AVX1-NEXT:    vblendps {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
; AVX2-LABEL: combine_pshufb_as_movss:
; AVX2-NEXT:    vblendps {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
; AVX512F-LABEL: combine_pshufb_as_movss:
; AVX512F-NEXT:    vmovss {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
  %1 = shufflevector <4 x float> %a0, <4 x float> %a1, <4 x i32> <i32 4, i32 3, i32 2, i32 1>
  %2 = bitcast <4 x float> %1 to <16 x i8>
  %3 = tail call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %2, <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 12, i8 13, i8 14, i8 15, i8 8, i8 9, i8 10, i8 11, i8 4, i8 5, i8 6, i8 7>)
  %4 = bitcast <16 x i8> %3 to <4 x float>
  ret <4 x float> %4
}

define <4 x i32> @combine_pshufb_as_zext(<16 x i8> %a0) {
; SSSE3-LABEL: combine_pshufb_as_zext:
; SSSE3-NEXT:    pshufb {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
; SSE41-LABEL: combine_pshufb_as_zext:
; SSE41-NEXT:    pmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
; AVX-LABEL: combine_pshufb_as_zext:
; AVX-NEXT:    vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
  %1 = call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %a0, <16 x i8> <i8 0, i8 -1, i8 -1, i8 -1, i8 1, i8 -1, i8 -1, i8 -1, i8 2, i8 -1, i8 -1, i8 -1, i8 3, i8 -1, i8 -1, i8 -1>)
  %2 = bitcast <16 x i8> %1 to <4 x i32>
  ret <4 x i32> %2
}

define <2 x double> @combine_pshufb_as_vzmovl_64(<2 x double> %a0) {
; SSE-LABEL: combine_pshufb_as_vzmovl_64:
; SSE-NEXT:    movq {{.*#+}} xmm0 = xmm0[0],zero
; AVX-LABEL: combine_pshufb_as_vzmovl_64:
; AVX-NEXT:    vmovq {{.*#+}} xmm0 = xmm0[0],zero
  %1 = bitcast <2 x double> %a0 to <16 x i8>
  %2 = call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %1, <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>)
  %3 = bitcast <16 x i8> %2 to <2 x double>
  ret <2 x double> %3
}

define <4 x float> @combine_pshufb_as_vzmovl_32(<4 x float> %a0) {
; SSSE3-LABEL: combine_pshufb_as_vzmovl_32:
; SSSE3-NEXT:    xorps %xmm1, %xmm1
; SSSE3-NEXT:    movss {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
; SSSE3-NEXT:    movaps %xmm1, %xmm0
; SSE41-LABEL: combine_pshufb_as_vzmovl_32:
; SSE41-NEXT:    xorps %xmm1, %xmm1
; SSE41-NEXT:    blendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
; AVX1-LABEL: combine_pshufb_as_vzmovl_32:
; AVX1-NEXT:    vxorps %xmm1, %xmm1, %xmm1
; AVX1-NEXT:    vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
; AVX2-LABEL: combine_pshufb_as_vzmovl_32:
; AVX2-NEXT:    vxorps %xmm1, %xmm1, %xmm1
; AVX2-NEXT:    vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
; AVX512F-LABEL: combine_pshufb_as_vzmovl_32:
; AVX512F-NEXT:    vxorps %xmm1, %xmm1, %xmm1
; AVX512F-NEXT:    vmovss {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
  %1 = bitcast <4 x float> %a0 to <16 x i8>
  %2 = call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %1, <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>)
  %3 = bitcast <16 x i8> %2 to <4 x float>
  ret <4 x float> %3
}

define <4 x float> @combine_pshufb_movddup(<4 x float> %a0) {
; SSE-LABEL: combine_pshufb_movddup:
; SSE-NEXT:    pshufb {{.*#+}} xmm0 = xmm0[5,5,5,5,7,7,7,7,5,5,5,5,7,7,7,7]
; AVX-LABEL: combine_pshufb_movddup:
; AVX-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[5,5,5,5,7,7,7,7,5,5,5,5,7,7,7,7]
  %1 = bitcast <4 x float> %a0 to <16 x i8>
  %2 = tail call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %1, <16 x i8> <i8 5, i8 5, i8 5, i8 5, i8 7, i8 7, i8 7, i8 7, i8 1, i8 1, i8 1, i8 1, i8 3, i8 3, i8 3, i8 3>)
  %3 = bitcast <16 x i8> %2 to <4 x float>
  %4 = shufflevector <4 x float> %3, <4 x float> undef, <4 x i32> <i32 0, i32 1, i32 0, i32 1>
  ret <4 x float> %4
}

define <4 x float> @combine_pshufb_movshdup(<4 x float> %a0) {
; SSE-LABEL: combine_pshufb_movshdup:
; SSE-NEXT:    pshufb {{.*#+}} xmm0 = xmm0[7,7,7,7,7,7,7,7,3,3,3,3,3,3,3,3]
; AVX-LABEL: combine_pshufb_movshdup:
; AVX-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[7,7,7,7,7,7,7,7,3,3,3,3,3,3,3,3]
  %1 = bitcast <4 x float> %a0 to <16 x i8>
  %2 = tail call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %1, <16 x i8> <i8 5, i8 5, i8 5, i8 5, i8 7, i8 7, i8 7, i8 7, i8 1, i8 1, i8 1, i8 1, i8 3, i8 3, i8 3, i8 3>)
  %3 = bitcast <16 x i8> %2 to <4 x float>
  %4 = shufflevector <4 x float> %3, <4 x float> undef, <4 x i32> <i32 1, i32 1, i32 3, i32 3>
  ret <4 x float> %4
}

define <4 x float> @combine_pshufb_movsldup(<4 x float> %a0) {
; SSE-LABEL: combine_pshufb_movsldup:
; SSE-NEXT:    pshufb {{.*#+}} xmm0 = xmm0[5,5,5,5,5,5,5,5,1,1,1,1,1,1,1,1]
; AVX-LABEL: combine_pshufb_movsldup:
; AVX-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[5,5,5,5,5,5,5,5,1,1,1,1,1,1,1,1]
  %1 = bitcast <4 x float> %a0 to <16 x i8>
  %2 = tail call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %1, <16 x i8> <i8 5, i8 5, i8 5, i8 5, i8 7, i8 7, i8 7, i8 7, i8 1, i8 1, i8 1, i8 1, i8 3, i8 3, i8 3, i8 3>)
  %3 = bitcast <16 x i8> %2 to <4 x float>
  %4 = shufflevector <4 x float> %3, <4 x float> undef, <4 x i32> <i32 0, i32 0, i32 2, i32 2>
  ret <4 x float> %4
}

define <16 x i8> @combine_pshufb_palignr(<16 x i8> %a0, <16 x i8> %a1) {
; SSE-LABEL: combine_pshufb_palignr:
; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
; AVX-LABEL: combine_pshufb_palignr:
; AVX-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[2,3,2,3]
  %1 = shufflevector <16 x i8> %a0, <16 x i8> %a1, <16 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23>
  %2 = tail call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %1, <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7>)
  ret <16 x i8> %2
}

define <16 x i8> @combine_pshufb_pslldq(<16 x i8> %a0) {
; SSE-LABEL: combine_pshufb_pslldq:
; SSE-NEXT:    xorps %xmm0, %xmm0
; AVX-LABEL: combine_pshufb_pslldq:
; AVX-NEXT:    vxorps %xmm0, %xmm0, %xmm0
  %1 = tail call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %a0, <16 x i8> <i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7>)
  %2 = shufflevector <16 x i8> %1, <16 x i8> zeroinitializer, <16 x i32> <i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
  ret <16 x i8> %2
}

define <16 x i8> @combine_pshufb_psrldq(<16 x i8> %a0) {
; SSE-LABEL: combine_pshufb_psrldq:
; SSE-NEXT:    xorps %xmm0, %xmm0
; AVX-LABEL: combine_pshufb_psrldq:
; AVX-NEXT:    vxorps %xmm0, %xmm0, %xmm0
  %1 = tail call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %a0, <16 x i8> <i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128>)
  %2 = shufflevector <16 x i8> %1, <16 x i8> zeroinitializer, <16 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16>
  ret <16 x i8> %2
}

define <16 x i8> @combine_and_pshufb(<16 x i8> %a0) {
; SSSE3-LABEL: combine_and_pshufb:
; SSSE3-NEXT:    andps {{.*}}(%rip), %xmm0
; SSE41-LABEL: combine_and_pshufb:
; SSE41-NEXT:    pxor %xmm1, %xmm1
; SSE41-NEXT:    pblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4],xmm1[5,6,7]
; AVX-LABEL: combine_and_pshufb:
; AVX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
; AVX-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4],xmm1[5,6,7]
  %1 = shufflevector <16 x i8> %a0, <16 x i8> zeroinitializer, <16 x i32> <i32 16, i32 16, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
  %2 = call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %1, <16 x i8> <i8 0, i8 1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 8, i8 9, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>)
  ret <16 x i8> %2
}

define <16 x i8> @combine_pshufb_and(<16 x i8> %a0) {
; SSSE3-LABEL: combine_pshufb_and:
; SSSE3-NEXT:    andps {{.*}}(%rip), %xmm0
; SSE41-LABEL: combine_pshufb_and:
; SSE41-NEXT:    pxor %xmm1, %xmm1
; SSE41-NEXT:    pblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4],xmm1[5,6,7]
; AVX-LABEL: combine_pshufb_and:
; AVX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
; AVX-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4],xmm1[5,6,7]
  %1 = call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %a0, <16 x i8> <i8 0, i8 1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 8, i8 9, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>)
  %2 = shufflevector <16 x i8> %1, <16 x i8> zeroinitializer, <16 x i32> <i32 16, i32 16, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
  ret <16 x i8> %2
}

define <16 x i8> @combine_pshufb_as_palignr(<16 x i8> %a0) {
; SSE-LABEL: combine_pshufb_as_palignr:
; SSE-NEXT:    palignr {{.*#+}} xmm0 = xmm0[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,0]
; AVX-LABEL: combine_pshufb_as_palignr:
; AVX-NEXT:    vpalignr {{.*#+}} xmm0 = xmm0[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,0]
  %res0 = call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %a0, <16 x i8> <i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 undef, i8 undef, i8 0>)
  ret <16 x i8> %res0
}

define <16 x i8> @combine_pshufb_as_pslldq(<16 x i8> %a0) {
; SSE-LABEL: combine_pshufb_as_pslldq:
; SSE-NEXT:    pslldq {{.*#+}} xmm0 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm0[0,1,2,3,4,5]
; AVX-LABEL: combine_pshufb_as_pslldq:
; AVX-NEXT:    vpslldq {{.*#+}} xmm0 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm0[0,1,2,3,4,5]
  %res0 = call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %a0, <16 x i8> <i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5>)
  ret <16 x i8> %res0
}

define <16 x i8> @combine_pshufb_as_psrldq(<16 x i8> %a0) {
; SSE-LABEL: combine_pshufb_as_psrldq:
; SSE-NEXT:    psrldq {{.*#+}} xmm0 = xmm0[15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; AVX-LABEL: combine_pshufb_as_psrldq:
; AVX-NEXT:    vpsrldq {{.*#+}} xmm0 = xmm0[15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
  %res0 = call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %a0, <16 x i8> <i8 15, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128>)
  ret <16 x i8> %res0
}

define <16 x i8> @combine_pshufb_as_psrlw(<16 x i8> %a0) {
; SSE-LABEL: combine_pshufb_as_psrlw:
; SSE-NEXT:    psrlw $8, %xmm0
; AVX-LABEL: combine_pshufb_as_psrlw:
; AVX-NEXT:    vpsrlw $8, %xmm0, %xmm0
  %res0 = call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %a0, <16 x i8> <i8 1, i8 128, i8 3, i8 128, i8 5, i8 128, i8 7, i8 128, i8 9, i8 128, i8 11, i8 128, i8 13, i8 128, i8 15, i8 128>)
  ret <16 x i8> %res0
}

define <16 x i8> @combine_pshufb_as_pslld(<16 x i8> %a0) {
; SSE-LABEL: combine_pshufb_as_pslld:
; SSE-NEXT:    pslld $24, %xmm0
; AVX-LABEL: combine_pshufb_as_pslld:
; AVX-NEXT:    vpslld $24, %xmm0, %xmm0
  %res0 = call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %a0, <16 x i8> <i8 128, i8 128, i8 128, i8 0, i8 128, i8 128, i8 128, i8 4, i8 128, i8 128, i8 128, i8 8, i8 128, i8 128, i8 128, i8 12>)
  ret <16 x i8> %res0
}

define <16 x i8> @combine_pshufb_as_psrlq(<16 x i8> %a0) {
; SSE-LABEL: combine_pshufb_as_psrlq:
; SSE-NEXT:    psrlq $40, %xmm0
; AVX-LABEL: combine_pshufb_as_psrlq:
; AVX-NEXT:    vpsrlq $40, %xmm0, %xmm0
  %res0 = call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %a0, <16 x i8> <i8 5, i8 6, i8 7, i8 128, i8 128, i8 128, i8 128, i8 128, i8 13, i8 14, i8 15, i8 128, i8 128, i8 128, i8 128, i8 128>)
  ret <16 x i8> %res0
}

define <16 x i8> @combine_pshufb_as_pshuflw(<16 x i8> %a0) {
; SSE-LABEL: combine_pshufb_as_pshuflw:
; SSE-NEXT:    pshuflw {{.*#+}} xmm0 = xmm0[1,0,3,2,4,5,6,7]
; AVX-LABEL: combine_pshufb_as_pshuflw:
; AVX-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm0[1,0,3,2,4,5,6,7]
  %res0 = call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %a0, <16 x i8> <i8 2, i8 3, i8 0, i8 1, i8 6, i8 7, i8 4, i8 5, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>)
  ret <16 x i8> %res0
}

define <16 x i8> @combine_pshufb_as_pshufhw(<16 x i8> %a0) {
; SSE-LABEL: combine_pshufb_as_pshufhw:
; SSE-NEXT:    pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,5,4,7,6]
; AVX-LABEL: combine_pshufb_as_pshufhw:
; AVX-NEXT:    vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,5,4,7,6]
  %res0 = call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %a0, <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 10, i8 11, i8 8, i8 9, i8 14, i8 15, i8 12, i8 13>)
  ret <16 x i8> %res0
}

define <16 x i8> @combine_pshufb_not_as_pshufw(<16 x i8> %a0) {
; SSE-LABEL: combine_pshufb_not_as_pshufw:
; SSE-NEXT:    pshufb {{.*#+}} xmm0 = xmm0[2,3,0,1,6,7,4,5,10,11,8,9,14,15,12,13]
; AVX-LABEL: combine_pshufb_not_as_pshufw:
; AVX-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[2,3,0,1,6,7,4,5,10,11,8,9,14,15,12,13]
  %res0 = call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %a0, <16 x i8> <i8 2, i8 3, i8 0, i8 1, i8 6, i8 7, i8 4, i8 5, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>)
  %res1 = call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %res0, <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 10, i8 11, i8 8, i8 9, i8 14, i8 15, i8 12, i8 13>)
  ret <16 x i8> %res1
}

define <16 x i8> @combine_vpshufb_as_pshuflw_not_pslld(<16 x i8> *%a0) {
; SSE-LABEL: combine_vpshufb_as_pshuflw_not_pslld:
; SSE-NEXT:    pshuflw {{.*#+}} xmm0 = mem[0,0,2,2,4,5,6,7]
; AVX-LABEL: combine_vpshufb_as_pshuflw_not_pslld:
; AVX-NEXT:    vpshuflw {{.*#+}} xmm0 = mem[0,0,2,2,4,5,6,7]
  %res0 = load <16 x i8>, <16 x i8> *%a0, align 16
  %res1 = call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %res0, <16 x i8> <i8 undef, i8 undef, i8 0, i8 1, i8 undef, i8 undef, i8 4, i8 5, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef>)
  ret <16 x i8> %res1
}

define <16 x i8> @combine_pshufb_as_unary_unpcklbw(<16 x i8> %a0) {
; SSE-LABEL: combine_pshufb_as_unary_unpcklbw:
; SSE-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; AVX-LABEL: combine_pshufb_as_unary_unpcklbw:
; AVX-NEXT:    vpunpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
  %1 = tail call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %a0, <16 x i8> <i8 0, i8 undef, i8 undef, i8 1, i8 2, i8 2, i8 3, i8 3, i8 4, i8 4, i8 5, i8 5, i8 6, i8 6, i8 7, i8 7>)
  ret <16 x i8> %1
}

define <16 x i8> @combine_pshufb_as_unary_unpckhwd(<16 x i8> %a0) {
; SSE-LABEL: combine_pshufb_as_unary_unpckhwd:
; SSE-NEXT:    punpckhwd {{.*#+}} xmm0 = xmm0[4,4,5,5,6,6,7,7]
; AVX-LABEL: combine_pshufb_as_unary_unpckhwd:
; AVX-NEXT:    vpunpckhwd {{.*#+}} xmm0 = xmm0[4,4,5,5,6,6,7,7]
  %1 = tail call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %a0, <16 x i8> <i8 8, i8 9, i8 8, i8 9, i8 10, i8 11, i8 10, i8 11, i8 12, i8 13, i8 12, i8 13, i8 14, i8 15, i8 undef, i8 undef>)
  ret <16 x i8> %1
}

define <8 x i16> @combine_pshufb_as_unpacklo_undef(<16 x i8> %a0) {
; ALL-LABEL: combine_pshufb_as_unpacklo_undef:
  %1 = call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %a0, <16 x i8> <i8 undef, i8 undef, i8 0, i8 1, i8 undef, i8 undef, i8 2, i8 3, i8 undef, i8 undef, i8 4, i8 5, i8 undef, i8 undef, i8 6, i8 7>)
  %2 = bitcast <16 x i8> %1 to <8 x i16>
  %3 = shufflevector <8 x i16> %2, <8 x i16> undef, <8 x i32> <i32 0, i32 0, i32 2, i32 2, i32 4, i32 4, i32 6, i32 6>
  ret <8 x i16> %3
}

define <16 x i8> @combine_pshufb_as_unpackhi_undef(<16 x i8> %a0) {
; ALL-LABEL: combine_pshufb_as_unpackhi_undef:
  %1 = call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %a0, <16 x i8> <i8 8, i8 undef, i8 9, i8 undef, i8 10, i8 undef, i8 11, i8 undef, i8 12, i8 undef, i8 13, i8 undef, i8 14, i8 undef, i8 15, i8 undef>)
  %2 = shufflevector <16 x i8> %1, <16 x i8> undef, <16 x i32> <i32 1, i32 1, i32 3, i32 3, i32 5, i32 5, i32 7, i32 7, i32 9, i32 9, i32 11, i32 11, i32 13, i32 13, i32 15, i32 15>
  ret <16 x i8> %2
}

define <16 x i8> @combine_pshufb_as_unpacklo_zero(<16 x i8> %a0) {
; SSE-LABEL: combine_pshufb_as_unpacklo_zero:
; SSE-NEXT:    xorps %xmm1, %xmm1
; SSE-NEXT:    unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; SSE-NEXT:    movaps %xmm1, %xmm0
; AVX-LABEL: combine_pshufb_as_unpacklo_zero:
; AVX-NEXT:    vxorps %xmm1, %xmm1, %xmm1
; AVX-NEXT:    vunpcklps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
  %1 = call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %a0, <16 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 0, i8 1, i8 2, i8 3, i8 -1, i8 -1, i8 -1, i8 -1, i8 4, i8 5, i8 6, i8 7>)
  ret <16 x i8> %1
}

define <16 x i8> @combine_pshufb_as_unpackhi_zero(<16 x i8> %a0) {
; SSE-LABEL: combine_pshufb_as_unpackhi_zero:
; SSE-NEXT:    pxor %xmm1, %xmm1
; SSE-NEXT:    punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
; AVX-LABEL: combine_pshufb_as_unpackhi_zero:
; AVX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
; AVX-NEXT:    vpunpckhbw {{.*#+}} xmm0 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
  %1 = call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %a0, <16 x i8> <i8 8, i8 -1, i8 9, i8 -1, i8 10, i8 -1, i8 11, i8 -1, i8 12, i8 -1, i8 13, i8 -1, i8 14, i8 -1, i8 15, i8 -1>)
  ret <16 x i8> %1
}

define <16 x i8> @combine_psrlw_pshufb(<8 x i16> %a0) {
; SSE-LABEL: combine_psrlw_pshufb:
; SSE-NEXT:    pshufb {{.*#+}} xmm0 = xmm0[1],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[1],zero,zero,zero
; AVX-LABEL: combine_psrlw_pshufb:
; AVX-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[1],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[1],zero,zero,zero
  %1 = lshr <8 x i16> %a0, <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>
  %2 = bitcast <8 x i16> %1 to <16 x i8>
  %3 = tail call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %2, <16 x i8> <i8 0, i8 -1, i8 -1, i8 -1, i8 0, i8 -1, i8 -1, i8 -1, i8 0, i8 -1, i8 -1, i8 -1, i8 0, i8 -1, i8 -1, i8 -1>)
  ret <16 x i8> %3
}

define <16 x i8> @combine_pslld_pshufb(<4 x i32> %a0) {
; SSE-LABEL: combine_pslld_pshufb:
; SSE-NEXT:    pshufb {{.*#+}} xmm0 = xmm0[2,1,0],zero,xmm0[6,5,4],zero,xmm0[10,9,8],zero,xmm0[14,13,12],zero
; AVX-LABEL: combine_pslld_pshufb:
; AVX-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[2,1,0],zero,xmm0[6,5,4],zero,xmm0[10,9,8],zero,xmm0[14,13,12],zero
  %1 = shl <4 x i32> %a0, <i32 8, i32 8, i32 8, i32 8>
  %2 = bitcast <4 x i32> %1 to <16 x i8>
  %3 = tail call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %2, <16 x i8> <i8 3, i8 2, i8 1, i8 0, i8 7, i8 6, i8 5, i8 4, i8 11, i8 10, i8 9, i8 8, i8 15, i8 14, i8 13, i8 12>)
  ret <16 x i8> %3
}

define <16 x i8> @combine_psrlq_pshufb(<2 x i64> %a0) {
; SSE-LABEL: combine_psrlq_pshufb:
; SSE-NEXT:    pshufb {{.*#+}} xmm0 = zero,zero,zero,zero,zero,zero,xmm0[7,6],zero,zero,zero,zero,zero,zero,xmm0[15,14]
; AVX-LABEL: combine_psrlq_pshufb:
; AVX-NEXT:    vpshufb {{.*#+}} xmm0 = zero,zero,zero,zero,zero,zero,xmm0[7,6],zero,zero,zero,zero,zero,zero,xmm0[15,14]
  %1 = lshr <2 x i64> %a0, <i64 48, i64 48>
  %2 = bitcast <2 x i64> %1 to <16 x i8>
  %3 = tail call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %2, <16 x i8> <i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0, i8 15, i8 14, i8 13, i8 12, i8 11, i8 10, i8 9, i8 8>)
  ret <16 x i8> %3
}

define <16 x i8> @combine_unpckl_arg0_pshufb(<16 x i8> %a0, <16 x i8> %a1) {
; SSE-LABEL: combine_unpckl_arg0_pshufb:
; SSE-NEXT:    pshufb {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[0],zero,zero,zero,xmm0[0],zero,zero,zero,xmm0[0],zero,zero,zero
; AVX-LABEL: combine_unpckl_arg0_pshufb:
; AVX-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[0],zero,zero,zero,xmm0[0],zero,zero,zero,xmm0[0],zero,zero,zero
  %1 = shufflevector <16 x i8> %a0, <16 x i8> %a1, <16 x i32> <i32 0, i32 16, i32 1, i32 17, i32 2, i32 18, i32 3, i32 19, i32 4, i32 20, i32 5, i32 21, i32 6, i32 22, i32 7, i32 23>
  %2 = tail call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %1, <16 x i8> <i8 0, i8 -1, i8 -1, i8 -1, i8 0, i8 -1, i8 -1, i8 -1, i8 0, i8 -1, i8 -1, i8 -1, i8 0, i8 -1, i8 -1, i8 -1>)
  ret <16 x i8> %2
}

define <16 x i8> @combine_unpckl_arg1_pshufb(<16 x i8> %a0, <16 x i8> %a1) {
; SSE-LABEL: combine_unpckl_arg1_pshufb:
; SSE-NEXT:    pshufb {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[0],zero,zero,zero,xmm1[0],zero,zero,zero,xmm1[0],zero,zero,zero
; SSE-NEXT:    movdqa %xmm1, %xmm0
; AVX-LABEL: combine_unpckl_arg1_pshufb:
; AVX-NEXT:    vpshufb {{.*#+}} xmm0 = xmm1[0],zero,zero,zero,xmm1[0],zero,zero,zero,xmm1[0],zero,zero,zero,xmm1[0],zero,zero,zero
  %1 = shufflevector <16 x i8> %a0, <16 x i8> %a1, <16 x i32> <i32 0, i32 16, i32 1, i32 17, i32 2, i32 18, i32 3, i32 19, i32 4, i32 20, i32 5, i32 21, i32 6, i32 22, i32 7, i32 23>
  %2 = tail call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %1, <16 x i8> <i8 1, i8 -1, i8 -1, i8 -1, i8 1, i8 -1, i8 -1, i8 -1, i8 1, i8 -1, i8 -1, i8 -1, i8 1, i8 -1, i8 -1, i8 -1>)
  ret <16 x i8> %2
}

define <8 x i16> @shuffle_combine_unpack_insert(<8 x i16> %a0) {
; SSE-LABEL: shuffle_combine_unpack_insert:
; SSE-NEXT:    pshufb {{.*#+}} xmm0 = xmm0[4,5,4,5,4,5,8,9,8,9,8,9,10,11,10,11]
; AVX-LABEL: shuffle_combine_unpack_insert:
; AVX-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[4,5,4,5,4,5,8,9,8,9,8,9,10,11,10,11]
  %1 = extractelement <8 x i16> %a0, i32 2
  %2 = extractelement <8 x i16> %a0, i32 4
  %3 = insertelement <8 x i16> %a0, i16 %1, i32 4
  %4 = insertelement <8 x i16> %a0, i16 %2, i32 2
  %5 = shufflevector <8 x i16> %3, <8 x i16> %4, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11>
  %6 = shufflevector <8 x i16> %5, <8 x i16> %3, <8 x i32> <i32 4, i32 12, i32 5, i32 13, i32 undef, i32 undef, i32 undef, i32 undef>
  %7 = shufflevector <8 x i16> %5, <8 x i16> %a0, <8 x i32> <i32 4, i32 12, i32 5, i32 13, i32 undef, i32 undef, i32 undef, i32 undef>
  %8 = shufflevector <8 x i16> %6, <8 x i16> %7, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11>
  ret <8 x i16> %8
}

define <16 x i8> @shuffle_combine_packssdw_pshufb(<4 x i32> %a0) {
; SSE-LABEL: shuffle_combine_packssdw_pshufb:
; SSE-NEXT:    psrad $31, %xmm0
; SSE-NEXT:    pshufb {{.*#+}} xmm0 = xmm0[13,12,9,8,5,4,1,0,13,12,9,8,5,4,1,0]
; AVX-LABEL: shuffle_combine_packssdw_pshufb:
; AVX-NEXT:    vpsrad $31, %xmm0, %xmm0
; AVX-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[13,12,9,8,5,4,1,0,13,12,9,8,5,4,1,0]
  %1 = ashr <4 x i32> %a0, <i32 31, i32 31, i32 31, i32 31>
  %2 = tail call <8 x i16> @llvm.x86.sse2.packssdw.128(<4 x i32> %1, <4 x i32> %1)
  %3 = bitcast <8 x i16> %2 to <16 x i8>
  %4 = tail call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %3, <16 x i8> <i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0, i8 15, i8 14, i8 13, i8 12, i8 11, i8 10, i8 9, i8 8>)
  ret <16 x i8> %4
}
declare <8 x i16> @llvm.x86.sse2.packssdw.128(<4 x i32>, <4 x i32>) nounwind readnone

define <16 x i8> @shuffle_combine_packsswb_pshufb(<8 x i16> %a0, <8 x i16> %a1) {
; SSE-LABEL: shuffle_combine_packsswb_pshufb:
; SSE-NEXT:    psraw $15, %xmm0
; SSE-NEXT:    pshufb {{.*#+}} xmm0 = xmm0[14,12,10,8,6,4,2,0,14,12,10,8,6,4,2,0]
; AVX-LABEL: shuffle_combine_packsswb_pshufb:
; AVX-NEXT:    vpsraw $15, %xmm0, %xmm0
; AVX-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[14,12,10,8,6,4,2,0,14,12,10,8,6,4,2,0]
  %1 = ashr <8 x i16> %a0, <i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15>
  %2 = ashr <8 x i16> %a1, <i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15>
  %3 = tail call <16 x i8> @llvm.x86.sse2.packsswb.128(<8 x i16> %1, <8 x i16> %2)
  %4 = tail call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %3, <16 x i8> <i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0>)
  ret <16 x i8> %4
}
declare <16 x i8> @llvm.x86.sse2.packsswb.128(<8 x i16>, <8 x i16>) nounwind readnone

define <16 x i8> @shuffle_combine_packuswb_pshufb(<8 x i16> %a0, <8 x i16> %a1) {
; SSE-LABEL: shuffle_combine_packuswb_pshufb:
; SSE-NEXT:    pshufb {{.*#+}} xmm0 = xmm0[15,13,11,9,7,5,3,1,15,13,11,9,7,5,3,1]
; AVX-LABEL: shuffle_combine_packuswb_pshufb:
; AVX-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[15,13,11,9,7,5,3,1,15,13,11,9,7,5,3,1]
  %1 = lshr <8 x i16> %a0, <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>
  %2 = lshr <8 x i16> %a1, <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>
  %3 = tail call <16 x i8> @llvm.x86.sse2.packuswb.128(<8 x i16> %1, <8 x i16> %2)
  %4 = tail call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %3, <16 x i8> <i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0>)
  ret <16 x i8> %4
}
declare <16 x i8> @llvm.x86.sse2.packuswb.128(<8 x i16>, <8 x i16>) nounwind readnone

define <16 x i8> @constant_fold_pshufb() {
; SSE-LABEL: constant_fold_pshufb:
; SSE-NEXT:    movaps {{.*#+}} xmm0 = <14,0,0,0,u,u,0,0,0,0,0,0,0,0,8,9>
; AVX-LABEL: constant_fold_pshufb:
; AVX-NEXT:    vmovaps {{.*#+}} xmm0 = <14,0,0,0,u,u,0,0,0,0,0,0,0,0,8,9>
  %1 = tail call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> <i8 15, i8 14, i8 13, i8 12, i8 11, i8 10, i8 9, i8 8, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0>, <16 x i8> <i8 1, i8 -1, i8 -1, i8 -1, i8 undef, i8 undef, i8 -1, i8 -1, i8 15, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 7, i8 6>)
  ret <16 x i8> %1
}

; FIXME - unnecessary pshufb/broadcast being used - pshufb mask only needs lowest byte.
define <16 x i8> @constant_fold_pshufb_2() {
; SSE-LABEL: constant_fold_pshufb_2:
; SSE-NEXT:    movl $2, %eax
; SSE-NEXT:    movd %eax, %xmm0
; SSE-NEXT:    pxor %xmm1, %xmm1
; SSE-NEXT:    pshufb %xmm1, %xmm0
; AVX1-LABEL: constant_fold_pshufb_2:
; AVX1-NEXT:    movl $2, %eax
; AVX1-NEXT:    vmovd %eax, %xmm0
; AVX1-NEXT:    vpxor %xmm1, %xmm1, %xmm1
; AVX1-NEXT:    vpshufb %xmm1, %xmm0, %xmm0
; AVX2-LABEL: constant_fold_pshufb_2:
; AVX2-NEXT:    movl $2, %eax
; AVX2-NEXT:    vmovd %eax, %xmm0
; AVX2-NEXT:    vpbroadcastb %xmm0, %xmm0
; AVX512F-LABEL: constant_fold_pshufb_2:
; AVX512F-NEXT:    movl $2, %eax
; AVX512F-NEXT:    vmovd %eax, %xmm0
; AVX512F-NEXT:    vpbroadcastb %xmm0, %xmm0
  %1 = tail call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> <i8 2, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, <16 x i8> <i8 0, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef>)
  ret <16 x i8> %1
}

define i32 @mask_zzz3_v16i8(<16 x i8> %a0) {
; SSSE3-LABEL: mask_zzz3_v16i8:
; SSSE3-NEXT:    pshufb {{.*#+}} xmm0 = zero,zero,zero,xmm0[14,u,u,u,u,u,u,u,u,u,u,u,u]
; SSSE3-NEXT:    movd %xmm0, %eax
; SSE41-LABEL: mask_zzz3_v16i8:
; SSE41-NEXT:    pshufb {{.*#+}} xmm0 = xmm0[u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm0[14]
; SSE41-NEXT:    pextrd $3, %xmm0, %eax
; AVX-LABEL: mask_zzz3_v16i8:
; AVX-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm0[14]
; AVX-NEXT:    vpextrd $3, %xmm0, %eax
  %1 = call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %a0, <16 x i8> <i8 0, i8 2, i8 4, i8 6, i8 8, i8 10, i8 12, i8 14, i8 0, i8 2, i8 4, i8 6, i8 8, i8 10, i8 12, i8 14>)
  %2 = bitcast <16 x i8> %1 to <4 x i32>
  %3 = extractelement <4 x i32> %2, i32 3
  %4 = and i32 %3, 4278190080
  ret i32 %4
}

define i32 @mask_z1z3_v16i8(<16 x i8> %a0) {
; SSSE3-LABEL: mask_z1z3_v16i8:
; SSSE3-NEXT:    pshufb {{.*#+}} xmm0 = zero,xmm0[10],zero,xmm0[14,u,u,u,u,u,u,u,u,u,u,u,u]
; SSSE3-NEXT:    movd %xmm0, %eax
; SSE41-LABEL: mask_z1z3_v16i8:
; SSE41-NEXT:    pshufb {{.*#+}} xmm0 = xmm0[u,u,u,u,u,u,u,u,u,u,u,u],zero,xmm0[10],zero,xmm0[14]
; SSE41-NEXT:    pextrd $3, %xmm0, %eax
; AVX-LABEL: mask_z1z3_v16i8:
; AVX-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[u,u,u,u,u,u,u,u,u,u,u,u],zero,xmm0[10],zero,xmm0[14]
; AVX-NEXT:    vpextrd $3, %xmm0, %eax
  %1 = call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %a0, <16 x i8> <i8 0, i8 2, i8 4, i8 6, i8 8, i8 10, i8 12, i8 14, i8 0, i8 2, i8 4, i8 6, i8 8, i8 10, i8 12, i8 14>)
  %2 = bitcast <16 x i8> %1 to <4 x i32>
  %3 = extractelement <4 x i32> %2, i32 3
  %4 = and i32 %3, 4278255360
  ret i32 %4
}

define i32 @PR22415(double %a0) {
; SSE-LABEL: PR22415:
; SSE-NEXT:    pshufb {{.*#+}} xmm0 = xmm0[0,2,4],zero,xmm0[u,u,u,u,u,u,u,u,u,u,u,u]
; SSE-NEXT:    movd %xmm0, %eax
; AVX-LABEL: PR22415:
; AVX-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,2,4],zero,xmm0[u,u,u,u,u,u,u,u,u,u,u,u]
; AVX-NEXT:    vmovd %xmm0, %eax
  %1 = bitcast double %a0 to <8 x i8>
  %2 = shufflevector <8 x i8> %1, <8 x i8> undef, <4 x i32> <i32 0, i32 2, i32 4, i32 undef>
  %3 = shufflevector <4 x i8> %2, <4 x i8> undef, <3 x i32> <i32 0, i32 1, i32 2>
  %4 = bitcast <3 x i8> %3 to i24
  %5 = zext i24 %4 to i32
  ret i32 %5
}