1 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2 ; RUN: llc < %s -mcpu=x86-64 -mattr=+sse2 | FileCheck %s --check-prefixes=CHECK,SSE,SSE2
3 ; RUN: llc < %s -mcpu=x86-64 -mattr=+ssse3 | FileCheck %s --check-prefixes=CHECK,SSE,SSSE3
4 ; RUN: llc < %s -mcpu=x86-64 -mattr=+sse4.1 | FileCheck %s --check-prefixes=CHECK,SSE,SSE41
5 ; RUN: llc < %s -mcpu=x86-64 -mattr=+avx | FileCheck %s --check-prefixes=CHECK,AVX,AVX1
6 ; RUN: llc < %s -mcpu=x86-64 -mattr=+avx2 | FileCheck %s --check-prefixes=CHECK,AVX,AVX2,AVX2-SLOW
7 ; RUN: llc < %s -mcpu=x86-64 -mattr=+avx2,+fast-variable-shuffle | FileCheck %s --check-prefixes=CHECK,AVX,AVX2,AVX2-FAST
9 ; Verify that the DAG combiner correctly folds bitwise operations across
10 ; shuffles, nested shuffles with undef, pairs of nested shuffles, and other
11 ; basic and always-safe patterns. Also test that the DAG combiner will combine
12 ; target-specific shuffle instructions where reasonable.
14 target triple = "x86_64-unknown-unknown"
16 declare <4 x i32> @llvm.x86.sse2.pshuf.d(<4 x i32>, i8)
17 declare <8 x i16> @llvm.x86.sse2.pshufl.w(<8 x i16>, i8)
18 declare <8 x i16> @llvm.x86.sse2.pshufh.w(<8 x i16>, i8)
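; Informal worked decode of the shuffle immediates used below (editorial note,
; not part of the autogenerated checks): each pshufd/pshuflw/pshufhw immediate
; packs four 2-bit lane selectors, lowest bits first.
;   27  = 0b00011011 -> fields 3,2,1,0 -> mask <3,2,1,0>  (full reverse)
;   -28 = 0xE4 = 0b11100100 -> fields 0,1,2,3 -> mask <0,1,2,3>  (identity)
; Applying the <3,2,1,0> reverse twice composes to the identity, which is why
; combine_pshufd1 below is expected to fold to a plain return of %a.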
20 define <4 x i32> @combine_pshufd1(<4 x i32> %a) {
21 ; CHECK-LABEL: combine_pshufd1:
22 ; CHECK: # %bb.0: # %entry
25 %b = call <4 x i32> @llvm.x86.sse2.pshuf.d(<4 x i32> %a, i8 27)
26 %c = call <4 x i32> @llvm.x86.sse2.pshuf.d(<4 x i32> %b, i8 27)
30 define <4 x i32> @combine_pshufd2(<4 x i32> %a) {
31 ; CHECK-LABEL: combine_pshufd2:
32 ; CHECK: # %bb.0: # %entry
35 %b = call <4 x i32> @llvm.x86.sse2.pshuf.d(<4 x i32> %a, i8 27)
36 %b.cast = bitcast <4 x i32> %b to <8 x i16>
37 %c = call <8 x i16> @llvm.x86.sse2.pshufl.w(<8 x i16> %b.cast, i8 -28)
38 %c.cast = bitcast <8 x i16> %c to <4 x i32>
39 %d = call <4 x i32> @llvm.x86.sse2.pshuf.d(<4 x i32> %c.cast, i8 27)
43 define <4 x i32> @combine_pshufd3(<4 x i32> %a) {
44 ; CHECK-LABEL: combine_pshufd3:
45 ; CHECK: # %bb.0: # %entry
48 %b = call <4 x i32> @llvm.x86.sse2.pshuf.d(<4 x i32> %a, i8 27)
49 %b.cast = bitcast <4 x i32> %b to <8 x i16>
50 %c = call <8 x i16> @llvm.x86.sse2.pshufh.w(<8 x i16> %b.cast, i8 -28)
51 %c.cast = bitcast <8 x i16> %c to <4 x i32>
52 %d = call <4 x i32> @llvm.x86.sse2.pshuf.d(<4 x i32> %c.cast, i8 27)
56 define <4 x i32> @combine_pshufd4(<4 x i32> %a) {
57 ; SSE-LABEL: combine_pshufd4:
58 ; SSE: # %bb.0: # %entry
59 ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,7,6,5,4]
62 ; AVX-LABEL: combine_pshufd4:
63 ; AVX: # %bb.0: # %entry
64 ; AVX-NEXT: vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,7,6,5,4]
67 %b = call <4 x i32> @llvm.x86.sse2.pshuf.d(<4 x i32> %a, i8 -31)
68 %b.cast = bitcast <4 x i32> %b to <8 x i16>
69 %c = call <8 x i16> @llvm.x86.sse2.pshufh.w(<8 x i16> %b.cast, i8 27)
70 %c.cast = bitcast <8 x i16> %c to <4 x i32>
71 %d = call <4 x i32> @llvm.x86.sse2.pshuf.d(<4 x i32> %c.cast, i8 -31)
75 define <4 x i32> @combine_pshufd5(<4 x i32> %a) {
76 ; SSE-LABEL: combine_pshufd5:
77 ; SSE: # %bb.0: # %entry
78 ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[3,2,1,0,4,5,6,7]
81 ; AVX-LABEL: combine_pshufd5:
82 ; AVX: # %bb.0: # %entry
83 ; AVX-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[3,2,1,0,4,5,6,7]
86 %b = call <4 x i32> @llvm.x86.sse2.pshuf.d(<4 x i32> %a, i8 -76)
87 %b.cast = bitcast <4 x i32> %b to <8 x i16>
88 %c = call <8 x i16> @llvm.x86.sse2.pshufl.w(<8 x i16> %b.cast, i8 27)
89 %c.cast = bitcast <8 x i16> %c to <4 x i32>
90 %d = call <4 x i32> @llvm.x86.sse2.pshuf.d(<4 x i32> %c.cast, i8 -76)
94 define <4 x i32> @combine_pshufd6(<4 x i32> %a) {
95 ; SSE-LABEL: combine_pshufd6:
96 ; SSE: # %bb.0: # %entry
97 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
100 ; AVX1-LABEL: combine_pshufd6:
101 ; AVX1: # %bb.0: # %entry
102 ; AVX1-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,0,0,0]
105 ; AVX2-LABEL: combine_pshufd6:
106 ; AVX2: # %bb.0: # %entry
107 ; AVX2-NEXT: vbroadcastss %xmm0, %xmm0
110 %b = call <4 x i32> @llvm.x86.sse2.pshuf.d(<4 x i32> %a, i8 0)
111 %c = call <4 x i32> @llvm.x86.sse2.pshuf.d(<4 x i32> %b, i8 8)
115 define <8 x i16> @combine_pshuflw1(<8 x i16> %a) {
116 ; CHECK-LABEL: combine_pshuflw1:
117 ; CHECK: # %bb.0: # %entry
120 %b = call <8 x i16> @llvm.x86.sse2.pshufl.w(<8 x i16> %a, i8 27)
121 %c = call <8 x i16> @llvm.x86.sse2.pshufl.w(<8 x i16> %b, i8 27)
125 define <8 x i16> @combine_pshuflw2(<8 x i16> %a) {
126 ; CHECK-LABEL: combine_pshuflw2:
127 ; CHECK: # %bb.0: # %entry
130 %b = call <8 x i16> @llvm.x86.sse2.pshufl.w(<8 x i16> %a, i8 27)
131 %c = call <8 x i16> @llvm.x86.sse2.pshufh.w(<8 x i16> %b, i8 -28)
132 %d = call <8 x i16> @llvm.x86.sse2.pshufl.w(<8 x i16> %c, i8 27)
136 define <8 x i16> @combine_pshuflw3(<8 x i16> %a) {
137 ; SSE-LABEL: combine_pshuflw3:
138 ; SSE: # %bb.0: # %entry
139 ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,7,6,5,4]
142 ; AVX-LABEL: combine_pshuflw3:
143 ; AVX: # %bb.0: # %entry
144 ; AVX-NEXT: vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,7,6,5,4]
147 %b = call <8 x i16> @llvm.x86.sse2.pshufl.w(<8 x i16> %a, i8 27)
148 %c = call <8 x i16> @llvm.x86.sse2.pshufh.w(<8 x i16> %b, i8 27)
149 %d = call <8 x i16> @llvm.x86.sse2.pshufl.w(<8 x i16> %c, i8 27)
153 define <8 x i16> @combine_pshufhw1(<8 x i16> %a) {
154 ; SSE-LABEL: combine_pshufhw1:
155 ; SSE: # %bb.0: # %entry
156 ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[3,2,1,0,4,5,6,7]
159 ; AVX-LABEL: combine_pshufhw1:
160 ; AVX: # %bb.0: # %entry
161 ; AVX-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[3,2,1,0,4,5,6,7]
164 %b = call <8 x i16> @llvm.x86.sse2.pshufh.w(<8 x i16> %a, i8 27)
165 %c = call <8 x i16> @llvm.x86.sse2.pshufl.w(<8 x i16> %b, i8 27)
166 %d = call <8 x i16> @llvm.x86.sse2.pshufh.w(<8 x i16> %c, i8 27)
170 define <4 x i32> @combine_bitwise_ops_test1(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c) {
171 ; SSE-LABEL: combine_bitwise_ops_test1:
173 ; SSE-NEXT: pand %xmm1, %xmm0
174 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,1,3]
177 ; AVX-LABEL: combine_bitwise_ops_test1:
179 ; AVX-NEXT: vandps %xmm1, %xmm0, %xmm0
180 ; AVX-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,2,1,3]
182 %shuf1 = shufflevector <4 x i32> %a, <4 x i32> %c, <4 x i32><i32 0, i32 2, i32 1, i32 3>
183 %shuf2 = shufflevector <4 x i32> %b, <4 x i32> %c, <4 x i32><i32 0, i32 2, i32 1, i32 3>
184 %and = and <4 x i32> %shuf1, %shuf2
188 define <4 x i32> @combine_bitwise_ops_test2(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c) {
189 ; SSE-LABEL: combine_bitwise_ops_test2:
191 ; SSE-NEXT: por %xmm1, %xmm0
192 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,1,3]
195 ; AVX-LABEL: combine_bitwise_ops_test2:
197 ; AVX-NEXT: vorps %xmm1, %xmm0, %xmm0
198 ; AVX-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,2,1,3]
200 %shuf1 = shufflevector <4 x i32> %a, <4 x i32> %c, <4 x i32><i32 0, i32 2, i32 1, i32 3>
201 %shuf2 = shufflevector <4 x i32> %b, <4 x i32> %c, <4 x i32><i32 0, i32 2, i32 1, i32 3>
202 %or = or <4 x i32> %shuf1, %shuf2
206 define <4 x i32> @combine_bitwise_ops_test3(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c) {
207 ; SSE-LABEL: combine_bitwise_ops_test3:
209 ; SSE-NEXT: pxor %xmm1, %xmm0
210 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,1,3]
213 ; AVX-LABEL: combine_bitwise_ops_test3:
215 ; AVX-NEXT: vxorps %xmm1, %xmm0, %xmm0
216 ; AVX-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,2,1,3]
218 %shuf1 = shufflevector <4 x i32> %a, <4 x i32> %c, <4 x i32><i32 0, i32 2, i32 1, i32 3>
219 %shuf2 = shufflevector <4 x i32> %b, <4 x i32> %c, <4 x i32><i32 0, i32 2, i32 1, i32 3>
220 %xor = xor <4 x i32> %shuf1, %shuf2
224 define <4 x i32> @combine_bitwise_ops_test4(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c) {
225 ; SSE-LABEL: combine_bitwise_ops_test4:
227 ; SSE-NEXT: pand %xmm1, %xmm0
228 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,1,3]
231 ; AVX-LABEL: combine_bitwise_ops_test4:
233 ; AVX-NEXT: vandps %xmm1, %xmm0, %xmm0
234 ; AVX-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,2,1,3]
236 %shuf1 = shufflevector <4 x i32> %c, <4 x i32> %a, <4 x i32><i32 4, i32 6, i32 5, i32 7>
237 %shuf2 = shufflevector <4 x i32> %c, <4 x i32> %b, <4 x i32><i32 4, i32 6, i32 5, i32 7>
238 %and = and <4 x i32> %shuf1, %shuf2
242 define <4 x i32> @combine_bitwise_ops_test5(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c) {
243 ; SSE-LABEL: combine_bitwise_ops_test5:
245 ; SSE-NEXT: por %xmm1, %xmm0
246 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,1,3]
249 ; AVX-LABEL: combine_bitwise_ops_test5:
251 ; AVX-NEXT: vorps %xmm1, %xmm0, %xmm0
252 ; AVX-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,2,1,3]
254 %shuf1 = shufflevector <4 x i32> %c, <4 x i32> %a, <4 x i32><i32 4, i32 6, i32 5, i32 7>
255 %shuf2 = shufflevector <4 x i32> %c, <4 x i32> %b, <4 x i32><i32 4, i32 6, i32 5, i32 7>
256 %or = or <4 x i32> %shuf1, %shuf2
260 define <4 x i32> @combine_bitwise_ops_test6(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c) {
261 ; SSE-LABEL: combine_bitwise_ops_test6:
263 ; SSE-NEXT: pxor %xmm1, %xmm0
264 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,1,3]
267 ; AVX-LABEL: combine_bitwise_ops_test6:
269 ; AVX-NEXT: vxorps %xmm1, %xmm0, %xmm0
270 ; AVX-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,2,1,3]
272 %shuf1 = shufflevector <4 x i32> %c, <4 x i32> %a, <4 x i32><i32 4, i32 6, i32 5, i32 7>
273 %shuf2 = shufflevector <4 x i32> %c, <4 x i32> %b, <4 x i32><i32 4, i32 6, i32 5, i32 7>
274 %xor = xor <4 x i32> %shuf1, %shuf2
279 ; Verify that DAGCombiner moves the shuffle after the xor/and/or even if the
280 ; shuffles are not performing a swizzle operation.
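; Informal sketch of why the rewrite is legal here (editorial note): both
; inputs of the bitwise op use the same mask M, so every result lane pairs a
; lane of %a with the matching lane of %b, or a lane of %c with itself. For
; AND/OR,
;   op(shuffle(a, c, M), shuffle(b, c, M)) == shuffle(op(a, b), c, M)
; because x AND x == x and x OR x == x; for XOR the %c lanes become zero
; instead, hence the blend-with-zero / mask patterns in test3b and test6b.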
282 define <4 x i32> @combine_bitwise_ops_test1b(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c) {
283 ; SSE2-LABEL: combine_bitwise_ops_test1b:
285 ; SSE2-NEXT: pand %xmm1, %xmm0
286 ; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
287 ; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm2[1,3,2,3]
288 ; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
291 ; SSSE3-LABEL: combine_bitwise_ops_test1b:
293 ; SSSE3-NEXT: pand %xmm1, %xmm0
294 ; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
295 ; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm2[1,3,2,3]
296 ; SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
299 ; SSE41-LABEL: combine_bitwise_ops_test1b:
301 ; SSE41-NEXT: andps %xmm1, %xmm0
302 ; SSE41-NEXT: blendps {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3]
305 ; AVX-LABEL: combine_bitwise_ops_test1b:
307 ; AVX-NEXT: vandps %xmm1, %xmm0, %xmm0
308 ; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3]
310 %shuf1 = shufflevector <4 x i32> %a, <4 x i32> %c, <4 x i32><i32 0, i32 5, i32 2, i32 7>
311 %shuf2 = shufflevector <4 x i32> %b, <4 x i32> %c, <4 x i32><i32 0, i32 5, i32 2, i32 7>
312 %and = and <4 x i32> %shuf1, %shuf2
316 define <4 x i32> @combine_bitwise_ops_test2b(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c) {
317 ; SSE2-LABEL: combine_bitwise_ops_test2b:
319 ; SSE2-NEXT: por %xmm1, %xmm0
320 ; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
321 ; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm2[1,3,2,3]
322 ; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
325 ; SSSE3-LABEL: combine_bitwise_ops_test2b:
327 ; SSSE3-NEXT: por %xmm1, %xmm0
328 ; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
329 ; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm2[1,3,2,3]
330 ; SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
333 ; SSE41-LABEL: combine_bitwise_ops_test2b:
335 ; SSE41-NEXT: orps %xmm1, %xmm0
336 ; SSE41-NEXT: blendps {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3]
339 ; AVX-LABEL: combine_bitwise_ops_test2b:
341 ; AVX-NEXT: vorps %xmm1, %xmm0, %xmm0
342 ; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3]
344 %shuf1 = shufflevector <4 x i32> %a, <4 x i32> %c, <4 x i32><i32 0, i32 5, i32 2, i32 7>
345 %shuf2 = shufflevector <4 x i32> %b, <4 x i32> %c, <4 x i32><i32 0, i32 5, i32 2, i32 7>
346 %or = or <4 x i32> %shuf1, %shuf2
350 define <4 x i32> @combine_bitwise_ops_test3b(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c) {
351 ; SSE2-LABEL: combine_bitwise_ops_test3b:
353 ; SSE2-NEXT: xorps %xmm1, %xmm0
354 ; SSE2-NEXT: andps {{.*}}(%rip), %xmm0
357 ; SSSE3-LABEL: combine_bitwise_ops_test3b:
359 ; SSSE3-NEXT: xorps %xmm1, %xmm0
360 ; SSSE3-NEXT: andps {{.*}}(%rip), %xmm0
363 ; SSE41-LABEL: combine_bitwise_ops_test3b:
365 ; SSE41-NEXT: xorps %xmm1, %xmm0
366 ; SSE41-NEXT: xorps %xmm1, %xmm1
367 ; SSE41-NEXT: blendps {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
370 ; AVX-LABEL: combine_bitwise_ops_test3b:
372 ; AVX-NEXT: vxorps %xmm1, %xmm0, %xmm0
373 ; AVX-NEXT: vxorps %xmm1, %xmm1, %xmm1
374 ; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
376 %shuf1 = shufflevector <4 x i32> %a, <4 x i32> %c, <4 x i32><i32 0, i32 5, i32 2, i32 7>
377 %shuf2 = shufflevector <4 x i32> %b, <4 x i32> %c, <4 x i32><i32 0, i32 5, i32 2, i32 7>
378 %xor = xor <4 x i32> %shuf1, %shuf2
382 define <4 x i32> @combine_bitwise_ops_test4b(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c) {
383 ; SSE2-LABEL: combine_bitwise_ops_test4b:
385 ; SSE2-NEXT: pand %xmm1, %xmm0
386 ; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,3,2,3]
387 ; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm2[0,2,2,3]
388 ; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
391 ; SSSE3-LABEL: combine_bitwise_ops_test4b:
393 ; SSSE3-NEXT: pand %xmm1, %xmm0
394 ; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,3,2,3]
395 ; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm2[0,2,2,3]
396 ; SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
399 ; SSE41-LABEL: combine_bitwise_ops_test4b:
401 ; SSE41-NEXT: andps %xmm1, %xmm0
402 ; SSE41-NEXT: blendps {{.*#+}} xmm0 = xmm2[0],xmm0[1],xmm2[2],xmm0[3]
405 ; AVX-LABEL: combine_bitwise_ops_test4b:
407 ; AVX-NEXT: vandps %xmm1, %xmm0, %xmm0
408 ; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm2[0],xmm0[1],xmm2[2],xmm0[3]
410 %shuf1 = shufflevector <4 x i32> %c, <4 x i32> %a, <4 x i32><i32 0, i32 5, i32 2, i32 7>
411 %shuf2 = shufflevector <4 x i32> %c, <4 x i32> %b, <4 x i32><i32 0, i32 5, i32 2, i32 7>
412 %and = and <4 x i32> %shuf1, %shuf2
416 define <4 x i32> @combine_bitwise_ops_test5b(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c) {
417 ; SSE2-LABEL: combine_bitwise_ops_test5b:
419 ; SSE2-NEXT: por %xmm1, %xmm0
420 ; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,3,2,3]
421 ; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm2[0,2,2,3]
422 ; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
425 ; SSSE3-LABEL: combine_bitwise_ops_test5b:
427 ; SSSE3-NEXT: por %xmm1, %xmm0
428 ; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,3,2,3]
429 ; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm2[0,2,2,3]
430 ; SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
433 ; SSE41-LABEL: combine_bitwise_ops_test5b:
435 ; SSE41-NEXT: orps %xmm1, %xmm0
436 ; SSE41-NEXT: blendps {{.*#+}} xmm0 = xmm2[0],xmm0[1],xmm2[2],xmm0[3]
439 ; AVX-LABEL: combine_bitwise_ops_test5b:
441 ; AVX-NEXT: vorps %xmm1, %xmm0, %xmm0
442 ; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm2[0],xmm0[1],xmm2[2],xmm0[3]
444 %shuf1 = shufflevector <4 x i32> %c, <4 x i32> %a, <4 x i32><i32 0, i32 5, i32 2, i32 7>
445 %shuf2 = shufflevector <4 x i32> %c, <4 x i32> %b, <4 x i32><i32 0, i32 5, i32 2, i32 7>
446 %or = or <4 x i32> %shuf1, %shuf2
450 define <4 x i32> @combine_bitwise_ops_test6b(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c) {
451 ; SSE2-LABEL: combine_bitwise_ops_test6b:
453 ; SSE2-NEXT: xorps %xmm1, %xmm0
454 ; SSE2-NEXT: andps {{.*}}(%rip), %xmm0
457 ; SSSE3-LABEL: combine_bitwise_ops_test6b:
459 ; SSSE3-NEXT: xorps %xmm1, %xmm0
460 ; SSSE3-NEXT: andps {{.*}}(%rip), %xmm0
463 ; SSE41-LABEL: combine_bitwise_ops_test6b:
465 ; SSE41-NEXT: xorps %xmm1, %xmm0
466 ; SSE41-NEXT: xorps %xmm1, %xmm1
467 ; SSE41-NEXT: blendps {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2],xmm0[3]
470 ; AVX-LABEL: combine_bitwise_ops_test6b:
472 ; AVX-NEXT: vxorps %xmm1, %xmm0, %xmm0
473 ; AVX-NEXT: vxorps %xmm1, %xmm1, %xmm1
474 ; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2],xmm0[3]
476 %shuf1 = shufflevector <4 x i32> %c, <4 x i32> %a, <4 x i32><i32 0, i32 5, i32 2, i32 7>
477 %shuf2 = shufflevector <4 x i32> %c, <4 x i32> %b, <4 x i32><i32 0, i32 5, i32 2, i32 7>
478 %xor = xor <4 x i32> %shuf1, %shuf2
482 define <4 x i32> @combine_bitwise_ops_test1c(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c) {
483 ; SSE-LABEL: combine_bitwise_ops_test1c:
485 ; SSE-NEXT: andps %xmm1, %xmm0
486 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm2[1,3]
489 ; AVX-LABEL: combine_bitwise_ops_test1c:
491 ; AVX-NEXT: vandps %xmm1, %xmm0, %xmm0
492 ; AVX-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm2[1,3]
494 %shuf1 = shufflevector <4 x i32> %a, <4 x i32> %c, <4 x i32><i32 0, i32 2, i32 5, i32 7>
495 %shuf2 = shufflevector <4 x i32> %b, <4 x i32> %c, <4 x i32><i32 0, i32 2, i32 5, i32 7>
496 %and = and <4 x i32> %shuf1, %shuf2
500 define <4 x i32> @combine_bitwise_ops_test2c(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c) {
501 ; SSE-LABEL: combine_bitwise_ops_test2c:
503 ; SSE-NEXT: orps %xmm1, %xmm0
504 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm2[1,3]
507 ; AVX-LABEL: combine_bitwise_ops_test2c:
509 ; AVX-NEXT: vorps %xmm1, %xmm0, %xmm0
510 ; AVX-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm2[1,3]
512 %shuf1 = shufflevector <4 x i32> %a, <4 x i32> %c, <4 x i32><i32 0, i32 2, i32 5, i32 7>
513 %shuf2 = shufflevector <4 x i32> %b, <4 x i32> %c, <4 x i32><i32 0, i32 2, i32 5, i32 7>
514 %or = or <4 x i32> %shuf1, %shuf2
518 define <4 x i32> @combine_bitwise_ops_test3c(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c) {
519 ; SSE2-LABEL: combine_bitwise_ops_test3c:
521 ; SSE2-NEXT: xorps %xmm1, %xmm0
522 ; SSE2-NEXT: xorps %xmm1, %xmm1
523 ; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[2,3]
526 ; SSSE3-LABEL: combine_bitwise_ops_test3c:
528 ; SSSE3-NEXT: xorps %xmm1, %xmm0
529 ; SSSE3-NEXT: xorps %xmm1, %xmm1
530 ; SSSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[2,3]
533 ; SSE41-LABEL: combine_bitwise_ops_test3c:
535 ; SSE41-NEXT: xorps %xmm1, %xmm0
536 ; SSE41-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,2],zero,zero
539 ; AVX-LABEL: combine_bitwise_ops_test3c:
541 ; AVX-NEXT: vxorps %xmm1, %xmm0, %xmm0
542 ; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,2],zero,zero
544 %shuf1 = shufflevector <4 x i32> %a, <4 x i32> %c, <4 x i32><i32 0, i32 2, i32 5, i32 7>
545 %shuf2 = shufflevector <4 x i32> %b, <4 x i32> %c, <4 x i32><i32 0, i32 2, i32 5, i32 7>
546 %xor = xor <4 x i32> %shuf1, %shuf2
550 define <4 x i32> @combine_bitwise_ops_test4c(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c) {
551 ; SSE-LABEL: combine_bitwise_ops_test4c:
553 ; SSE-NEXT: andps %xmm1, %xmm0
554 ; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2],xmm0[1,3]
555 ; SSE-NEXT: movaps %xmm2, %xmm0
558 ; AVX-LABEL: combine_bitwise_ops_test4c:
560 ; AVX-NEXT: vandps %xmm1, %xmm0, %xmm0
561 ; AVX-NEXT: vshufps {{.*#+}} xmm0 = xmm2[0,2],xmm0[1,3]
563 %shuf1 = shufflevector <4 x i32> %c, <4 x i32> %a, <4 x i32><i32 0, i32 2, i32 5, i32 7>
564 %shuf2 = shufflevector <4 x i32> %c, <4 x i32> %b, <4 x i32><i32 0, i32 2, i32 5, i32 7>
565 %and = and <4 x i32> %shuf1, %shuf2
569 define <4 x i32> @combine_bitwise_ops_test5c(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c) {
570 ; SSE-LABEL: combine_bitwise_ops_test5c:
572 ; SSE-NEXT: orps %xmm1, %xmm0
573 ; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2],xmm0[1,3]
574 ; SSE-NEXT: movaps %xmm2, %xmm0
577 ; AVX-LABEL: combine_bitwise_ops_test5c:
579 ; AVX-NEXT: vorps %xmm1, %xmm0, %xmm0
580 ; AVX-NEXT: vshufps {{.*#+}} xmm0 = xmm2[0,2],xmm0[1,3]
582 %shuf1 = shufflevector <4 x i32> %c, <4 x i32> %a, <4 x i32><i32 0, i32 2, i32 5, i32 7>
583 %shuf2 = shufflevector <4 x i32> %c, <4 x i32> %b, <4 x i32><i32 0, i32 2, i32 5, i32 7>
584 %or = or <4 x i32> %shuf1, %shuf2
588 define <4 x i32> @combine_bitwise_ops_test6c(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c) {
589 ; SSE2-LABEL: combine_bitwise_ops_test6c:
591 ; SSE2-NEXT: xorps %xmm1, %xmm0
592 ; SSE2-NEXT: xorps %xmm1, %xmm1
593 ; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1],xmm0[1,3]
594 ; SSE2-NEXT: movaps %xmm1, %xmm0
597 ; SSSE3-LABEL: combine_bitwise_ops_test6c:
599 ; SSSE3-NEXT: xorps %xmm1, %xmm0
600 ; SSSE3-NEXT: xorps %xmm1, %xmm1
601 ; SSSE3-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1],xmm0[1,3]
602 ; SSSE3-NEXT: movaps %xmm1, %xmm0
605 ; SSE41-LABEL: combine_bitwise_ops_test6c:
607 ; SSE41-NEXT: xorps %xmm1, %xmm0
608 ; SSE41-NEXT: insertps {{.*#+}} xmm0 = zero,zero,xmm0[1,3]
611 ; AVX-LABEL: combine_bitwise_ops_test6c:
613 ; AVX-NEXT: vxorps %xmm1, %xmm0, %xmm0
614 ; AVX-NEXT: vinsertps {{.*#+}} xmm0 = zero,zero,xmm0[1,3]
616 %shuf1 = shufflevector <4 x i32> %c, <4 x i32> %a, <4 x i32><i32 0, i32 2, i32 5, i32 7>
617 %shuf2 = shufflevector <4 x i32> %c, <4 x i32> %b, <4 x i32><i32 0, i32 2, i32 5, i32 7>
618 %xor = xor <4 x i32> %shuf1, %shuf2
622 define <4 x i32> @combine_nested_undef_test1(<4 x i32> %A, <4 x i32> %B) {
623 ; SSE-LABEL: combine_nested_undef_test1:
625 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[3,1,0,1]
628 ; AVX-LABEL: combine_nested_undef_test1:
630 ; AVX-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,1,0,1]
632 %1 = shufflevector <4 x i32> %A, <4 x i32> %B, <4 x i32> <i32 0, i32 4, i32 3, i32 1>
633 %2 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> <i32 2, i32 4, i32 0, i32 3>
637 define <4 x i32> @combine_nested_undef_test2(<4 x i32> %A, <4 x i32> %B) {
638 ; SSE-LABEL: combine_nested_undef_test2:
640 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,1,0,3]
643 ; AVX-LABEL: combine_nested_undef_test2:
645 ; AVX-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[2,1,0,3]
647 %1 = shufflevector <4 x i32> %A, <4 x i32> %B, <4 x i32> <i32 0, i32 5, i32 2, i32 3>
648 %2 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> <i32 2, i32 4, i32 0, i32 3>
652 define <4 x i32> @combine_nested_undef_test3(<4 x i32> %A, <4 x i32> %B) {
653 ; SSE-LABEL: combine_nested_undef_test3:
655 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,1,0,3]
658 ; AVX-LABEL: combine_nested_undef_test3:
660 ; AVX-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[2,1,0,3]
662 %1 = shufflevector <4 x i32> %A, <4 x i32> %B, <4 x i32> <i32 0, i32 6, i32 2, i32 3>
663 %2 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> <i32 2, i32 4, i32 0, i32 3>
667 define <4 x i32> @combine_nested_undef_test4(<4 x i32> %A, <4 x i32> %B) {
668 ; SSE-LABEL: combine_nested_undef_test4:
670 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,0,1]
673 ; AVX1-LABEL: combine_nested_undef_test4:
675 ; AVX1-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,1,0,1]
678 ; AVX2-LABEL: combine_nested_undef_test4:
680 ; AVX2-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
682 %1 = shufflevector <4 x i32> %A, <4 x i32> %B, <4 x i32> <i32 0, i32 4, i32 7, i32 1>
683 %2 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> <i32 4, i32 4, i32 0, i32 3>
687 define <4 x i32> @combine_nested_undef_test5(<4 x i32> %A, <4 x i32> %B) {
688 ; SSE-LABEL: combine_nested_undef_test5:
690 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
693 ; AVX-LABEL: combine_nested_undef_test5:
695 ; AVX-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[2,3,2,3]
697 %1 = shufflevector <4 x i32> %A, <4 x i32> %B, <4 x i32> <i32 5, i32 5, i32 2, i32 3>
698 %2 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> <i32 2, i32 4, i32 4, i32 3>
702 define <4 x i32> @combine_nested_undef_test6(<4 x i32> %A, <4 x i32> %B) {
703 ; SSE-LABEL: combine_nested_undef_test6:
705 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
708 ; AVX-LABEL: combine_nested_undef_test6:
710 ; AVX-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[2,3,0,1]
712 %1 = shufflevector <4 x i32> %A, <4 x i32> %B, <4 x i32> <i32 0, i32 6, i32 2, i32 4>
713 %2 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> <i32 2, i32 4, i32 0, i32 4>
717 define <4 x i32> @combine_nested_undef_test7(<4 x i32> %A, <4 x i32> %B) {
718 ; SSE-LABEL: combine_nested_undef_test7:
720 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,0,2]
723 ; AVX-LABEL: combine_nested_undef_test7:
725 ; AVX-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,2,0,2]
727 %1 = shufflevector <4 x i32> %A, <4 x i32> %B, <4 x i32> <i32 0, i32 5, i32 2, i32 7>
728 %2 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> <i32 0, i32 2, i32 0, i32 2>
732 define <4 x i32> @combine_nested_undef_test8(<4 x i32> %A, <4 x i32> %B) {
733 ; SSE-LABEL: combine_nested_undef_test8:
735 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
738 ; AVX-LABEL: combine_nested_undef_test8:
740 ; AVX-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[1,1,3,3]
742 %1 = shufflevector <4 x i32> %A, <4 x i32> %B, <4 x i32> <i32 4, i32 1, i32 6, i32 3>
743 %2 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> <i32 1, i32 4, i32 3, i32 4>
747 define <4 x i32> @combine_nested_undef_test9(<4 x i32> %A, <4 x i32> %B) {
748 ; SSE-LABEL: combine_nested_undef_test9:
750 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,3,2,2]
753 ; AVX-LABEL: combine_nested_undef_test9:
755 ; AVX-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[1,3,2,2]
757 %1 = shufflevector <4 x i32> %A, <4 x i32> %B, <4 x i32> <i32 1, i32 3, i32 2, i32 5>
758 %2 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> <i32 0, i32 1, i32 4, i32 2>
762 define <4 x i32> @combine_nested_undef_test10(<4 x i32> %A, <4 x i32> %B) {
763 ; SSE-LABEL: combine_nested_undef_test10:
765 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,1,3]
768 ; AVX-LABEL: combine_nested_undef_test10:
770 ; AVX-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[1,1,1,3]
772 %1 = shufflevector <4 x i32> %A, <4 x i32> %B, <4 x i32> <i32 1, i32 1, i32 5, i32 5>
773 %2 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> <i32 0, i32 4, i32 1, i32 4>
777 define <4 x i32> @combine_nested_undef_test11(<4 x i32> %A, <4 x i32> %B) {
778 ; SSE-LABEL: combine_nested_undef_test11:
780 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,2,1]
783 ; AVX-LABEL: combine_nested_undef_test11:
785 ; AVX-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[1,1,2,1]
787 %1 = shufflevector <4 x i32> %A, <4 x i32> %B, <4 x i32> <i32 1, i32 2, i32 5, i32 4>
788 %2 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> <i32 0, i32 4, i32 1, i32 0>
792 define <4 x i32> @combine_nested_undef_test12(<4 x i32> %A, <4 x i32> %B) {
793 ; SSE-LABEL: combine_nested_undef_test12:
795 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,0,1]
798 ; AVX1-LABEL: combine_nested_undef_test12:
800 ; AVX1-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,1,0,1]
803 ; AVX2-LABEL: combine_nested_undef_test12:
805 ; AVX2-NEXT: vbroadcastss %xmm0, %xmm0
807 %1 = shufflevector <4 x i32> %A, <4 x i32> %B, <4 x i32> <i32 0, i32 0, i32 2, i32 4>
808 %2 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> <i32 1, i32 4, i32 0, i32 4>
812 ; The following pair of shuffles is folded into vector %A.
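; Worked composition (editorial note): the inner mask <1,4,2,6> yields
; %1 = <A1,B0,A2,B2>, and the outer mask <4,0,2,4> then picks
; <undef,%1[0],%1[2],undef> = <undef,A1,A2,undef>, which is %A with two lanes
; left undef, so the whole pair is expected to fold to a plain return of %A.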
813 define <4 x i32> @combine_nested_undef_test13(<4 x i32> %A, <4 x i32> %B) {
814 ; CHECK-LABEL: combine_nested_undef_test13:
817 %1 = shufflevector <4 x i32> %A, <4 x i32> %B, <4 x i32> <i32 1, i32 4, i32 2, i32 6>
818 %2 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> <i32 4, i32 0, i32 2, i32 4>
822 ; The following pair of shuffles is folded into vector %B.
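; Worked composition (editorial note): here %1 = <A0,B2,A2,B0>, and the outer
; mask <3,4,1,4> picks <B0,undef,B2,undef>, i.e. %B with two undef lanes, so
; only a register copy of %xmm1 is expected.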
823 define <4 x i32> @combine_nested_undef_test14(<4 x i32> %A, <4 x i32> %B) {
824 ; SSE-LABEL: combine_nested_undef_test14:
826 ; SSE-NEXT: movaps %xmm1, %xmm0
829 ; AVX-LABEL: combine_nested_undef_test14:
831 ; AVX-NEXT: vmovaps %xmm1, %xmm0
833 %1 = shufflevector <4 x i32> %A, <4 x i32> %B, <4 x i32> <i32 0, i32 6, i32 2, i32 4>
834 %2 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> <i32 3, i32 4, i32 1, i32 4>
839 ; Verify that we don't optimize the following cases. We expect more than one shuffle.
841 ; FIXME: Many of these already don't make sense, and the rest should stop
842 ; making sense with the new vector shuffle lowering. Revisit at least testing for
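; Example of why one shuffle is not enough (editorial note): in test15 the two
; masks compose to <A3,B0,A0,A1>, a cross-input pattern mixing three lanes of
; %A with one lane of %B, so the 128-bit lowerings below still need a shuffle
; of each input plus a blend (or two shufps on SSE2).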
845 define <4 x i32> @combine_nested_undef_test15(<4 x i32> %A, <4 x i32> %B) {
846 ; SSE2-LABEL: combine_nested_undef_test15:
848 ; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,0],xmm0[3,0]
849 ; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[2,0],xmm0[0,1]
850 ; SSE2-NEXT: movaps %xmm1, %xmm0
853 ; SSSE3-LABEL: combine_nested_undef_test15:
855 ; SSSE3-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,0],xmm0[3,0]
856 ; SSSE3-NEXT: shufps {{.*#+}} xmm1 = xmm1[2,0],xmm0[0,1]
857 ; SSSE3-NEXT: movaps %xmm1, %xmm0
860 ; SSE41-LABEL: combine_nested_undef_test15:
862 ; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,1,1]
863 ; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[3,1,0,1]
864 ; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5,6,7]
867 ; AVX1-LABEL: combine_nested_undef_test15:
869 ; AVX1-NEXT: vpermilps {{.*#+}} xmm1 = xmm1[0,0,1,1]
870 ; AVX1-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,1,0,1]
871 ; AVX1-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3]
874 ; AVX2-LABEL: combine_nested_undef_test15:
876 ; AVX2-NEXT: vbroadcastss %xmm1, %xmm1
877 ; AVX2-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,1,0,1]
878 ; AVX2-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3]
880 %1 = shufflevector <4 x i32> %A, <4 x i32> %B, <4 x i32> <i32 0, i32 4, i32 3, i32 1>
881 %2 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> <i32 2, i32 1, i32 0, i32 3>
885 define <4 x i32> @combine_nested_undef_test16(<4 x i32> %A, <4 x i32> %B) {
886 ; SSE2-LABEL: combine_nested_undef_test16:
888 ; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,3,2,3]
889 ; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,0,2,3]
890 ; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
893 ; SSSE3-LABEL: combine_nested_undef_test16:
895 ; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,3,2,3]
896 ; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,0,2,3]
897 ; SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
900 ; SSE41-LABEL: combine_nested_undef_test16:
902 ; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
903 ; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7]
906 ; AVX-LABEL: combine_nested_undef_test16:
908 ; AVX-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[2,3,0,1]
909 ; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
911 %1 = shufflevector <4 x i32> %A, <4 x i32> %B, <4 x i32> <i32 0, i32 5, i32 2, i32 7>
912 %2 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> <i32 2, i32 1, i32 0, i32 3>
916 define <4 x i32> @combine_nested_undef_test17(<4 x i32> %A, <4 x i32> %B) {
917 ; SSE2-LABEL: combine_nested_undef_test17:
919 ; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,0],xmm0[1,0]
920 ; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,1],xmm1[0,2]
923 ; SSSE3-LABEL: combine_nested_undef_test17:
925 ; SSSE3-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,0],xmm0[1,0]
926 ; SSSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,1],xmm1[0,2]
929 ; SSE41-LABEL: combine_nested_undef_test17:
931 ; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3,4,5,6,7]
932 ; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[3,1,0,1]
935 ; AVX-LABEL: combine_nested_undef_test17:
937 ; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
938 ; AVX-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,1,0,1]
940 %1 = shufflevector <4 x i32> %A, <4 x i32> %B, <4 x i32> <i32 4, i32 1, i32 3, i32 1>
941 %2 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> <i32 2, i32 1, i32 0, i32 3>
945 define <4 x i32> @combine_nested_undef_test18(<4 x i32> %A, <4 x i32> %B) {
946 ; SSE-LABEL: combine_nested_undef_test18:
948 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,0,3]
951 ; AVX-LABEL: combine_nested_undef_test18:
953 ; AVX-NEXT: vpermilps {{.*#+}} xmm0 = xmm1[1,1,0,3]
955 %1 = shufflevector <4 x i32> %A, <4 x i32> %B, <4 x i32> <i32 4, i32 5, i32 2, i32 7>
956 %2 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> <i32 1, i32 1, i32 0, i32 3>
960 define <4 x i32> @combine_nested_undef_test19(<4 x i32> %A, <4 x i32> %B) {
961 ; SSE2-LABEL: combine_nested_undef_test19:
963 ; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
964 ; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[3,0,0,0]
967 ; SSSE3-LABEL: combine_nested_undef_test19:
969 ; SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
970 ; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[3,0,0,0]
973 ; SSE41-LABEL: combine_nested_undef_test19:
975 ; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5,6,7]
976 ; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,0,0,0]
979 ; AVX-LABEL: combine_nested_undef_test19:
981 ; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3]
982 ; AVX-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[1,0,0,0]
984 %1 = shufflevector <4 x i32> %A, <4 x i32> %B, <4 x i32> <i32 0, i32 4, i32 5, i32 6>
985 %2 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> <i32 2, i32 0, i32 0, i32 0>
989 define <4 x i32> @combine_nested_undef_test20(<4 x i32> %A, <4 x i32> %B) {
990 ; SSE2-LABEL: combine_nested_undef_test20:
992 ; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,0],xmm0[2,3]
993 ; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2,3,1]
994 ; SSE2-NEXT: movaps %xmm1, %xmm0
997 ; SSSE3-LABEL: combine_nested_undef_test20:
999 ; SSSE3-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,0],xmm0[2,3]
1000 ; SSSE3-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2,3,1]
1001 ; SSSE3-NEXT: movaps %xmm1, %xmm0
1004 ; SSE41-LABEL: combine_nested_undef_test20:
1006 ; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7]
1007 ; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,3,0]
1010 ; AVX-LABEL: combine_nested_undef_test20:
1012 ; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3]
1013 ; AVX-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,2,3,0]
1015 %1 = shufflevector <4 x i32> %A, <4 x i32> %B, <4 x i32> <i32 3, i32 2, i32 4, i32 4>
1016 %2 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> <i32 2, i32 1, i32 0, i32 3>
1020 define <4 x i32> @combine_nested_undef_test21(<4 x i32> %A, <4 x i32> %B) {
1021 ; SSE2-LABEL: combine_nested_undef_test21:
1023 ; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
1024 ; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[0,3,0,3]
1027 ; SSSE3-LABEL: combine_nested_undef_test21:
1029 ; SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
1030 ; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm1[0,3,0,3]
1033 ; SSE41-LABEL: combine_nested_undef_test21:
1035 ; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3],xmm1[4,5,6,7]
1036 ; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,0,1]
1039 ; AVX1-LABEL: combine_nested_undef_test21:
1041 ; AVX1-NEXT: vblendps {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2,3]
1042 ; AVX1-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,1,0,1]
1045 ; AVX2-LABEL: combine_nested_undef_test21:
1047 ; AVX2-NEXT: vblendps {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2,3]
1048 ; AVX2-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
1050 %1 = shufflevector <4 x i32> %A, <4 x i32> %B, <4 x i32> <i32 4, i32 1, i32 3, i32 1>
1051 %2 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> <i32 0, i32 1, i32 0, i32 3>
1056 ; Test that we correctly combine shuffles according to the rule
1057 ; shuffle(shuffle(x, y), undef) -> shuffle(y, undef)
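; Worked example (editorial note): in test22 the inner mask <4,5,2,7> takes
; lanes 0,1,3 from %B and lane 2 from %A; the outer mask <1,1,1,3> only reads
; lanes 1,1,1,3, all of which come from %B, so the pair reduces to
; shuffle(%B, undef, <1,1,1,3>) -- the single pshufd/vpermilps of xmm1 below.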
1059 define <4 x i32> @combine_nested_undef_test22(<4 x i32> %A, <4 x i32> %B) {
1060 ; SSE-LABEL: combine_nested_undef_test22:
1062 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,1,3]
1065 ; AVX-LABEL: combine_nested_undef_test22:
1067 ; AVX-NEXT: vpermilps {{.*#+}} xmm0 = xmm1[1,1,1,3]
1069 %1 = shufflevector <4 x i32> %A, <4 x i32> %B, <4 x i32> <i32 4, i32 5, i32 2, i32 7>
1070 %2 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 3>
1074 define <4 x i32> @combine_nested_undef_test23(<4 x i32> %A, <4 x i32> %B) {
1075 ; SSE-LABEL: combine_nested_undef_test23:
1077 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[0,1,0,3]
1080 ; AVX-LABEL: combine_nested_undef_test23:
1082 ; AVX-NEXT: vpermilps {{.*#+}} xmm0 = xmm1[0,1,0,3]
1084 %1 = shufflevector <4 x i32> %A, <4 x i32> %B, <4 x i32> <i32 4, i32 5, i32 2, i32 7>
1085 %2 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> <i32 0, i32 1, i32 0, i32 3>
1089 define <4 x i32> @combine_nested_undef_test24(<4 x i32> %A, <4 x i32> %B) {
1090 ; SSE-LABEL: combine_nested_undef_test24:
1092 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[0,3,2,3]
1095 ; AVX-LABEL: combine_nested_undef_test24:
1097 ; AVX-NEXT: vpermilps {{.*#+}} xmm0 = xmm1[0,3,2,3]
1099 %1 = shufflevector <4 x i32> %A, <4 x i32> %B, <4 x i32> <i32 4, i32 1, i32 6, i32 7>
1100 %2 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> <i32 0, i32 3, i32 2, i32 4>
1104 define <4 x i32> @combine_nested_undef_test25(<4 x i32> %A, <4 x i32> %B) {
1105 ; SSE-LABEL: combine_nested_undef_test25:
1107 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,0,1]
1110 ; AVX1-LABEL: combine_nested_undef_test25:
1112 ; AVX1-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,1,0,1]
1115 ; AVX2-LABEL: combine_nested_undef_test25:
1117 ; AVX2-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
1119 %1 = shufflevector <4 x i32> %B, <4 x i32> %A, <4 x i32> <i32 1, i32 5, i32 2, i32 4>
1120 %2 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> <i32 3, i32 1, i32 3, i32 1>
1124 define <4 x i32> @combine_nested_undef_test26(<4 x i32> %A, <4 x i32> %B) {
1125 ; SSE-LABEL: combine_nested_undef_test26:
1127 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
1130 ; AVX-LABEL: combine_nested_undef_test26:
1132 ; AVX-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[2,3,2,3]
1134 %1 = shufflevector <4 x i32> %B, <4 x i32> %A, <4 x i32> <i32 1, i32 2, i32 6, i32 7>
1135 %2 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> <i32 2, i32 3, i32 2, i32 3>
1139 define <4 x i32> @combine_nested_undef_test27(<4 x i32> %A, <4 x i32> %B) {
1140 ; SSE-LABEL: combine_nested_undef_test27:
1142 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,0,1]
1145 ; AVX1-LABEL: combine_nested_undef_test27:
1147 ; AVX1-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,1,0,1]
1150 ; AVX2-LABEL: combine_nested_undef_test27:
1152 ; AVX2-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
1154 %1 = shufflevector <4 x i32> %B, <4 x i32> %A, <4 x i32> <i32 2, i32 1, i32 5, i32 4>
1155 %2 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> <i32 3, i32 2, i32 3, i32 2>
1159 define <4 x i32> @combine_nested_undef_test28(<4 x i32> %A, <4 x i32> %B) {
1160 ; SSE-LABEL: combine_nested_undef_test28:
1162 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,1,0]
1165 ; AVX-LABEL: combine_nested_undef_test28:
1167 ; AVX-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,1,1,0]
1169 %1 = shufflevector <4 x i32> %B, <4 x i32> %A, <4 x i32> <i32 1, i32 2, i32 4, i32 5>
1170 %2 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> <i32 2, i32 3, i32 3, i32 2>
1174 define <4 x float> @combine_test1(<4 x float> %a, <4 x float> %b) {
1175 ; SSE-LABEL: combine_test1:
1177 ; SSE-NEXT: movaps %xmm1, %xmm0
1180 ; AVX-LABEL: combine_test1:
1182 ; AVX-NEXT: vmovaps %xmm1, %xmm0
1184 %1 = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 4, i32 1, i32 6, i32 3>
1185 %2 = shufflevector <4 x float> %1, <4 x float> %b, <4 x i32> <i32 0, i32 5, i32 2, i32 7>
1189 define <4 x float> @combine_test2(<4 x float> %a, <4 x float> %b) {
1190 ; SSE2-LABEL: combine_test2:
1192 ; SSE2-NEXT: movss {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
1193 ; SSE2-NEXT: movaps %xmm1, %xmm0
1196 ; SSSE3-LABEL: combine_test2:
1198 ; SSSE3-NEXT: movss {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
1199 ; SSSE3-NEXT: movaps %xmm1, %xmm0
1202 ; SSE41-LABEL: combine_test2:
1204 ; SSE41-NEXT: blendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
1207 ; AVX-LABEL: combine_test2:
1209 ; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
1211 %1 = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 0, i32 5, i32 2, i32 7>
1212 %2 = shufflevector <4 x float> %1, <4 x float> %b, <4 x i32> <i32 0, i32 1, i32 6, i32 3>
1216 define <4 x float> @combine_test3(<4 x float> %a, <4 x float> %b) {
1217 ; SSE-LABEL: combine_test3:
1219 ; SSE-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
1222 ; AVX-LABEL: combine_test3:
1224 ; AVX-NEXT: vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
1226 %1 = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 0, i32 5, i32 1, i32 7>
1227 %2 = shufflevector <4 x float> %1, <4 x float> %b, <4 x i32> <i32 0, i32 2, i32 4, i32 1>
1231 define <4 x float> @combine_test4(<4 x float> %a, <4 x float> %b) {
1232 ; SSE-LABEL: combine_test4:
1234 ; SSE-NEXT: movhlps {{.*#+}} xmm0 = xmm1[1],xmm0[1]
1237 ; AVX-LABEL: combine_test4:
1239 ; AVX-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1]
1241 %1 = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 2, i32 3, i32 5, i32 5>
1242 %2 = shufflevector <4 x float> %1, <4 x float> %b, <4 x i32> <i32 6, i32 7, i32 0, i32 1>
1246 define <4 x float> @combine_test5(<4 x float> %a, <4 x float> %b) {
1247 ; SSE2-LABEL: combine_test5:
1249 ; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,0],xmm1[0,0]
1250 ; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0],xmm1[2,3]
1253 ; SSSE3-LABEL: combine_test5:
1255 ; SSSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,0],xmm1[0,0]
1256 ; SSSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0],xmm1[2,3]
1259 ; SSE41-LABEL: combine_test5:
1261 ; SSE41-NEXT: blendps {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2,3]
1264 ; AVX-LABEL: combine_test5:
1266 ; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2,3]
1268 %1 = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 4, i32 1, i32 6, i32 3>
1269 %2 = shufflevector <4 x float> %1, <4 x float> %b, <4 x i32> <i32 0, i32 1, i32 2, i32 7>
1273 define <4 x i32> @combine_test6(<4 x i32> %a, <4 x i32> %b) {
1274 ; SSE-LABEL: combine_test6:
1276 ; SSE-NEXT: movaps %xmm1, %xmm0
1279 ; AVX-LABEL: combine_test6:
1281 ; AVX-NEXT: vmovaps %xmm1, %xmm0
1283 %1 = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 4, i32 1, i32 6, i32 3>
1284 %2 = shufflevector <4 x i32> %1, <4 x i32> %b, <4 x i32> <i32 0, i32 5, i32 2, i32 7>
1288 define <4 x i32> @combine_test7(<4 x i32> %a, <4 x i32> %b) {
1289 ; SSE2-LABEL: combine_test7:
1291 ; SSE2-NEXT: movss {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
1292 ; SSE2-NEXT: movaps %xmm1, %xmm0
1295 ; SSSE3-LABEL: combine_test7:
1297 ; SSSE3-NEXT: movss {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
1298 ; SSSE3-NEXT: movaps %xmm1, %xmm0
1301 ; SSE41-LABEL: combine_test7:
1303 ; SSE41-NEXT: blendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
1306 ; AVX-LABEL: combine_test7:
1308 ; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
1310 %1 = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 0, i32 5, i32 2, i32 7>
1311 %2 = shufflevector <4 x i32> %1, <4 x i32> %b, <4 x i32> <i32 0, i32 1, i32 6, i32 3>
1315 define <4 x i32> @combine_test8(<4 x i32> %a, <4 x i32> %b) {
1316 ; SSE-LABEL: combine_test8:
1318 ; SSE-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
1321 ; AVX-LABEL: combine_test8:
1323 ; AVX-NEXT: vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
1325 %1 = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 0, i32 5, i32 1, i32 7>
1326 %2 = shufflevector <4 x i32> %1, <4 x i32> %b, <4 x i32> <i32 0, i32 2, i32 4, i32 1>
1330 define <4 x i32> @combine_test9(<4 x i32> %a, <4 x i32> %b) {
1331 ; SSE-LABEL: combine_test9:
1333 ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
1334 ; SSE-NEXT: movaps %xmm1, %xmm0
1337 ; AVX-LABEL: combine_test9:
1339 ; AVX-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1]
1341 %1 = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 2, i32 3, i32 5, i32 5>
1342 %2 = shufflevector <4 x i32> %1, <4 x i32> %b, <4 x i32> <i32 6, i32 7, i32 0, i32 1>
1346 define <4 x i32> @combine_test10(<4 x i32> %a, <4 x i32> %b) {
1347 ; SSE2-LABEL: combine_test10:
1349 ; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,0],xmm1[0,0]
1350 ; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0],xmm1[2,3]
1353 ; SSSE3-LABEL: combine_test10:
1355 ; SSSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,0],xmm1[0,0]
1356 ; SSSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0],xmm1[2,3]
1359 ; SSE41-LABEL: combine_test10:
1361 ; SSE41-NEXT: blendps {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2,3]
1364 ; AVX-LABEL: combine_test10:
1366 ; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2,3]
1368 %1 = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 4, i32 1, i32 6, i32 3>
1369 %2 = shufflevector <4 x i32> %1, <4 x i32> %b, <4 x i32> <i32 0, i32 1, i32 2, i32 7>
1373 define <4 x float> @combine_test11(<4 x float> %a, <4 x float> %b) {
1374 ; CHECK-LABEL: combine_test11:
1377 %1 = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 4, i32 1, i32 6, i32 3>
1378 %2 = shufflevector <4 x float> %1, <4 x float> %a, <4 x i32> <i32 4, i32 1, i32 6, i32 3>
1382 define <4 x float> @combine_test12(<4 x float> %a, <4 x float> %b) {
1383 ; SSE2-LABEL: combine_test12:
1385 ; SSE2-NEXT: movss {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
1386 ; SSE2-NEXT: movaps %xmm1, %xmm0
1389 ; SSSE3-LABEL: combine_test12:
1391 ; SSSE3-NEXT: movss {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
1392 ; SSSE3-NEXT: movaps %xmm1, %xmm0
1395 ; SSE41-LABEL: combine_test12:
1397 ; SSE41-NEXT: blendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
1400 ; AVX-LABEL: combine_test12:
1402 ; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
1404 %1 = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 0, i32 5, i32 6, i32 7>
1405 %2 = shufflevector <4 x float> %1, <4 x float> %a, <4 x i32> <i32 4, i32 1, i32 2, i32 3>
1409 define <4 x float> @combine_test13(<4 x float> %a, <4 x float> %b) {
1410 ; SSE-LABEL: combine_test13:
1412 ; SSE-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
1415 ; AVX-LABEL: combine_test13:
1417 ; AVX-NEXT: vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
1419 %1 = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 0, i32 1, i32 4, i32 5>
1420 %2 = shufflevector <4 x float> %1, <4 x float> %a, <4 x i32> <i32 4, i32 5, i32 2, i32 3>
1424 define <4 x float> @combine_test14(<4 x float> %a, <4 x float> %b) {
1425 ; SSE-LABEL: combine_test14:
1427 ; SSE-NEXT: unpckhpd {{.*#+}} xmm0 = xmm0[1],xmm1[1]
1430 ; AVX-LABEL: combine_test14:
1432 ; AVX-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm0[1],xmm1[1]
1434 %1 = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 6, i32 7, i32 5, i32 5>
1435 %2 = shufflevector <4 x float> %1, <4 x float> %a, <4 x i32> <i32 6, i32 7, i32 0, i32 1>
1439 define <4 x float> @combine_test15(<4 x float> %a, <4 x float> %b) {
1440 ; SSE2-LABEL: combine_test15:
1442 ; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,0],xmm1[0,0]
1443 ; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0],xmm1[2,3]
1446 ; SSSE3-LABEL: combine_test15:
1448 ; SSSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,0],xmm1[0,0]
1449 ; SSSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0],xmm1[2,3]
1452 ; SSE41-LABEL: combine_test15:
1454 ; SSE41-NEXT: blendps {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2,3]
1457 ; AVX-LABEL: combine_test15:
1459 ; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2,3]
1461 %1 = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 4, i32 1, i32 6, i32 7>
1462 %2 = shufflevector <4 x float> %1, <4 x float> %a, <4 x i32> <i32 0, i32 5, i32 2, i32 3>
1466 define <4 x i32> @combine_test16(<4 x i32> %a, <4 x i32> %b) {
1467 ; CHECK-LABEL: combine_test16:
1470 %1 = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 4, i32 1, i32 6, i32 3>
1471 %2 = shufflevector <4 x i32> %1, <4 x i32> %a, <4 x i32> <i32 4, i32 1, i32 6, i32 3>
1475 define <4 x i32> @combine_test17(<4 x i32> %a, <4 x i32> %b) {
1476 ; SSE2-LABEL: combine_test17:
1478 ; SSE2-NEXT: movss {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
1479 ; SSE2-NEXT: movaps %xmm1, %xmm0
1482 ; SSSE3-LABEL: combine_test17:
1484 ; SSSE3-NEXT: movss {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
1485 ; SSSE3-NEXT: movaps %xmm1, %xmm0
1488 ; SSE41-LABEL: combine_test17:
1490 ; SSE41-NEXT: blendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
1493 ; AVX-LABEL: combine_test17:
1495 ; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
1497 %1 = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 0, i32 5, i32 6, i32 7>
1498 %2 = shufflevector <4 x i32> %1, <4 x i32> %a, <4 x i32> <i32 4, i32 1, i32 2, i32 3>
1502 define <4 x i32> @combine_test18(<4 x i32> %a, <4 x i32> %b) {
1503 ; SSE-LABEL: combine_test18:
1505 ; SSE-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
1508 ; AVX-LABEL: combine_test18:
1510 ; AVX-NEXT: vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
1512 %1 = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 0, i32 1, i32 4, i32 5>
1513 %2 = shufflevector <4 x i32> %1, <4 x i32> %a, <4 x i32> <i32 4, i32 5, i32 2, i32 3>
1517 define <4 x i32> @combine_test19(<4 x i32> %a, <4 x i32> %b) {
1518 ; SSE-LABEL: combine_test19:
1520 ; SSE-NEXT: unpckhpd {{.*#+}} xmm0 = xmm0[1],xmm1[1]
1523 ; AVX-LABEL: combine_test19:
1525 ; AVX-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm0[1],xmm1[1]
1527 %1 = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 6, i32 7, i32 5, i32 5>
1528 %2 = shufflevector <4 x i32> %1, <4 x i32> %a, <4 x i32> <i32 6, i32 7, i32 0, i32 1>
1532 define <4 x i32> @combine_test20(<4 x i32> %a, <4 x i32> %b) {
1533 ; SSE2-LABEL: combine_test20:
1535 ; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,0],xmm1[0,0]
1536 ; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0],xmm1[2,3]
1539 ; SSSE3-LABEL: combine_test20:
1541 ; SSSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,0],xmm1[0,0]
1542 ; SSSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0],xmm1[2,3]
1545 ; SSE41-LABEL: combine_test20:
1547 ; SSE41-NEXT: blendps {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2,3]
1550 ; AVX-LABEL: combine_test20:
1552 ; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2,3]
1554 %1 = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 4, i32 1, i32 6, i32 7>
1555 %2 = shufflevector <4 x i32> %1, <4 x i32> %a, <4 x i32> <i32 0, i32 5, i32 2, i32 3>
1559 define <4 x i32> @combine_test21(<8 x i32> %a, <4 x i32>* %ptr) {
1560 ; SSE-LABEL: combine_test21:
1562 ; SSE-NEXT: movaps %xmm0, %xmm2
1563 ; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm1[0]
1564 ; SSE-NEXT: unpckhpd {{.*#+}} xmm0 = xmm0[1],xmm1[1]
1565 ; SSE-NEXT: movaps %xmm2, (%rdi)
1568 ; AVX-LABEL: combine_test21:
1570 ; AVX-NEXT: vextractf128 $1, %ymm0, %xmm1
1571 ; AVX-NEXT: vmovlhps {{.*#+}} xmm2 = xmm0[0],xmm1[0]
1572 ; AVX-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm0[1],xmm1[1]
1573 ; AVX-NEXT: vmovaps %xmm2, (%rdi)
1574 ; AVX-NEXT: vzeroupper
1576 %1 = shufflevector <8 x i32> %a, <8 x i32> %a, <4 x i32> <i32 0, i32 1, i32 4, i32 5>
1577 %2 = shufflevector <8 x i32> %a, <8 x i32> %a, <4 x i32> <i32 2, i32 3, i32 6, i32 7>
1578 store <4 x i32> %1, <4 x i32>* %ptr, align 16
1582 define <8 x float> @combine_test22(<2 x float>* %a, <2 x float>* %b) {
1583 ; SSE-LABEL: combine_test22:
1585 ; SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
1586 ; SSE-NEXT: movhps {{.*#+}} xmm0 = xmm0[0,1],mem[0,1]
1589 ; AVX-LABEL: combine_test22:
1591 ; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
1592 ; AVX-NEXT: vmovhps {{.*#+}} xmm0 = xmm0[0,1],mem[0,1]
1594 ; Current AVX2 lowering of this is still awful, so we are not adding a test case.
1595 %1 = load <2 x float>, <2 x float>* %a, align 8
1596 %2 = load <2 x float>, <2 x float>* %b, align 8
1597 %3 = shufflevector <2 x float> %1, <2 x float> %2, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef>
1602 define void @combine_test23(<8 x float> %v, <2 x float>* %ptr) {
1603 ; SSE-LABEL: combine_test23:
1605 ; SSE-NEXT: movups %xmm0, (%rdi)
1608 ; AVX-LABEL: combine_test23:
1610 ; AVX-NEXT: vmovups %xmm0, (%rdi)
1611 ; AVX-NEXT: vzeroupper
1613 %idx2 = getelementptr inbounds <2 x float>, <2 x float>* %ptr, i64 1
1614 %shuffle0 = shufflevector <8 x float> %v, <8 x float> undef, <2 x i32> <i32 0, i32 1>
1615 %shuffle1 = shufflevector <8 x float> %v, <8 x float> undef, <2 x i32> <i32 2, i32 3>
1616 store <2 x float> %shuffle0, <2 x float>* %ptr, align 8
1617 store <2 x float> %shuffle1, <2 x float>* %idx2, align 8
1621 ; Check some negative cases.
1622 ; FIXME: Do any of these really make sense? Are they redundant with the above tests?
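; For instance (editorial note), in test1b the masks compose to <B0,B1,B2,B0>:
; %1 = <B0,A1,B2,A3>, and the outer mask <0,5,2,0> replaces the %A lanes with
; fresh %B lanes, so only a single shuffle of %b survives.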
1624 define <4 x float> @combine_test1b(<4 x float> %a, <4 x float> %b) {
1625 ; SSE-LABEL: combine_test1b:
1627 ; SSE-NEXT: movaps %xmm1, %xmm0
1628 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,0]
1631 ; AVX-LABEL: combine_test1b:
1633 ; AVX-NEXT: vpermilps {{.*#+}} xmm0 = xmm1[0,1,2,0]
1635 %1 = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 4, i32 1, i32 6, i32 3>
1636 %2 = shufflevector <4 x float> %1, <4 x float> %b, <4 x i32> <i32 0, i32 5, i32 2, i32 0>
1640 define <4 x float> @combine_test2b(<4 x float> %a, <4 x float> %b) {
1641 ; SSE2-LABEL: combine_test2b:
1643 ; SSE2-NEXT: movaps %xmm1, %xmm0
1644 ; SSE2-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
1647 ; SSSE3-LABEL: combine_test2b:
1649 ; SSSE3-NEXT: movddup {{.*#+}} xmm0 = xmm1[0,0]
1652 ; SSE41-LABEL: combine_test2b:
1654 ; SSE41-NEXT: movddup {{.*#+}} xmm0 = xmm1[0,0]
1657 ; AVX-LABEL: combine_test2b:
1659 ; AVX-NEXT: vmovddup {{.*#+}} xmm0 = xmm1[0,0]
1661 %1 = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 4, i32 1, i32 6, i32 3>
1662 %2 = shufflevector <4 x float> %1, <4 x float> %b, <4 x i32> <i32 0, i32 5, i32 0, i32 5>
1666 define <4 x float> @combine_test3b(<4 x float> %a, <4 x float> %b) {
1667 ; SSE2-LABEL: combine_test3b:
1669 ; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,0],xmm1[3,0]
1670 ; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[2,3]
1673 ; SSSE3-LABEL: combine_test3b:
1675 ; SSSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,0],xmm1[3,0]
1676 ; SSSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[2,3]
1679 ; SSE41-LABEL: combine_test3b:
1681 ; SSE41-NEXT: blendps {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3]
1682 ; SSE41-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,3,2,3]
1685 ; AVX-LABEL: combine_test3b:
1687 ; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3]
1688 ; AVX-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,3,2,3]
1690 %1 = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 0, i32 0, i32 6, i32 3>
1691 %2 = shufflevector <4 x float> %1, <4 x float> %b, <4 x i32> <i32 0, i32 7, i32 2, i32 7>
1695 define <4 x float> @combine_test4b(<4 x float> %a, <4 x float> %b) {
1696 ; SSE-LABEL: combine_test4b:
1698 ; SSE-NEXT: movaps %xmm1, %xmm0
1699 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1],xmm1[2,3]
1702 ; AVX-LABEL: combine_test4b:
1704 ; AVX-NEXT: vpermilps {{.*#+}} xmm0 = xmm1[1,1,2,3]
1706 %1 = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 4, i32 1, i32 6, i32 3>
1707 %2 = shufflevector <4 x float> %1, <4 x float> %b, <4 x i32> <i32 5, i32 5, i32 2, i32 7>
1712 ; Verify that we correctly fold shuffles even when we use illegal vector types.
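; Editorial note: <4 x i8> is not a legal x86 vector type, so the loaded
; operands are first extended to a legal width (the movd/punpck and pmovzxbd
; sequences in the checks below); the shuffle folds then apply to the widened
; vectors.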
1714 define <4 x i8> @combine_test1c(<4 x i8>* %a, <4 x i8>* %b) {
1715 ; SSE2-LABEL: combine_test1c:
1717 ; SSE2-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
1718 ; SSE2-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
1719 ; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
1720 ; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
1721 ; SSE2-NEXT: movss {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
1724 ; SSSE3-LABEL: combine_test1c:
1726 ; SSSE3-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
1727 ; SSSE3-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
1728 ; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
1729 ; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
1730 ; SSSE3-NEXT: movss {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
1733 ; SSE41-LABEL: combine_test1c:
1735 ; SSE41-NEXT: pmovzxbd {{.*#+}} xmm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
1736 ; SSE41-NEXT: pmovzxbd {{.*#+}} xmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
1737 ; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3,4,5,6,7]
1740 ; AVX1-LABEL: combine_test1c:
1742 ; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
1743 ; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
1744 ; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3,4,5,6,7]
1747 ; AVX2-LABEL: combine_test1c:
1749 ; AVX2-NEXT: vpmovzxbd {{.*#+}} xmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
1750 ; AVX2-NEXT: vpmovzxbd {{.*#+}} xmm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
1751 ; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
1753 %A = load <4 x i8>, <4 x i8>* %a
1754 %B = load <4 x i8>, <4 x i8>* %b
1755 %1 = shufflevector <4 x i8> %A, <4 x i8> %B, <4 x i32> <i32 0, i32 5, i32 2, i32 7>
1756 %2 = shufflevector <4 x i8> %1, <4 x i8> %B, <4 x i32> <i32 0, i32 1, i32 6, i32 3>
1760 define <4 x i8> @combine_test2c(<4 x i8>* %a, <4 x i8>* %b) {
1761 ; SSE2-LABEL: combine_test2c:
1763 ; SSE2-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
1764 ; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
1765 ; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
1766 ; SSE2-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
1767 ; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
1768 ; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
1769 ; SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
1772 ; SSSE3-LABEL: combine_test2c:
1774 ; SSSE3-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
1775 ; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
1776 ; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
1777 ; SSSE3-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
1778 ; SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
1779 ; SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
1780 ; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
1783 ; SSE41-LABEL: combine_test2c:
1785 ; SSE41-NEXT: pmovzxbd {{.*#+}} xmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
1786 ; SSE41-NEXT: pmovzxbd {{.*#+}} xmm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
1787 ; SSE41-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
1790 ; AVX-LABEL: combine_test2c:
1792 ; AVX-NEXT: vpmovzxbd {{.*#+}} xmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
1793 ; AVX-NEXT: vpmovzxbd {{.*#+}} xmm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
1794 ; AVX-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
1796 %A = load <4 x i8>, <4 x i8>* %a
1797 %B = load <4 x i8>, <4 x i8>* %b
1798 %1 = shufflevector <4 x i8> %A, <4 x i8> %B, <4 x i32> <i32 0, i32 5, i32 1, i32 5>
1799 %2 = shufflevector <4 x i8> %1, <4 x i8> %B, <4 x i32> <i32 0, i32 2, i32 4, i32 1>
1803 define <4 x i8> @combine_test3c(<4 x i8>* %a, <4 x i8>* %b) {
1804 ; SSE2-LABEL: combine_test3c:
1806 ; SSE2-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
1807 ; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
1808 ; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
1809 ; SSE2-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
1810 ; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
1811 ; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
1812 ; SSE2-NEXT: punpckhqdq {{.*#+}} xmm0 = xmm0[1],xmm1[1]
1815 ; SSSE3-LABEL: combine_test3c:
1817 ; SSSE3-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
1818 ; SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
1819 ; SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
1820 ; SSSE3-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
1821 ; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
1822 ; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
1823 ; SSSE3-NEXT: punpckhqdq {{.*#+}} xmm0 = xmm0[1],xmm1[1]
1826 ; SSE41-LABEL: combine_test3c:
1828 ; SSE41-NEXT: pmovzxbd {{.*#+}} xmm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
1829 ; SSE41-NEXT: pmovzxbd {{.*#+}} xmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
1830 ; SSE41-NEXT: punpckhqdq {{.*#+}} xmm0 = xmm0[1],xmm1[1]
1833 ; AVX-LABEL: combine_test3c:
1835 ; AVX-NEXT: vpmovzxbd {{.*#+}} xmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
1836 ; AVX-NEXT: vpmovzxbd {{.*#+}} xmm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
1837 ; AVX-NEXT: vpunpckhqdq {{.*#+}} xmm0 = xmm1[1],xmm0[1]
1839 %A = load <4 x i8>, <4 x i8>* %a
1840 %B = load <4 x i8>, <4 x i8>* %b
1841 %1 = shufflevector <4 x i8> %A, <4 x i8> %B, <4 x i32> <i32 2, i32 3, i32 5, i32 5>
1842 %2 = shufflevector <4 x i8> %1, <4 x i8> %B, <4 x i32> <i32 6, i32 7, i32 0, i32 1>
1846 define <4 x i8> @combine_test4c(<4 x i8>* %a, <4 x i8>* %b) {
1847 ; SSE2-LABEL: combine_test4c:
1849 ; SSE2-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
1850 ; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
1851 ; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
1852 ; SSE2-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
1853 ; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,0],xmm1[0,0]
1854 ; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
1855 ; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
1856 ; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0],xmm1[2,3]
1859 ; SSSE3-LABEL: combine_test4c:
1861 ; SSSE3-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
1862 ; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
1863 ; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
1864 ; SSSE3-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
1865 ; SSSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,0],xmm1[0,0]
1866 ; SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
1867 ; SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
1868 ; SSSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0],xmm1[2,3]
1871 ; SSE41-LABEL: combine_test4c:
1873 ; SSE41-NEXT: pmovzxbd {{.*#+}} xmm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
1874 ; SSE41-NEXT: pmovzxbd {{.*#+}} xmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
1875 ; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5,6,7]
1878 ; AVX1-LABEL: combine_test4c:
1880 ; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
1881 ; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
1882 ; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3],xmm1[4,5,6,7]
1885 ; AVX2-LABEL: combine_test4c:
1887 ; AVX2-NEXT: vpmovzxbd {{.*#+}} xmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
1888 ; AVX2-NEXT: vpmovzxbd {{.*#+}} xmm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
1889 ; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2,3]
1891 %A = load <4 x i8>, <4 x i8>* %a
1892 %B = load <4 x i8>, <4 x i8>* %b
1893 %1 = shufflevector <4 x i8> %A, <4 x i8> %B, <4 x i32> <i32 4, i32 1, i32 6, i32 3>
1894 %2 = shufflevector <4 x i8> %1, <4 x i8> %B, <4 x i32> <i32 0, i32 1, i32 2, i32 7>
1899 ; The following test cases are generated from this C++ code:
1900 ;
1901 ;__m128 blend_01(__m128 a, __m128 b)
1902 ;{
1903 ;  __m128 s = a;
1904 ;  s = _mm_blend_ps( s, b, 1<<0 );
1905 ;  s = _mm_blend_ps( s, b, 1<<1 );
1906 ;  return s;
1907 ;}
1908 ;
1909 ;__m128 blend_02(__m128 a, __m128 b)
1910 ;{
1911 ;  __m128 s = a;
1912 ;  s = _mm_blend_ps( s, b, 1<<0 );
1913 ;  s = _mm_blend_ps( s, b, 1<<2 );
1914 ;  return s;
1915 ;}
1916 ;
1917 ;__m128 blend_123(__m128 a, __m128 b)
1918 ;{
1919 ;  __m128 s = a;
1920 ;  s = _mm_blend_ps( s, b, 1<<1 );
1921 ;  s = _mm_blend_ps( s, b, 1<<2 );
1922 ;  s = _mm_blend_ps( s, b, 1<<3 );
1923 ;  return s;
1924 ;}
1926 ; Ideally, we should collapse the following shuffles into a single one.
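;
; For example (an editorial sketch, not an autogenerated test; the function
; name is hypothetical): the two masks in combine_blend_01 below, <4,u,2,3>
; and <0,5,2,3>, compose to the single blend mask <4,5,2,3>:
define <4 x float> @sketch_collapsed_blend_01(<4 x float> %a, <4 x float> %b) {
  ; Takes elements 0-1 from %b and elements 2-3 from %a, i.e. a single blendps.
  %s = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 4, i32 5, i32 2, i32 3>
  ret <4 x float> %s
}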
1928 define <4 x float> @combine_blend_01(<4 x float> %a, <4 x float> %b) {
1929 ; SSE2-LABEL: combine_blend_01:
1931 ; SSE2-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
1934 ; SSSE3-LABEL: combine_blend_01:
1936 ; SSSE3-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
1939 ; SSE41-LABEL: combine_blend_01:
1941 ; SSE41-NEXT: blendps {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3]
1944 ; AVX-LABEL: combine_blend_01:
1946 ; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3]
1948 %shuffle = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 4, i32 undef, i32 2, i32 3>
1949 %shuffle6 = shufflevector <4 x float> %shuffle, <4 x float> %b, <4 x i32> <i32 0, i32 5, i32 2, i32 3>
1950 ret <4 x float> %shuffle6
1953 define <4 x float> @combine_blend_02(<4 x float> %a, <4 x float> %b) {
1954 ; SSE2-LABEL: combine_blend_02:
1956 ; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2],xmm0[1,3]
1957 ; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2,1,3]
1958 ; SSE2-NEXT: movaps %xmm1, %xmm0
1961 ; SSSE3-LABEL: combine_blend_02:
1963 ; SSSE3-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2],xmm0[1,3]
1964 ; SSSE3-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2,1,3]
1965 ; SSSE3-NEXT: movaps %xmm1, %xmm0
1968 ; SSE41-LABEL: combine_blend_02:
1970 ; SSE41-NEXT: blendps {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2],xmm0[3]
1973 ; AVX-LABEL: combine_blend_02:
1975 ; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2],xmm0[3]
1977 %shuffle = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 4, i32 1, i32 undef, i32 3>
1978 %shuffle6 = shufflevector <4 x float> %shuffle, <4 x float> %b, <4 x i32> <i32 0, i32 1, i32 6, i32 3>
1979 ret <4 x float> %shuffle6
1982 define <4 x float> @combine_blend_123(<4 x float> %a, <4 x float> %b) {
1983 ; SSE2-LABEL: combine_blend_123:
1985 ; SSE2-NEXT: movss {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
1986 ; SSE2-NEXT: movaps %xmm1, %xmm0
1989 ; SSSE3-LABEL: combine_blend_123:
1991 ; SSSE3-NEXT: movss {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
1992 ; SSSE3-NEXT: movaps %xmm1, %xmm0
1995 ; SSE41-LABEL: combine_blend_123:
1997 ; SSE41-NEXT: blendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
2000 ; AVX-LABEL: combine_blend_123:
2002 ; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
2004 %shuffle = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 0, i32 5, i32 undef, i32 undef>
2005 %shuffle6 = shufflevector <4 x float> %shuffle, <4 x float> %b, <4 x i32> <i32 0, i32 1, i32 6, i32 undef>
2006 %shuffle12 = shufflevector <4 x float> %shuffle6, <4 x float> %b, <4 x i32> <i32 0, i32 1, i32 2, i32 7>
2007 ret <4 x float> %shuffle12
2010 define <4 x i32> @combine_test_movhl_1(<4 x i32> %a, <4 x i32> %b) {
2011 ; SSE-LABEL: combine_test_movhl_1:
2013 ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
2014 ; SSE-NEXT: movaps %xmm1, %xmm0
2017 ; AVX-LABEL: combine_test_movhl_1:
2019 ; AVX-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1]
2021 %1 = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 2, i32 7, i32 5, i32 3>
2022 %2 = shufflevector <4 x i32> %1, <4 x i32> %b, <4 x i32> <i32 6, i32 1, i32 0, i32 3>
2026 define <4 x i32> @combine_test_movhl_2(<4 x i32> %a, <4 x i32> %b) {
2027 ; SSE-LABEL: combine_test_movhl_2:
2029 ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
2030 ; SSE-NEXT: movaps %xmm1, %xmm0
2033 ; AVX-LABEL: combine_test_movhl_2:
2035 ; AVX-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1]
2037 %1 = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 2, i32 0, i32 3, i32 6>
2038 %2 = shufflevector <4 x i32> %1, <4 x i32> %b, <4 x i32> <i32 3, i32 7, i32 0, i32 2>
2042 define <4 x i32> @combine_test_movhl_3(<4 x i32> %a, <4 x i32> %b) {
2043 ; SSE-LABEL: combine_test_movhl_3:
2045 ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
2046 ; SSE-NEXT: movaps %xmm1, %xmm0
2049 ; AVX-LABEL: combine_test_movhl_3:
2051 ; AVX-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1]
2053 %1 = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 7, i32 6, i32 3, i32 2>
2054 %2 = shufflevector <4 x i32> %1, <4 x i32> %b, <4 x i32> <i32 6, i32 0, i32 3, i32 2>
2059 ; Verify that we fold shuffles according to the rule:
2060 ;  (shuffle(shuffle A, Undef, M0), B, M1) -> (shuffle A, B, M2)
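;
; Worked example (editorial sketch, not covered by the autogenerated checks;
; the function name is invented): with M0 = <4,2,3,1> and M1 = <4,5,1,2> as in
; combine_undef_input_test1 below, the composed mask M2 is <4,5,2,3>:
define <4 x float> @sketch_fold_through_undef_input(<4 x float> %A, <4 x float> %B) {
  ; Selects B[0], B[1], A[2], A[3] directly, with no intermediate shuffle.
  %r = shufflevector <4 x float> %A, <4 x float> %B, <4 x i32> <i32 4, i32 5, i32 2, i32 3>
  ret <4 x float> %r
}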
2062 define <4 x float> @combine_undef_input_test1(<4 x float> %a, <4 x float> %b) {
2063 ; SSE2-LABEL: combine_undef_input_test1:
2065 ; SSE2-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
2068 ; SSSE3-LABEL: combine_undef_input_test1:
2070 ; SSSE3-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
2073 ; SSE41-LABEL: combine_undef_input_test1:
2075 ; SSE41-NEXT: blendps {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3]
2078 ; AVX-LABEL: combine_undef_input_test1:
2080 ; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3]
2082 %1 = shufflevector <4 x float> %a, <4 x float> undef, <4 x i32> <i32 4, i32 2, i32 3, i32 1>
2083 %2 = shufflevector <4 x float> %1, <4 x float> %b, <4 x i32> <i32 4, i32 5, i32 1, i32 2>
2087 define <4 x float> @combine_undef_input_test2(<4 x float> %a, <4 x float> %b) {
2088 ; SSE-LABEL: combine_undef_input_test2:
2090 ; SSE-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
2093 ; AVX-LABEL: combine_undef_input_test2:
2095 ; AVX-NEXT: vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
2097 %1 = shufflevector <4 x float> %a, <4 x float> undef, <4 x i32> <i32 6, i32 0, i32 1, i32 7>
2098 %2 = shufflevector <4 x float> %1, <4 x float> %b, <4 x i32> <i32 1, i32 2, i32 4, i32 5>
2102 define <4 x float> @combine_undef_input_test3(<4 x float> %a, <4 x float> %b) {
2103 ; SSE-LABEL: combine_undef_input_test3:
2105 ; SSE-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
2108 ; AVX-LABEL: combine_undef_input_test3:
2110 ; AVX-NEXT: vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
2112 %1 = shufflevector <4 x float> %a, <4 x float> undef, <4 x i32> <i32 0, i32 5, i32 1, i32 7>
2113 %2 = shufflevector <4 x float> %1, <4 x float> %b, <4 x i32> <i32 0, i32 2, i32 4, i32 1>
2117 define <4 x float> @combine_undef_input_test4(<4 x float> %a, <4 x float> %b) {
2118 ; SSE-LABEL: combine_undef_input_test4:
2120 ; SSE-NEXT: movhlps {{.*#+}} xmm0 = xmm1[1],xmm0[1]
2123 ; AVX-LABEL: combine_undef_input_test4:
2125 ; AVX-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1]
2127 %1 = shufflevector <4 x float> %a, <4 x float> undef, <4 x i32> <i32 2, i32 3, i32 5, i32 5>
2128 %2 = shufflevector <4 x float> %1, <4 x float> %b, <4 x i32> <i32 6, i32 7, i32 0, i32 1>
2132 define <4 x float> @combine_undef_input_test5(<4 x float> %a, <4 x float> %b) {
2133 ; SSE2-LABEL: combine_undef_input_test5:
2135 ; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3]
2138 ; SSSE3-LABEL: combine_undef_input_test5:
2140 ; SSSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3]
2143 ; SSE41-LABEL: combine_undef_input_test5:
2145 ; SSE41-NEXT: blendps {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3]
2148 ; AVX-LABEL: combine_undef_input_test5:
2150 ; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3]
2152 %1 = shufflevector <4 x float> %a, <4 x float> undef, <4 x i32> <i32 0, i32 4, i32 1, i32 3>
2153 %2 = shufflevector <4 x float> %1, <4 x float> %b, <4 x i32> <i32 0, i32 2, i32 6, i32 7>
2158 ; Verify that we fold shuffles according to the rule:
2159 ;  (shuffle(shuffle A, Undef, M0), A, M1) -> (shuffle A, Undef, M2)
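;
; Worked example (editorial sketch, not covered by the autogenerated checks;
; the function name is invented): in combine_undef_input_test7 below,
; M0 = <6,0,1,7> and M1 = <1,2,4,5> compose to M2 = <0,1,0,1>, which lowers to
; a single movddup/vmovddup:
define <4 x float> @sketch_fold_single_input(<4 x float> %A) {
  ; Duplicates the low 64 bits of %A.
  %r = shufflevector <4 x float> %A, <4 x float> undef, <4 x i32> <i32 0, i32 1, i32 0, i32 1>
  ret <4 x float> %r
}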
2161 define <4 x float> @combine_undef_input_test6(<4 x float> %a) {
2162 ; CHECK-LABEL: combine_undef_input_test6:
2165 %1 = shufflevector <4 x float> %a, <4 x float> undef, <4 x i32> <i32 4, i32 2, i32 3, i32 1>
2166 %2 = shufflevector <4 x float> %1, <4 x float> %a, <4 x i32> <i32 4, i32 5, i32 1, i32 2>
2170 define <4 x float> @combine_undef_input_test7(<4 x float> %a) {
2171 ; SSE2-LABEL: combine_undef_input_test7:
2173 ; SSE2-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0,0]
2176 ; SSSE3-LABEL: combine_undef_input_test7:
2178 ; SSSE3-NEXT: movddup {{.*#+}} xmm0 = xmm0[0,0]
2181 ; SSE41-LABEL: combine_undef_input_test7:
2183 ; SSE41-NEXT: movddup {{.*#+}} xmm0 = xmm0[0,0]
2186 ; AVX-LABEL: combine_undef_input_test7:
2188 ; AVX-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
2190 %1 = shufflevector <4 x float> %a, <4 x float> undef, <4 x i32> <i32 6, i32 0, i32 1, i32 7>
2191 %2 = shufflevector <4 x float> %1, <4 x float> %a, <4 x i32> <i32 1, i32 2, i32 4, i32 5>
2195 define <4 x float> @combine_undef_input_test8(<4 x float> %a) {
2196 ; SSE2-LABEL: combine_undef_input_test8:
2198 ; SSE2-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0,0]
2201 ; SSSE3-LABEL: combine_undef_input_test8:
2203 ; SSSE3-NEXT: movddup {{.*#+}} xmm0 = xmm0[0,0]
2206 ; SSE41-LABEL: combine_undef_input_test8:
2208 ; SSE41-NEXT: movddup {{.*#+}} xmm0 = xmm0[0,0]
2211 ; AVX-LABEL: combine_undef_input_test8:
2213 ; AVX-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
2215 %1 = shufflevector <4 x float> %a, <4 x float> undef, <4 x i32> <i32 0, i32 5, i32 1, i32 7>
2216 %2 = shufflevector <4 x float> %1, <4 x float> %a, <4 x i32> <i32 0, i32 2, i32 4, i32 1>
2220 define <4 x float> @combine_undef_input_test9(<4 x float> %a) {
2221 ; SSE-LABEL: combine_undef_input_test9:
2223 ; SSE-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
2226 ; AVX-LABEL: combine_undef_input_test9:
2228 ; AVX-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,1]
2230 %1 = shufflevector <4 x float> %a, <4 x float> undef, <4 x i32> <i32 2, i32 3, i32 5, i32 5>
2231 %2 = shufflevector <4 x float> %1, <4 x float> %a, <4 x i32> <i32 6, i32 7, i32 0, i32 1>
2235 define <4 x float> @combine_undef_input_test10(<4 x float> %a) {
2236 ; CHECK-LABEL: combine_undef_input_test10:
2239 %1 = shufflevector <4 x float> %a, <4 x float> undef, <4 x i32> <i32 0, i32 4, i32 1, i32 3>
2240 %2 = shufflevector <4 x float> %1, <4 x float> %a, <4 x i32> <i32 0, i32 2, i32 6, i32 7>
2244 define <4 x float> @combine_undef_input_test11(<4 x float> %a, <4 x float> %b) {
2245 ; SSE2-LABEL: combine_undef_input_test11:
2247 ; SSE2-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
2250 ; SSSE3-LABEL: combine_undef_input_test11:
2252 ; SSSE3-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
2255 ; SSE41-LABEL: combine_undef_input_test11:
2257 ; SSE41-NEXT: blendps {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3]
2260 ; AVX-LABEL: combine_undef_input_test11:
2262 ; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3]
2264 %1 = shufflevector <4 x float> %a, <4 x float> undef, <4 x i32> <i32 4, i32 2, i32 3, i32 1>
2265 %2 = shufflevector <4 x float> %b, <4 x float> %1, <4 x i32> <i32 0, i32 1, i32 5, i32 6>
2269 define <4 x float> @combine_undef_input_test12(<4 x float> %a, <4 x float> %b) {
2270 ; SSE-LABEL: combine_undef_input_test12:
2272 ; SSE-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
2275 ; AVX-LABEL: combine_undef_input_test12:
2277 ; AVX-NEXT: vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
2279 %1 = shufflevector <4 x float> %a, <4 x float> undef, <4 x i32> <i32 6, i32 0, i32 1, i32 7>
2280 %2 = shufflevector <4 x float> %b, <4 x float> %1, <4 x i32> <i32 5, i32 6, i32 0, i32 1>
2284 define <4 x float> @combine_undef_input_test13(<4 x float> %a, <4 x float> %b) {
2285 ; SSE-LABEL: combine_undef_input_test13:
2287 ; SSE-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
2290 ; AVX-LABEL: combine_undef_input_test13:
2292 ; AVX-NEXT: vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
2294 %1 = shufflevector <4 x float> %a, <4 x float> undef, <4 x i32> <i32 0, i32 5, i32 1, i32 7>
2295 %2 = shufflevector <4 x float> %b, <4 x float> %1, <4 x i32> <i32 4, i32 5, i32 0, i32 5>
2299 define <4 x float> @combine_undef_input_test14(<4 x float> %a, <4 x float> %b) {
2300 ; SSE-LABEL: combine_undef_input_test14:
2302 ; SSE-NEXT: movhlps {{.*#+}} xmm0 = xmm1[1],xmm0[1]
2305 ; AVX-LABEL: combine_undef_input_test14:
2307 ; AVX-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1]
2309 %1 = shufflevector <4 x float> %a, <4 x float> undef, <4 x i32> <i32 2, i32 3, i32 5, i32 5>
2310 %2 = shufflevector <4 x float> %b, <4 x float> %1, <4 x i32> <i32 2, i32 3, i32 4, i32 5>
2314 define <4 x float> @combine_undef_input_test15(<4 x float> %a, <4 x float> %b) {
2315 ; SSE2-LABEL: combine_undef_input_test15:
2317 ; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3]
2320 ; SSSE3-LABEL: combine_undef_input_test15:
2322 ; SSSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3]
2325 ; SSE41-LABEL: combine_undef_input_test15:
2327 ; SSE41-NEXT: blendps {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3]
2330 ; AVX-LABEL: combine_undef_input_test15:
2332 ; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3]
2334 %1 = shufflevector <4 x float> %a, <4 x float> undef, <4 x i32> <i32 0, i32 4, i32 1, i32 3>
2335 %2 = shufflevector <4 x float> %b, <4 x float> %1, <4 x i32> <i32 4, i32 6, i32 2, i32 3>
2340 ; Verify that shuffles are canonicalized according to the rule:
2341 ;  shuffle(B, shuffle(A, Undef)) -> shuffle(shuffle(A, Undef), B)
2342 ;
2343 ; This allows us to trigger the following combine rule:
2344 ;  (shuffle(shuffle A, Undef, M0), A, M1) -> (shuffle A, Undef, M2)
2345 ;
2346 ; As a result, all the shuffle pairs in each function below should be
2347 ; combined into a single legal shuffle operation.
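;
; Worked example (editorial sketch, not covered by the autogenerated checks;
; the function name is invented): in combine_undef_input_test16 below,
; M0 = <4,2,3,1> and M1 = <0,1,5,3> compose to the identity mask <0,1,2,3>:
define <4 x float> @sketch_canonicalize_to_identity(<4 x float> %a) {
  ; The composed mask is the identity, so the whole shuffle pair folds away and
  ; %a is returned unchanged.
  ret <4 x float> %a
}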
2349 define <4 x float> @combine_undef_input_test16(<4 x float> %a) {
2350 ; CHECK-LABEL: combine_undef_input_test16:
2353 %1 = shufflevector <4 x float> %a, <4 x float> undef, <4 x i32> <i32 4, i32 2, i32 3, i32 1>
2354 %2 = shufflevector <4 x float> %a, <4 x float> %1, <4 x i32> <i32 0, i32 1, i32 5, i32 3>
2358 define <4 x float> @combine_undef_input_test17(<4 x float> %a) {
2359 ; SSE2-LABEL: combine_undef_input_test17:
2361 ; SSE2-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0,0]
2364 ; SSSE3-LABEL: combine_undef_input_test17:
2366 ; SSSE3-NEXT: movddup {{.*#+}} xmm0 = xmm0[0,0]
2369 ; SSE41-LABEL: combine_undef_input_test17:
2371 ; SSE41-NEXT: movddup {{.*#+}} xmm0 = xmm0[0,0]
2374 ; AVX-LABEL: combine_undef_input_test17:
2376 ; AVX-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
2378 %1 = shufflevector <4 x float> %a, <4 x float> undef, <4 x i32> <i32 6, i32 0, i32 1, i32 7>
2379 %2 = shufflevector <4 x float> %a, <4 x float> %1, <4 x i32> <i32 5, i32 6, i32 0, i32 1>
2383 define <4 x float> @combine_undef_input_test18(<4 x float> %a) {
2384 ; SSE2-LABEL: combine_undef_input_test18:
2386 ; SSE2-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0,0]
2389 ; SSSE3-LABEL: combine_undef_input_test18:
2391 ; SSSE3-NEXT: movddup {{.*#+}} xmm0 = xmm0[0,0]
2394 ; SSE41-LABEL: combine_undef_input_test18:
2396 ; SSE41-NEXT: movddup {{.*#+}} xmm0 = xmm0[0,0]
2399 ; AVX-LABEL: combine_undef_input_test18:
2401 ; AVX-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
2403 %1 = shufflevector <4 x float> %a, <4 x float> undef, <4 x i32> <i32 0, i32 5, i32 1, i32 7>
2404 %2 = shufflevector <4 x float> %a, <4 x float> %1, <4 x i32> <i32 4, i32 6, i32 0, i32 5>
2408 define <4 x float> @combine_undef_input_test19(<4 x float> %a) {
2409 ; SSE-LABEL: combine_undef_input_test19:
2411 ; SSE-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
2414 ; AVX-LABEL: combine_undef_input_test19:
2416 ; AVX-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,1]
2418 %1 = shufflevector <4 x float> %a, <4 x float> undef, <4 x i32> <i32 2, i32 3, i32 5, i32 5>
2419 %2 = shufflevector <4 x float> %a, <4 x float> %1, <4 x i32> <i32 2, i32 3, i32 4, i32 5>
2423 define <4 x float> @combine_undef_input_test20(<4 x float> %a) {
2424 ; CHECK-LABEL: combine_undef_input_test20:
2427 %1 = shufflevector <4 x float> %a, <4 x float> undef, <4 x i32> <i32 0, i32 4, i32 1, i32 3>
2428 %2 = shufflevector <4 x float> %a, <4 x float> %1, <4 x i32> <i32 4, i32 6, i32 2, i32 3>
2432 ; These tests exercise the ability to combine away unnecessary operations
2433 ; feeding into a shuffle. The AVX cases are the important ones, as they use
2434 ; operations which cannot be performed natively on the entire vector and are
2435 ; therefore decomposed into multiple smaller operations.
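;
; Editorial sketch (an assumption, not an autogenerated test; the function name
; is invented): in combine_unneeded_subvector1 below, the shuffle mask only
; reads lanes 4-7, so the add feeding lanes 0-3 is dead and the pattern is
; equivalent to updating just the upper half before splatting its reversal:
define <8 x i32> @sketch_unneeded_subvector(<8 x i32> %a) {
  ; Only the constants for lanes 4-7 matter; lanes 0-3 of the add are never used
  ; by the shuffle mask <7,6,5,4,7,6,5,4>.
  %b = add <8 x i32> %a, <i32 undef, i32 undef, i32 undef, i32 undef, i32 5, i32 6, i32 7, i32 8>
  %c = shufflevector <8 x i32> %b, <8 x i32> undef, <8 x i32> <i32 7, i32 6, i32 5, i32 4, i32 7, i32 6, i32 5, i32 4>
  ret <8 x i32> %c
}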
2437 define <8 x i32> @combine_unneeded_subvector1(<8 x i32> %a) {
2438 ; SSE-LABEL: combine_unneeded_subvector1:
2440 ; SSE-NEXT: paddd {{.*}}(%rip), %xmm1
2441 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[3,2,1,0]
2442 ; SSE-NEXT: movdqa %xmm0, %xmm1
2445 ; AVX1-LABEL: combine_unneeded_subvector1:
2447 ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
2448 ; AVX1-NEXT: vpaddd {{.*}}(%rip), %xmm0, %xmm0
2449 ; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
2450 ; AVX1-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[3,2,1,0,7,6,5,4]
2451 ; AVX1-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,2,3]
2454 ; AVX2-SLOW-LABEL: combine_unneeded_subvector1:
2455 ; AVX2-SLOW: # %bb.0:
2456 ; AVX2-SLOW-NEXT: vpaddd {{.*}}(%rip), %ymm0, %ymm0
2457 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[3,2,1,0,7,6,5,4]
2458 ; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,3,2,3]
2459 ; AVX2-SLOW-NEXT: retq
2461 ; AVX2-FAST-LABEL: combine_unneeded_subvector1:
2462 ; AVX2-FAST: # %bb.0:
2463 ; AVX2-FAST-NEXT: vpaddd {{.*}}(%rip), %ymm0, %ymm0
2464 ; AVX2-FAST-NEXT: vbroadcasti128 {{.*#+}} ymm1 = [7,6,5,4,7,6,5,4]
2465 ; AVX2-FAST-NEXT: # ymm1 = mem[0,1,0,1]
2466 ; AVX2-FAST-NEXT: vpermd %ymm0, %ymm1, %ymm0
2467 ; AVX2-FAST-NEXT: retq
2468 %b = add <8 x i32> %a, <i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8>
2469 %c = shufflevector <8 x i32> %b, <8 x i32> undef, <8 x i32> <i32 7, i32 6, i32 5, i32 4, i32 7, i32 6, i32 5, i32 4>
2473 define <8 x i32> @combine_unneeded_subvector2(<8 x i32> %a, <8 x i32> %b) {
2474 ; SSE-LABEL: combine_unneeded_subvector2:
2476 ; SSE-NEXT: paddd {{.*}}(%rip), %xmm1
2477 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm3[3,2,1,0]
2478 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[3,2,1,0]
2481 ; AVX1-LABEL: combine_unneeded_subvector2:
2483 ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
2484 ; AVX1-NEXT: vpaddd {{.*}}(%rip), %xmm0, %xmm0
2485 ; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
2486 ; AVX1-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[2,3]
2487 ; AVX1-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[3,2,1,0,7,6,5,4]
2490 ; AVX2-LABEL: combine_unneeded_subvector2:
2492 ; AVX2-NEXT: vpaddd {{.*}}(%rip), %ymm0, %ymm0
2493 ; AVX2-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[2,3]
2494 ; AVX2-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[3,2,1,0,7,6,5,4]
2496 %c = add <8 x i32> %a, <i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8>
2497 %d = shufflevector <8 x i32> %b, <8 x i32> %c, <8 x i32> <i32 7, i32 6, i32 5, i32 4, i32 15, i32 14, i32 13, i32 12>
2501 define <4 x float> @combine_insertps1(<4 x float> %a, <4 x float> %b) {
2502 ; SSE2-LABEL: combine_insertps1:
2504 ; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[2,0],xmm0[1,0]
2505 ; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2],xmm0[2,3]
2506 ; SSE2-NEXT: movaps %xmm1, %xmm0
2509 ; SSSE3-LABEL: combine_insertps1:
2511 ; SSSE3-NEXT: shufps {{.*#+}} xmm1 = xmm1[2,0],xmm0[1,0]
2512 ; SSSE3-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2],xmm0[2,3]
2513 ; SSSE3-NEXT: movaps %xmm1, %xmm0
2516 ; SSE41-LABEL: combine_insertps1:
2518 ; SSE41-NEXT: insertps {{.*#+}} xmm0 = xmm1[2],xmm0[1,2,3]
2521 ; AVX-LABEL: combine_insertps1:
2523 ; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[2],xmm0[1,2,3]
2526 %c = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32><i32 0, i32 6, i32 2, i32 4>
2527 %d = shufflevector <4 x float> %a, <4 x float> %c, <4 x i32> <i32 5, i32 1, i32 6, i32 3>
2531 define <4 x float> @combine_insertps2(<4 x float> %a, <4 x float> %b) {
2532 ; SSE2-LABEL: combine_insertps2:
2534 ; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[2,0],xmm0[0,0]
2535 ; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[2,0],xmm0[2,3]
2536 ; SSE2-NEXT: movaps %xmm1, %xmm0
2539 ; SSSE3-LABEL: combine_insertps2:
2541 ; SSSE3-NEXT: shufps {{.*#+}} xmm1 = xmm1[2,0],xmm0[0,0]
2542 ; SSSE3-NEXT: shufps {{.*#+}} xmm1 = xmm1[2,0],xmm0[2,3]
2543 ; SSSE3-NEXT: movaps %xmm1, %xmm0
2546 ; SSE41-LABEL: combine_insertps2:
2548 ; SSE41-NEXT: insertps {{.*#+}} xmm0 = xmm0[0],xmm1[2],xmm0[2,3]
2551 ; AVX-LABEL: combine_insertps2:
2553 ; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0],xmm1[2],xmm0[2,3]
2556 %c = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32><i32 0, i32 1, i32 6, i32 7>
2557 %d = shufflevector <4 x float> %a, <4 x float> %c, <4 x i32> <i32 4, i32 6, i32 2, i32 3>
2561 define <4 x float> @combine_insertps3(<4 x float> %a, <4 x float> %b) {
2562 ; SSE2-LABEL: combine_insertps3:
2564 ; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,0],xmm0[3,0]
2565 ; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0,2]
2568 ; SSSE3-LABEL: combine_insertps3:
2570 ; SSSE3-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,0],xmm0[3,0]
2571 ; SSSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0,2]
2574 ; SSE41-LABEL: combine_insertps3:
2576 ; SSE41-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0],xmm0[3]
2579 ; AVX-LABEL: combine_insertps3:
2581 ; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0],xmm0[3]
2584 %c = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32><i32 0, i32 4, i32 2, i32 5>
2585 %d = shufflevector <4 x float> %a, <4 x float> %c, <4 x i32><i32 4, i32 1, i32 5, i32 3>
2589 define <4 x float> @combine_insertps4(<4 x float> %a, <4 x float> %b) {
2590 ; SSE2-LABEL: combine_insertps4:
2592 ; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,0],xmm0[2,0]
2593 ; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,0]
2596 ; SSSE3-LABEL: combine_insertps4:
2598 ; SSSE3-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,0],xmm0[2,0]
2599 ; SSSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,0]
2602 ; SSE41-LABEL: combine_insertps4:
2604 ; SSE41-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[0]
2607 ; AVX-LABEL: combine_insertps4:
2609 ; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[0]
2612 %c = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32><i32 0, i32 4, i32 2, i32 5>
2613 %d = shufflevector <4 x float> %a, <4 x float> %c, <4 x i32><i32 4, i32 1, i32 6, i32 5>
2617 define void @combine_scalar_load_with_blend_with_zero(double* %a0, <4 x float>* %a1) {
2618 ; SSE-LABEL: combine_scalar_load_with_blend_with_zero:
2620 ; SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
2621 ; SSE-NEXT: movaps %xmm0, (%rsi)
2624 ; AVX-LABEL: combine_scalar_load_with_blend_with_zero:
2626 ; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
2627 ; AVX-NEXT: vmovaps %xmm0, (%rsi)
2629 %1 = load double, double* %a0, align 8
2630 %2 = insertelement <2 x double> undef, double %1, i32 0
2631 %3 = insertelement <2 x double> %2, double 0.000000e+00, i32 1
2632 %4 = bitcast <2 x double> %3 to <4 x float>
2633 %5 = shufflevector <4 x float> %4, <4 x float> <float 0.000000e+00, float undef, float undef, float undef>, <4 x i32> <i32 0, i32 1, i32 4, i32 3>
2634 store <4 x float> %5, <4 x float>* %a1, align 16
2639 define <4 x float> @combine_constant_insertion_v4f32(float %f) {
2640 ; SSE2-LABEL: combine_constant_insertion_v4f32:
2642 ; SSE2-NEXT: movaps {{.*#+}} xmm1 = <u,4.0E+0,5.0E+0,3.0E+0>
2643 ; SSE2-NEXT: movss {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
2644 ; SSE2-NEXT: movaps %xmm1, %xmm0
2647 ; SSSE3-LABEL: combine_constant_insertion_v4f32:
2649 ; SSSE3-NEXT: movaps {{.*#+}} xmm1 = <u,4.0E+0,5.0E+0,3.0E+0>
2650 ; SSSE3-NEXT: movss {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
2651 ; SSSE3-NEXT: movaps %xmm1, %xmm0
2654 ; SSE41-LABEL: combine_constant_insertion_v4f32:
2656 ; SSE41-NEXT: blendps {{.*#+}} xmm0 = xmm0[0],mem[1,2,3]
2659 ; AVX-LABEL: combine_constant_insertion_v4f32:
2661 ; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],mem[1,2,3]
2663 %a0 = insertelement <4 x float> undef, float %f, i32 0
2664 %ret = shufflevector <4 x float> %a0, <4 x float> <float undef, float 4.0, float 5.0, float 3.0>, <4 x i32> <i32 0, i32 5, i32 6, i32 7>
2665 ret <4 x float> %ret
2668 define <4 x i32> @combine_constant_insertion_v4i32(i32 %f) {
2669 ; SSE2-LABEL: combine_constant_insertion_v4i32:
2671 ; SSE2-NEXT: movd %edi, %xmm1
2672 ; SSE2-NEXT: movaps {{.*#+}} xmm0 = <u,4,5,30>
2673 ; SSE2-NEXT: movss {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
2676 ; SSSE3-LABEL: combine_constant_insertion_v4i32:
2678 ; SSSE3-NEXT: movd %edi, %xmm1
2679 ; SSSE3-NEXT: movaps {{.*#+}} xmm0 = <u,4,5,30>
2680 ; SSSE3-NEXT: movss {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
2683 ; SSE41-LABEL: combine_constant_insertion_v4i32:
2685 ; SSE41-NEXT: movdqa {{.*#+}} xmm0 = <u,4,5,30>
2686 ; SSE41-NEXT: pinsrd $0, %edi, %xmm0
2689 ; AVX-LABEL: combine_constant_insertion_v4i32:
2691 ; AVX-NEXT: vmovdqa {{.*#+}} xmm0 = <u,4,5,30>
2692 ; AVX-NEXT: vpinsrd $0, %edi, %xmm0, %xmm0
2694 %a0 = insertelement <4 x i32> undef, i32 %f, i32 0
2695 %ret = shufflevector <4 x i32> %a0, <4 x i32> <i32 undef, i32 4, i32 5, i32 30>, <4 x i32> <i32 0, i32 5, i32 6, i32 7>
2699 define <4 x float> @PR22377(<4 x float> %a, <4 x float> %b) {
2700 ; SSE2-LABEL: PR22377:
2701 ; SSE2: # %bb.0: # %entry
2702 ; SSE2-NEXT: movaps %xmm0, %xmm1
2703 ; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,3],xmm0[2,3]
2704 ; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2,0,2]
2705 ; SSE2-NEXT: addps %xmm0, %xmm1
2706 ; SSE2-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
2709 ; SSSE3-LABEL: PR22377:
2710 ; SSSE3: # %bb.0: # %entry
2711 ; SSSE3-NEXT: movaps %xmm0, %xmm1
2712 ; SSSE3-NEXT: haddps %xmm0, %xmm1
2713 ; SSSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,1]
2714 ; SSSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2,1,3]
2717 ; SSE41-LABEL: PR22377:
2718 ; SSE41: # %bb.0: # %entry
2719 ; SSE41-NEXT: movaps %xmm0, %xmm1
2720 ; SSE41-NEXT: haddps %xmm0, %xmm1
2721 ; SSE41-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,1]
2722 ; SSE41-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2,1,3]
2725 ; AVX-LABEL: PR22377:
2726 ; AVX: # %bb.0: # %entry
2727 ; AVX-NEXT: vhaddps %xmm0, %xmm0, %xmm1
2728 ; AVX-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,1]
2729 ; AVX-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,2,1,3]
2732 %s1 = shufflevector <4 x float> %a, <4 x float> undef, <4 x i32> <i32 1, i32 3, i32 1, i32 3>
2733 %s2 = shufflevector <4 x float> %a, <4 x float> undef, <4 x i32> <i32 0, i32 2, i32 0, i32 2>
2734 %r2 = fadd <4 x float> %s1, %s2
2735 %s3 = shufflevector <4 x float> %s2, <4 x float> %r2, <4 x i32> <i32 0, i32 4, i32 1, i32 5>
2739 define <4 x float> @PR22390(<4 x float> %a, <4 x float> %b) {
2740 ; SSE2-LABEL: PR22390:
2741 ; SSE2: # %bb.0: # %entry
2742 ; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,0,1,2]
2743 ; SSE2-NEXT: movaps %xmm0, %xmm2
2744 ; SSE2-NEXT: movss {{.*#+}} xmm2 = xmm1[0],xmm2[1,2,3]
2745 ; SSE2-NEXT: addps %xmm0, %xmm2
2746 ; SSE2-NEXT: movaps %xmm2, %xmm0
2749 ; SSSE3-LABEL: PR22390:
2750 ; SSSE3: # %bb.0: # %entry
2751 ; SSSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,0,1,2]
2752 ; SSSE3-NEXT: movaps %xmm0, %xmm2
2753 ; SSSE3-NEXT: movss {{.*#+}} xmm2 = xmm1[0],xmm2[1,2,3]
2754 ; SSSE3-NEXT: addps %xmm0, %xmm2
2755 ; SSSE3-NEXT: movaps %xmm2, %xmm0
2758 ; SSE41-LABEL: PR22390:
2759 ; SSE41: # %bb.0: # %entry
2760 ; SSE41-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,0,1,2]
2761 ; SSE41-NEXT: blendps {{.*#+}} xmm1 = xmm1[0],xmm0[1,2,3]
2762 ; SSE41-NEXT: addps %xmm1, %xmm0
2765 ; AVX-LABEL: PR22390:
2766 ; AVX: # %bb.0: # %entry
2767 ; AVX-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,0,1,2]
2768 ; AVX-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0],xmm0[1,2,3]
2769 ; AVX-NEXT: vaddps %xmm1, %xmm0, %xmm0
2772 %s1 = shufflevector <4 x float> %a, <4 x float> undef, <4 x i32> <i32 3, i32 0, i32 1, i32 2>
2773 %s2 = shufflevector <4 x float> %s1, <4 x float> %b, <4 x i32> <i32 4, i32 1, i32 2, i32 3>
2774 %r2 = fadd <4 x float> %s1, %s2
2778 define <8 x float> @PR22412(<8 x float> %a, <8 x float> %b) {
2779 ; SSE-LABEL: PR22412:
2780 ; SSE: # %bb.0: # %entry
2781 ; SSE-NEXT: movaps %xmm3, %xmm1
2782 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,0],xmm3[3,2]
2783 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,0],xmm2[3,2]
2786 ; AVX1-LABEL: PR22412:
2787 ; AVX1: # %bb.0: # %entry
2788 ; AVX1-NEXT: vperm2f128 {{.*#+}} ymm2 = ymm1[2,3,0,1]
2789 ; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
2790 ; AVX1-NEXT: vshufps {{.*#+}} ymm0 = ymm0[1,0],ymm2[3,2],ymm0[5,4],ymm2[7,6]
2793 ; AVX2-LABEL: PR22412:
2794 ; AVX2: # %bb.0: # %entry
2795 ; AVX2-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
2796 ; AVX2-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[2,3,0,1]
2797 ; AVX2-NEXT: vshufps {{.*#+}} ymm0 = ymm0[1,0],ymm1[3,2],ymm0[5,4],ymm1[7,6]
2800 %s1 = shufflevector <8 x float> %a, <8 x float> %b, <8 x i32> <i32 0, i32 1, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
2801 %s2 = shufflevector <8 x float> %s1, <8 x float> undef, <8 x i32> <i32 1, i32 0, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2>
2805 define <4 x float> @PR30264(<4 x float> %x) {
2806 ; SSE2-LABEL: PR30264:
2808 ; SSE2-NEXT: xorps %xmm1, %xmm1
2809 ; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,0],xmm0[0,0]
2810 ; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[2,0],mem[2,3]
2811 ; SSE2-NEXT: movaps %xmm1, %xmm0
2814 ; SSSE3-LABEL: PR30264:
2816 ; SSSE3-NEXT: xorps %xmm1, %xmm1
2817 ; SSSE3-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,0],xmm0[0,0]
2818 ; SSSE3-NEXT: shufps {{.*#+}} xmm1 = xmm1[2,0],mem[2,3]
2819 ; SSSE3-NEXT: movaps %xmm1, %xmm0
2822 ; SSE41-LABEL: PR30264:
2824 ; SSE41-NEXT: movaps {{.*#+}} xmm1 = <u,u,4.0E+0,1.0E+0>
2825 ; SSE41-NEXT: insertps {{.*#+}} xmm1 = xmm0[0],zero,xmm1[2,3]
2826 ; SSE41-NEXT: movaps %xmm1, %xmm0
2829 ; AVX-LABEL: PR30264:
2831 ; AVX-NEXT: vmovaps {{.*#+}} xmm1 = <u,u,4.0E+0,1.0E+0>
2832 ; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0],zero,xmm1[2,3]
2834 %shuf1 = shufflevector <4 x float> %x, <4 x float> <float undef, float 0.0, float undef, float undef>, <4 x i32> <i32 0, i32 5, i32 undef, i32 undef>
2835 %shuf2 = shufflevector <4 x float> %shuf1, <4 x float> <float undef, float undef, float 4.0, float 1.0>, <4 x i32> <i32 0, i32 1, i32 6, i32 7>
2836 ret <4 x float> %shuf2
2839 define <8 x i16> @PR39549(<16 x i8> %x) {
2840 ; SSE-LABEL: PR39549:
2842 ; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
2843 ; SSE-NEXT: psraw $8, %xmm0
2846 ; AVX-LABEL: PR39549:
2848 ; AVX-NEXT: vpunpckhbw {{.*#+}} xmm0 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
2849 ; AVX-NEXT: vpsraw $8, %xmm0, %xmm0
2851 %a = shufflevector <16 x i8> %x, <16 x i8> undef, <16 x i32> <i32 8, i32 undef, i32 9, i32 undef, i32 10, i32 undef, i32 11, i32 undef, i32 12, i32 undef, i32 13, i32 undef, i32 14, i32 undef, i32 15, i32 undef>
2852 %b = bitcast <16 x i8> %a to <8 x i16>
2853 %c = shl <8 x i16> %b, <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>
2854 %d = ashr <8 x i16> %c, <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>
2858 define <4 x i32> @PR41545(<4 x i32> %a0, <16 x i8> %a1) {
2859 ; SSE-LABEL: PR41545:
2861 ; SSE-NEXT: paddd %xmm1, %xmm0
2864 ; AVX-LABEL: PR41545:
2866 ; AVX-NEXT: vpaddd %xmm1, %xmm0, %xmm0
2868 %1 = shufflevector <16 x i8> %a1, <16 x i8> undef, <4 x i32> <i32 0, i32 4, i32 8, i32 12>
2869 %2 = shufflevector <16 x i8> %a1, <16 x i8> undef, <4 x i32> <i32 1, i32 5, i32 9, i32 13>
2870 %3 = shufflevector <16 x i8> %a1, <16 x i8> undef, <4 x i32> <i32 2, i32 6, i32 10, i32 14>
2871 %4 = shufflevector <16 x i8> %a1, <16 x i8> undef, <4 x i32> <i32 3, i32 7, i32 11, i32 15>
2872 %5 = zext <4 x i8> %1 to <4 x i32>
2873 %6 = zext <4 x i8> %2 to <4 x i32>
2874 %7 = zext <4 x i8> %3 to <4 x i32>
2875 %8 = zext <4 x i8> %4 to <4 x i32>
2876 %9 = shl <4 x i32> %6, <i32 8, i32 8, i32 8, i32 8>
2877 %10 = shl <4 x i32> %7, <i32 16, i32 16, i32 16, i32 16>
2878 %11 = shl <4 x i32> %8, <i32 24, i32 24, i32 24, i32 24>
2879 %12 = or <4 x i32> %5, %9
2880 %13 = or <4 x i32> %12, %10
2881 %14 = or <4 x i32> %13, %11
2882 %15 = add <4 x i32> %a0, %14