; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=CHECK,SSE,SSE2
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefixes=CHECK,SSE,SSE41
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=CHECK,AVX,AVX-SLOW
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2,+fast-variable-shuffle | FileCheck %s --check-prefixes=CHECK,AVX,AVX-FAST

; fold (shl 0, x) -> 0
define <4 x i32> @combine_vec_shl_zero(<4 x i32> %x) {
; SSE-LABEL: combine_vec_shl_zero:
; SSE:       # %bb.0:
; SSE-NEXT:    xorps %xmm0, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_shl_zero:
; AVX:       # %bb.0:
; AVX-NEXT:    vxorps %xmm0, %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = shl <4 x i32> zeroinitializer, %x
  ret <4 x i32> %1
}

; fold (shl x, c >= size(x)) -> undef
define <4 x i32> @combine_vec_shl_outofrange0(<4 x i32> %x) {
; CHECK-LABEL: combine_vec_shl_outofrange0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    retq
  %1 = shl <4 x i32> %x, <i32 33, i32 33, i32 33, i32 33>
  ret <4 x i32> %1
}

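; non-uniform shift amounts, each still >= the element width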
define <4 x i32> @combine_vec_shl_outofrange1(<4 x i32> %x) {
; CHECK-LABEL: combine_vec_shl_outofrange1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    retq
  %1 = shl <4 x i32> %x, <i32 33, i32 34, i32 35, i32 36>
  ret <4 x i32> %1
}

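; masking the shifted value first does not block the undef fold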
define <4 x i32> @combine_vec_shl_outofrange2(<4 x i32> %a0) {
; CHECK-LABEL: combine_vec_shl_outofrange2:
; CHECK:       # %bb.0:
; CHECK-NEXT:    retq
  %1 = and <4 x i32> %a0, <i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647>
  %2 = shl <4 x i32> %1, <i32 33, i32 33, i32 33, i32 33>
  ret <4 x i32> %2
}

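; out-of-range shift amounts with an undef element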
define <4 x i32> @combine_vec_shl_outofrange3(<4 x i32> %a0) {
; CHECK-LABEL: combine_vec_shl_outofrange3:
; CHECK:       # %bb.0:
; CHECK-NEXT:    retq
  %1 = shl <4 x i32> %a0, <i32 33, i32 34, i32 35, i32 undef>
  ret <4 x i32> %1
}

; fold (shl x, 0) -> x
define <4 x i32> @combine_vec_shl_by_zero(<4 x i32> %x) {
; CHECK-LABEL: combine_vec_shl_by_zero:
; CHECK:       # %bb.0:
; CHECK-NEXT:    retq
  %1 = shl <4 x i32> %x, zeroinitializer
  ret <4 x i32> %1
}

; if (shl x, c) is known to be zero, return 0
define <4 x i32> @combine_vec_shl_known_zero0(<4 x i32> %x) {
; SSE-LABEL: combine_vec_shl_known_zero0:
; SSE:       # %bb.0:
; SSE-NEXT:    xorps %xmm0, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_shl_known_zero0:
; AVX:       # %bb.0:
; AVX-NEXT:    vxorps %xmm0, %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = and <4 x i32> %x, <i32 4294901760, i32 4294901760, i32 4294901760, i32 4294901760>
  %2 = shl <4 x i32> %1, <i32 16, i32 16, i32 16, i32 16>
  ret <4 x i32> %2
}

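; negative test: these masks do not make every shifted lane known zero, so the shift remains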
define <4 x i32> @combine_vec_shl_known_zero1(<4 x i32> %x) {
; SSE2-LABEL: combine_vec_shl_known_zero1:
; SSE2:       # %bb.0:
; SSE2-NEXT:    pand {{.*}}(%rip), %xmm0
; SSE2-NEXT:    movdqa {{.*#+}} xmm1 = [65536,32768,16384,8192]
; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; SSE2-NEXT:    pmuludq %xmm1, %xmm0
; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
; SSE2-NEXT:    pmuludq %xmm2, %xmm1
; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; SSE2-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE2-NEXT:    retq
;
; SSE41-LABEL: combine_vec_shl_known_zero1:
; SSE41:       # %bb.0:
; SSE41-NEXT:    pand {{.*}}(%rip), %xmm0
; SSE41-NEXT:    pmulld {{.*}}(%rip), %xmm0
; SSE41-NEXT:    retq
;
; AVX-LABEL: combine_vec_shl_known_zero1:
; AVX:       # %bb.0:
; AVX-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT:    vpsllvd {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = and <4 x i32> %x, <i32 4294901760, i32 8589803520, i32 17179607040, i32 34359214080>
  %2 = shl <4 x i32> %1, <i32 16, i32 15, i32 14, i32 13>
  ret <4 x i32> %2
}

; fold (shl x, (trunc (and y, c))) -> (shl x, (and (trunc y), (trunc c))).
define <4 x i32> @combine_vec_shl_trunc_and(<4 x i32> %x, <4 x i64> %y) {
; SSE2-LABEL: combine_vec_shl_trunc_and:
; SSE2:       # %bb.0:
; SSE2-NEXT:    shufps {{.*#+}} xmm1 = xmm1[0,2],xmm2[0,2]
; SSE2-NEXT:    andps {{.*}}(%rip), %xmm1
; SSE2-NEXT:    pslld $23, %xmm1
; SSE2-NEXT:    paddd {{.*}}(%rip), %xmm1
; SSE2-NEXT:    cvttps2dq %xmm1, %xmm1
; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; SSE2-NEXT:    pmuludq %xmm1, %xmm0
; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
; SSE2-NEXT:    pmuludq %xmm2, %xmm1
; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; SSE2-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE2-NEXT:    retq
;
; SSE41-LABEL: combine_vec_shl_trunc_and:
; SSE41:       # %bb.0:
; SSE41-NEXT:    shufps {{.*#+}} xmm1 = xmm1[0,2],xmm2[0,2]
; SSE41-NEXT:    andps {{.*}}(%rip), %xmm1
; SSE41-NEXT:    pslld $23, %xmm1
; SSE41-NEXT:    paddd {{.*}}(%rip), %xmm1
; SSE41-NEXT:    cvttps2dq %xmm1, %xmm1
; SSE41-NEXT:    pmulld %xmm1, %xmm0
; SSE41-NEXT:    retq
;
; AVX-SLOW-LABEL: combine_vec_shl_trunc_and:
; AVX-SLOW:       # %bb.0:
; AVX-SLOW-NEXT:    vextractf128 $1, %ymm1, %xmm2
; AVX-SLOW-NEXT:    vshufps {{.*#+}} xmm1 = xmm1[0,2],xmm2[0,2]
; AVX-SLOW-NEXT:    vandps {{.*}}(%rip), %xmm1, %xmm1
; AVX-SLOW-NEXT:    vpsllvd %xmm1, %xmm0, %xmm0
; AVX-SLOW-NEXT:    vzeroupper
; AVX-SLOW-NEXT:    retq
;
; AVX-FAST-LABEL: combine_vec_shl_trunc_and:
; AVX-FAST:       # %bb.0:
; AVX-FAST-NEXT:    vmovdqa {{.*#+}} ymm2 = [0,2,4,6,4,6,6,7]
; AVX-FAST-NEXT:    vpermd %ymm1, %ymm2, %ymm1
; AVX-FAST-NEXT:    vpand {{.*}}(%rip), %xmm1, %xmm1
; AVX-FAST-NEXT:    vpsllvd %xmm1, %xmm0, %xmm0
; AVX-FAST-NEXT:    vzeroupper
; AVX-FAST-NEXT:    retq
  %1 = and <4 x i64> %y, <i64 15, i64 255, i64 4095, i64 65535>
  %2 = trunc <4 x i64> %1 to <4 x i32>
  %3 = shl <4 x i32> %x, %2
  ret <4 x i32> %3
}

; fold (shl (shl x, c1), c2) -> (shl x, (add c1, c2))
define <4 x i32> @combine_vec_shl_shl0(<4 x i32> %x) {
; SSE-LABEL: combine_vec_shl_shl0:
; SSE:       # %bb.0:
; SSE-NEXT:    pslld $6, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_shl_shl0:
; AVX:       # %bb.0:
; AVX-NEXT:    vpslld $6, %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = shl <4 x i32> %x, <i32 2, i32 2, i32 2, i32 2>
  %2 = shl <4 x i32> %1, <i32 4, i32 4, i32 4, i32 4>
  ret <4 x i32> %2
}

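; non-uniform amounts also merge: <0,1,2,3> + <4,5,6,7> -> <4,6,8,10>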
define <4 x i32> @combine_vec_shl_shl1(<4 x i32> %x) {
; SSE2-LABEL: combine_vec_shl_shl1:
; SSE2:       # %bb.0:
; SSE2-NEXT:    movdqa {{.*#+}} xmm1 = [16,64,256,1024]
; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; SSE2-NEXT:    pmuludq %xmm1, %xmm0
; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
; SSE2-NEXT:    pmuludq %xmm2, %xmm1
; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; SSE2-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE2-NEXT:    retq
;
; SSE41-LABEL: combine_vec_shl_shl1:
; SSE41:       # %bb.0:
; SSE41-NEXT:    pmulld {{.*}}(%rip), %xmm0
; SSE41-NEXT:    retq
;
; AVX-LABEL: combine_vec_shl_shl1:
; AVX:       # %bb.0:
; AVX-NEXT:    vpsllvd {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = shl <4 x i32> %x, <i32 0, i32 1, i32 2, i32 3>
  %2 = shl <4 x i32> %1, <i32 4, i32 5, i32 6, i32 7>
  ret <4 x i32> %2
}

; fold (shl (shl x, c1), c2) -> 0 if c1 + c2 >= size(x)
define <4 x i32> @combine_vec_shl_shl_zero0(<4 x i32> %x) {
; SSE-LABEL: combine_vec_shl_shl_zero0:
; SSE:       # %bb.0:
; SSE-NEXT:    xorps %xmm0, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_shl_shl_zero0:
; AVX:       # %bb.0:
; AVX-NEXT:    vxorps %xmm0, %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = shl <4 x i32> %x, <i32 16, i32 16, i32 16, i32 16>
  %2 = shl <4 x i32> %1, <i32 20, i32 20, i32 20, i32 20>
  ret <4 x i32> %2
}

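; non-uniform amounts whose per-lane sums all reach the element width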
define <4 x i32> @combine_vec_shl_shl_zero1(<4 x i32> %x) {
; SSE-LABEL: combine_vec_shl_shl_zero1:
; SSE:       # %bb.0:
; SSE-NEXT:    xorps %xmm0, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_shl_shl_zero1:
; AVX:       # %bb.0:
; AVX-NEXT:    vxorps %xmm0, %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = shl <4 x i32> %x, <i32 17, i32 18, i32 19, i32 20>
  %2 = shl <4 x i32> %1, <i32 25, i32 26, i32 27, i32 28>
  ret <4 x i32> %2
}

; fold (shl (ext (shl x, c1)), c2) -> (shl (ext x), (add c1, c2))
define <8 x i32> @combine_vec_shl_ext_shl0(<8 x i16> %x) {
; SSE2-LABEL: combine_vec_shl_ext_shl0:
; SSE2:       # %bb.0:
; SSE2-NEXT:    movdqa %xmm0, %xmm1
; SSE2-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
; SSE2-NEXT:    pslld $20, %xmm0
; SSE2-NEXT:    punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
; SSE2-NEXT:    pslld $20, %xmm1
; SSE2-NEXT:    retq
;
; SSE41-LABEL: combine_vec_shl_ext_shl0:
; SSE41:       # %bb.0:
; SSE41-NEXT:    movdqa %xmm0, %xmm1
; SSE41-NEXT:    pmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; SSE41-NEXT:    punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
; SSE41-NEXT:    pslld $20, %xmm1
; SSE41-NEXT:    pslld $20, %xmm0
; SSE41-NEXT:    retq
;
; AVX-LABEL: combine_vec_shl_ext_shl0:
; AVX:       # %bb.0:
; AVX-NEXT:    vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX-NEXT:    vpslld $20, %ymm0, %ymm0
; AVX-NEXT:    retq
  %1 = shl <8 x i16> %x, <i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4>
  %2 = sext <8 x i16> %1 to <8 x i32>
  %3 = shl <8 x i32> %2, <i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16>
  ret <8 x i32> %3
}

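; the per-lane sums all reach 32, so the whole expression folds to zero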
define <8 x i32> @combine_vec_shl_ext_shl1(<8 x i16> %x) {
; SSE-LABEL: combine_vec_shl_ext_shl1:
; SSE:       # %bb.0:
; SSE-NEXT:    xorps %xmm0, %xmm0
; SSE-NEXT:    xorps %xmm1, %xmm1
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_shl_ext_shl1:
; AVX:       # %bb.0:
; AVX-NEXT:    vxorps %xmm0, %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = shl <8 x i16> %x, <i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8>
  %2 = sext <8 x i16> %1 to <8 x i32>
  %3 = shl <8 x i32> %2, <i32 31, i32 31, i32 30, i32 30, i32 29, i32 29, i32 28, i32 28>
  ret <8 x i32> %3
}

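; the per-lane sums stay below 32, so both shifts fold into the extended type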
define <8 x i32> @combine_vec_shl_ext_shl2(<8 x i16> %x) {
; SSE2-LABEL: combine_vec_shl_ext_shl2:
; SSE2:       # %bb.0:
; SSE2-NEXT:    punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; SSE2-NEXT:    psrad $16, %xmm1
; SSE2-NEXT:    movdqa {{.*#+}} xmm3 = [131072,524288,2097152,8388608]
; SSE2-NEXT:    pshufd {{.*#+}} xmm4 = xmm1[1,1,3,3]
; SSE2-NEXT:    pmuludq %xmm3, %xmm1
; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm1[0,2,2,3]
; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm3[1,1,3,3]
; SSE2-NEXT:    pmuludq %xmm4, %xmm1
; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; SSE2-NEXT:    punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
; SSE2-NEXT:    punpckhwd {{.*#+}} xmm0 = xmm0[4,4,5,5,6,6,7,7]
; SSE2-NEXT:    psrad $16, %xmm0
; SSE2-NEXT:    movdqa {{.*#+}} xmm3 = [33554432,134217728,536870912,2147483648]
; SSE2-NEXT:    pshufd {{.*#+}} xmm4 = xmm0[1,1,3,3]
; SSE2-NEXT:    pmuludq %xmm3, %xmm0
; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[0,2,2,3]
; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm3[1,1,3,3]
; SSE2-NEXT:    pmuludq %xmm4, %xmm0
; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SSE2-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; SSE2-NEXT:    movdqa %xmm2, %xmm0
; SSE2-NEXT:    retq
;
; SSE41-LABEL: combine_vec_shl_ext_shl2:
; SSE41:       # %bb.0:
; SSE41-NEXT:    pmovsxwd %xmm0, %xmm2
; SSE41-NEXT:    pmulld {{.*}}(%rip), %xmm2
; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
; SSE41-NEXT:    pmovsxwd %xmm0, %xmm1
; SSE41-NEXT:    pmulld {{.*}}(%rip), %xmm1
; SSE41-NEXT:    movdqa %xmm2, %xmm0
; SSE41-NEXT:    retq
;
; AVX-LABEL: combine_vec_shl_ext_shl2:
; AVX:       # %bb.0:
; AVX-NEXT:    vpmovsxwd %xmm0, %ymm0
; AVX-NEXT:    vpsllvd {{.*}}(%rip), %ymm0, %ymm0
; AVX-NEXT:    retq
  %1 = shl <8 x i16> %x, <i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8>
  %2 = sext <8 x i16> %1 to <8 x i32>
  %3 = shl <8 x i32> %2, <i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23>
  ret <8 x i32> %3
}

; fold (shl (zext (srl x, C)), C) -> (zext (shl (srl x, C), C))
define <8 x i32> @combine_vec_shl_zext_lshr0(<8 x i16> %x) {
; SSE2-LABEL: combine_vec_shl_zext_lshr0:
; SSE2:       # %bb.0:
; SSE2-NEXT:    movdqa %xmm0, %xmm1
; SSE2-NEXT:    pand {{.*}}(%rip), %xmm1
; SSE2-NEXT:    pxor %xmm2, %xmm2
; SSE2-NEXT:    movdqa %xmm1, %xmm0
; SSE2-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
; SSE2-NEXT:    punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
; SSE2-NEXT:    retq
;
; SSE41-LABEL: combine_vec_shl_zext_lshr0:
; SSE41:       # %bb.0:
; SSE41-NEXT:    pand {{.*}}(%rip), %xmm0
; SSE41-NEXT:    pmovzxwd {{.*#+}} xmm2 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
; SSE41-NEXT:    pmovzxwd {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; SSE41-NEXT:    movdqa %xmm2, %xmm0
; SSE41-NEXT:    retq
;
; AVX-LABEL: combine_vec_shl_zext_lshr0:
; AVX:       # %bb.0:
; AVX-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT:    vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX-NEXT:    retq
  %1 = lshr <8 x i16> %x, <i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4>
  %2 = zext <8 x i16> %1 to <8 x i32>
  %3 = shl <8 x i32> %2, <i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4>
  ret <8 x i32> %3
}

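; non-uniform amounts, still matching between the lshr and the shl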
define <8 x i32> @combine_vec_shl_zext_lshr1(<8 x i16> %x) {
; SSE2-LABEL: combine_vec_shl_zext_lshr1:
; SSE2:       # %bb.0:
; SSE2-NEXT:    movdqa %xmm0, %xmm1
; SSE2-NEXT:    pmulhuw {{.*}}(%rip), %xmm1
; SSE2-NEXT:    pxor %xmm2, %xmm2
; SSE2-NEXT:    pmullw {{.*}}(%rip), %xmm1
; SSE2-NEXT:    movdqa %xmm1, %xmm0
; SSE2-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
; SSE2-NEXT:    punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
; SSE2-NEXT:    retq
;
; SSE41-LABEL: combine_vec_shl_zext_lshr1:
; SSE41:       # %bb.0:
; SSE41-NEXT:    pmulhuw {{.*}}(%rip), %xmm0
; SSE41-NEXT:    pmullw {{.*}}(%rip), %xmm0
; SSE41-NEXT:    pmovzxwd {{.*#+}} xmm2 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
; SSE41-NEXT:    pmovzxwd {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; SSE41-NEXT:    movdqa %xmm2, %xmm0
; SSE41-NEXT:    retq
;
; AVX-LABEL: combine_vec_shl_zext_lshr1:
; AVX:       # %bb.0:
; AVX-NEXT:    vpmulhuw {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT:    vpmullw {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT:    vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX-NEXT:    retq
  %1 = lshr <8 x i16> %x, <i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8>
  %2 = zext <8 x i16> %1 to <8 x i32>
  %3 = shl <8 x i32> %2, <i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8>
  ret <8 x i32> %3
}

; fold (shl (sr[la] exact X, C1), C2) -> (shl X, (C2-C1)) if C1 <= C2
define <4 x i32> @combine_vec_shl_ge_ashr_exact0(<4 x i32> %x) {
; SSE-LABEL: combine_vec_shl_ge_ashr_exact0:
; SSE:       # %bb.0:
; SSE-NEXT:    pslld $2, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_shl_ge_ashr_exact0:
; AVX:       # %bb.0:
; AVX-NEXT:    vpslld $2, %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = ashr exact <4 x i32> %x, <i32 3, i32 3, i32 3, i32 3>
  %2 = shl <4 x i32> %1, <i32 5, i32 5, i32 5, i32 5>
  ret <4 x i32> %2
}

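; non-uniform amounts, still with C1 <= C2 in every lane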
define <4 x i32> @combine_vec_shl_ge_ashr_exact1(<4 x i32> %x) {
; SSE2-LABEL: combine_vec_shl_ge_ashr_exact1:
; SSE2:       # %bb.0:
; SSE2-NEXT:    movdqa %xmm0, %xmm1
; SSE2-NEXT:    psrad $5, %xmm1
; SSE2-NEXT:    movdqa %xmm0, %xmm2
; SSE2-NEXT:    psrad $3, %xmm2
; SSE2-NEXT:    shufps {{.*#+}} xmm2 = xmm2[0,3],xmm1[2,3]
; SSE2-NEXT:    movdqa %xmm0, %xmm1
; SSE2-NEXT:    psrad $8, %xmm1
; SSE2-NEXT:    psrad $4, %xmm0
; SSE2-NEXT:    shufps {{.*#+}} xmm0 = xmm0[1,1],xmm1[3,3]
; SSE2-NEXT:    movdqa {{.*#+}} xmm1 = [32,64,128,256]
; SSE2-NEXT:    pshufd {{.*#+}} xmm3 = xmm1[1,1,3,3]
; SSE2-NEXT:    pmuludq %xmm0, %xmm3
; SSE2-NEXT:    pshufd {{.*#+}} xmm3 = xmm3[0,2,2,3]
; SSE2-NEXT:    pmuludq %xmm1, %xmm2
; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[0,2,2,3]
; SSE2-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
; SSE2-NEXT:    retq
;
; SSE41-LABEL: combine_vec_shl_ge_ashr_exact1:
; SSE41:       # %bb.0:
; SSE41-NEXT:    movdqa %xmm0, %xmm1
; SSE41-NEXT:    psrad $8, %xmm1
; SSE41-NEXT:    movdqa %xmm0, %xmm2
; SSE41-NEXT:    psrad $4, %xmm2
; SSE41-NEXT:    pblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm1[4,5,6,7]
; SSE41-NEXT:    movdqa %xmm0, %xmm1
; SSE41-NEXT:    psrad $5, %xmm1
; SSE41-NEXT:    psrad $3, %xmm0
; SSE41-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
; SSE41-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
; SSE41-NEXT:    pmulld {{.*}}(%rip), %xmm0
; SSE41-NEXT:    retq
;
; AVX-LABEL: combine_vec_shl_ge_ashr_exact1:
; AVX:       # %bb.0:
; AVX-NEXT:    vpsravd {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT:    vpsllvd {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = ashr exact <4 x i32> %x, <i32 3, i32 4, i32 5, i32 8>
  %2 = shl <4 x i32> %1, <i32 5, i32 6, i32 7, i32 8>
  ret <4 x i32> %2
}

; fold (shl (sr[la] exact X, C1), C2) -> (sr[la] X, (C1-C2)) if C1 > C2
define <4 x i32> @combine_vec_shl_lt_ashr_exact0(<4 x i32> %x) {
; SSE-LABEL: combine_vec_shl_lt_ashr_exact0:
; SSE:       # %bb.0:
; SSE-NEXT:    psrad $2, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_shl_lt_ashr_exact0:
; AVX:       # %bb.0:
; AVX-NEXT:    vpsrad $2, %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = ashr exact <4 x i32> %x, <i32 5, i32 5, i32 5, i32 5>
  %2 = shl <4 x i32> %1, <i32 3, i32 3, i32 3, i32 3>
  ret <4 x i32> %2
}

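; non-uniform amounts, still with C1 >= C2 in every lane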
define <4 x i32> @combine_vec_shl_lt_ashr_exact1(<4 x i32> %x) {
; SSE2-LABEL: combine_vec_shl_lt_ashr_exact1:
; SSE2:       # %bb.0:
; SSE2-NEXT:    movdqa %xmm0, %xmm1
; SSE2-NEXT:    psrad $7, %xmm1
; SSE2-NEXT:    movdqa %xmm0, %xmm2
; SSE2-NEXT:    psrad $5, %xmm2
; SSE2-NEXT:    shufps {{.*#+}} xmm2 = xmm2[0,3],xmm1[2,3]
; SSE2-NEXT:    movdqa %xmm0, %xmm1
; SSE2-NEXT:    psrad $8, %xmm1
; SSE2-NEXT:    psrad $6, %xmm0
; SSE2-NEXT:    shufps {{.*#+}} xmm0 = xmm0[1,1],xmm1[3,3]
; SSE2-NEXT:    movdqa {{.*#+}} xmm1 = [8,16,32,256]
; SSE2-NEXT:    pshufd {{.*#+}} xmm3 = xmm1[1,1,3,3]
; SSE2-NEXT:    pmuludq %xmm0, %xmm3
; SSE2-NEXT:    pshufd {{.*#+}} xmm3 = xmm3[0,2,2,3]
; SSE2-NEXT:    pmuludq %xmm1, %xmm2
; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[0,2,2,3]
; SSE2-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
; SSE2-NEXT:    retq
;
; SSE41-LABEL: combine_vec_shl_lt_ashr_exact1:
; SSE41:       # %bb.0:
; SSE41-NEXT:    movdqa %xmm0, %xmm1
; SSE41-NEXT:    psrad $8, %xmm1
; SSE41-NEXT:    movdqa %xmm0, %xmm2
; SSE41-NEXT:    psrad $6, %xmm2
; SSE41-NEXT:    pblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm1[4,5,6,7]
; SSE41-NEXT:    movdqa %xmm0, %xmm1
; SSE41-NEXT:    psrad $7, %xmm1
; SSE41-NEXT:    psrad $5, %xmm0
; SSE41-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
; SSE41-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
; SSE41-NEXT:    pmulld {{.*}}(%rip), %xmm0
; SSE41-NEXT:    retq
;
; AVX-LABEL: combine_vec_shl_lt_ashr_exact1:
; AVX:       # %bb.0:
; AVX-NEXT:    vpsravd {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT:    vpsllvd {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = ashr exact <4 x i32> %x, <i32 5, i32 6, i32 7, i32 8>
  %2 = shl <4 x i32> %1, <i32 3, i32 4, i32 5, i32 8>
  ret <4 x i32> %2
}

; fold (shl (srl x, c1), c2) -> (and (shl x, (sub c2, c1)), MASK) if C2 > C1
define <4 x i32> @combine_vec_shl_gt_lshr0(<4 x i32> %x) {
; SSE-LABEL: combine_vec_shl_gt_lshr0:
; SSE:       # %bb.0:
; SSE-NEXT:    pslld $2, %xmm0
; SSE-NEXT:    pand {{.*}}(%rip), %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_shl_gt_lshr0:
; AVX:       # %bb.0:
; AVX-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [4294967264,4294967264,4294967264,4294967264]
; AVX-NEXT:    vpslld $2, %xmm0, %xmm0
; AVX-NEXT:    vpand %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = lshr <4 x i32> %x, <i32 3, i32 3, i32 3, i32 3>
  %2 = shl <4 x i32> %1, <i32 5, i32 5, i32 5, i32 5>
  ret <4 x i32> %2
}

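; non-uniform amounts with c2 >= c1 in every lane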
define <4 x i32> @combine_vec_shl_gt_lshr1(<4 x i32> %x) {
; SSE2-LABEL: combine_vec_shl_gt_lshr1:
; SSE2:       # %bb.0:
; SSE2-NEXT:    movdqa %xmm0, %xmm1
; SSE2-NEXT:    psrld $5, %xmm1
; SSE2-NEXT:    movdqa %xmm0, %xmm2
; SSE2-NEXT:    psrld $3, %xmm2
; SSE2-NEXT:    shufps {{.*#+}} xmm2 = xmm2[0,3],xmm1[2,3]
; SSE2-NEXT:    movdqa %xmm0, %xmm1
; SSE2-NEXT:    psrld $8, %xmm1
; SSE2-NEXT:    psrld $4, %xmm0
; SSE2-NEXT:    shufps {{.*#+}} xmm0 = xmm0[1,1],xmm1[3,3]
; SSE2-NEXT:    movdqa {{.*#+}} xmm1 = [32,64,128,256]
; SSE2-NEXT:    pshufd {{.*#+}} xmm3 = xmm1[1,1,3,3]
; SSE2-NEXT:    pmuludq %xmm0, %xmm3
; SSE2-NEXT:    pshufd {{.*#+}} xmm3 = xmm3[0,2,2,3]
; SSE2-NEXT:    pmuludq %xmm1, %xmm2
; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[0,2,2,3]
; SSE2-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
; SSE2-NEXT:    retq
;
; SSE41-LABEL: combine_vec_shl_gt_lshr1:
; SSE41:       # %bb.0:
; SSE41-NEXT:    movdqa %xmm0, %xmm1
; SSE41-NEXT:    psrld $8, %xmm1
; SSE41-NEXT:    movdqa %xmm0, %xmm2
; SSE41-NEXT:    psrld $4, %xmm2
; SSE41-NEXT:    pblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm1[4,5,6,7]
; SSE41-NEXT:    movdqa %xmm0, %xmm1
; SSE41-NEXT:    psrld $5, %xmm1
; SSE41-NEXT:    psrld $3, %xmm0
; SSE41-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
; SSE41-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
; SSE41-NEXT:    pmulld {{.*}}(%rip), %xmm0
; SSE41-NEXT:    retq
;
; AVX-LABEL: combine_vec_shl_gt_lshr1:
; AVX:       # %bb.0:
; AVX-NEXT:    vpsrlvd {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT:    vpsllvd {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = lshr <4 x i32> %x, <i32 3, i32 4, i32 5, i32 8>
  %2 = shl <4 x i32> %1, <i32 5, i32 6, i32 7, i32 8>
  ret <4 x i32> %2
}

; fold (shl (srl x, c1), c2) -> (and (srl x, (sub c1, c2)), MASK) if C1 >= C2
define <4 x i32> @combine_vec_shl_le_lshr0(<4 x i32> %x) {
; SSE-LABEL: combine_vec_shl_le_lshr0:
; SSE:       # %bb.0:
; SSE-NEXT:    psrld $2, %xmm0
; SSE-NEXT:    pand {{.*}}(%rip), %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_shl_le_lshr0:
; AVX:       # %bb.0:
; AVX-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [1073741816,1073741816,1073741816,1073741816]
; AVX-NEXT:    vpsrld $2, %xmm0, %xmm0
; AVX-NEXT:    vpand %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = lshr <4 x i32> %x, <i32 5, i32 5, i32 5, i32 5>
  %2 = shl <4 x i32> %1, <i32 3, i32 3, i32 3, i32 3>
  ret <4 x i32> %2
}

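; non-uniform amounts with c1 >= c2 in every lane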
define <4 x i32> @combine_vec_shl_le_lshr1(<4 x i32> %x) {
; SSE2-LABEL: combine_vec_shl_le_lshr1:
; SSE2:       # %bb.0:
; SSE2-NEXT:    movdqa %xmm0, %xmm1
; SSE2-NEXT:    psrld $7, %xmm1
; SSE2-NEXT:    movdqa %xmm0, %xmm2
; SSE2-NEXT:    psrld $5, %xmm2
; SSE2-NEXT:    shufps {{.*#+}} xmm2 = xmm2[0,3],xmm1[2,3]
; SSE2-NEXT:    movdqa %xmm0, %xmm1
; SSE2-NEXT:    psrld $8, %xmm1
; SSE2-NEXT:    psrld $6, %xmm0
; SSE2-NEXT:    shufps {{.*#+}} xmm0 = xmm0[1,1],xmm1[3,3]
; SSE2-NEXT:    movdqa {{.*#+}} xmm1 = [8,16,32,256]
; SSE2-NEXT:    pshufd {{.*#+}} xmm3 = xmm1[1,1,3,3]
; SSE2-NEXT:    pmuludq %xmm0, %xmm3
; SSE2-NEXT:    pshufd {{.*#+}} xmm3 = xmm3[0,2,2,3]
; SSE2-NEXT:    pmuludq %xmm1, %xmm2
; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[0,2,2,3]
; SSE2-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
; SSE2-NEXT:    retq
;
; SSE41-LABEL: combine_vec_shl_le_lshr1:
; SSE41:       # %bb.0:
; SSE41-NEXT:    movdqa %xmm0, %xmm1
; SSE41-NEXT:    psrld $8, %xmm1
; SSE41-NEXT:    movdqa %xmm0, %xmm2
; SSE41-NEXT:    psrld $6, %xmm2
; SSE41-NEXT:    pblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm1[4,5,6,7]
; SSE41-NEXT:    movdqa %xmm0, %xmm1
; SSE41-NEXT:    psrld $7, %xmm1
; SSE41-NEXT:    psrld $5, %xmm0
; SSE41-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
; SSE41-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
; SSE41-NEXT:    pmulld {{.*}}(%rip), %xmm0
; SSE41-NEXT:    retq
;
; AVX-LABEL: combine_vec_shl_le_lshr1:
; AVX:       # %bb.0:
; AVX-NEXT:    vpsrlvd {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT:    vpsllvd {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = lshr <4 x i32> %x, <i32 5, i32 6, i32 7, i32 8>
  %2 = shl <4 x i32> %1, <i32 3, i32 4, i32 5, i32 8>
  ret <4 x i32> %2
}

; fold (shl (sra x, c1), c1) -> (and x, (shl -1, c1))
define <4 x i32> @combine_vec_shl_ashr0(<4 x i32> %x) {
; SSE-LABEL: combine_vec_shl_ashr0:
; SSE:       # %bb.0:
; SSE-NEXT:    andps {{.*}}(%rip), %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_shl_ashr0:
; AVX:       # %bb.0:
; AVX-NEXT:    vbroadcastss {{.*#+}} xmm1 = [4294967264,4294967264,4294967264,4294967264]
; AVX-NEXT:    vandps %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = ashr <4 x i32> %x, <i32 5, i32 5, i32 5, i32 5>
  %2 = shl <4 x i32> %1, <i32 5, i32 5, i32 5, i32 5>
  ret <4 x i32> %2
}

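; non-uniform but still matching sra and shl amounts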
define <4 x i32> @combine_vec_shl_ashr1(<4 x i32> %x) {
; SSE-LABEL: combine_vec_shl_ashr1:
; SSE:       # %bb.0:
; SSE-NEXT:    andps {{.*}}(%rip), %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_shl_ashr1:
; AVX:       # %bb.0:
; AVX-NEXT:    vandps {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = ashr <4 x i32> %x, <i32 5, i32 6, i32 7, i32 8>
  %2 = shl <4 x i32> %1, <i32 5, i32 6, i32 7, i32 8>
  ret <4 x i32> %2
}

; fold (shl (add x, c1), c2) -> (add (shl x, c2), c1 << c2)
define <4 x i32> @combine_vec_shl_add0(<4 x i32> %x) {
; SSE-LABEL: combine_vec_shl_add0:
; SSE:       # %bb.0:
; SSE-NEXT:    pslld $2, %xmm0
; SSE-NEXT:    paddd {{.*}}(%rip), %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_shl_add0:
; AVX:       # %bb.0:
; AVX-NEXT:    vpslld $2, %xmm0, %xmm0
; AVX-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [20,20,20,20]
; AVX-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = add <4 x i32> %x, <i32 5, i32 5, i32 5, i32 5>
  %2 = shl <4 x i32> %1, <i32 2, i32 2, i32 2, i32 2>
  ret <4 x i32> %2
}

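; non-uniform add and shift constants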
define <4 x i32> @combine_vec_shl_add1(<4 x i32> %x) {
; SSE2-LABEL: combine_vec_shl_add1:
; SSE2:       # %bb.0:
; SSE2-NEXT:    movdqa {{.*#+}} xmm1 = [2,4,8,16]
; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; SSE2-NEXT:    pmuludq %xmm1, %xmm0
; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
; SSE2-NEXT:    pmuludq %xmm2, %xmm1
; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; SSE2-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE2-NEXT:    paddd {{.*}}(%rip), %xmm0
; SSE2-NEXT:    retq
;
; SSE41-LABEL: combine_vec_shl_add1:
; SSE41:       # %bb.0:
; SSE41-NEXT:    pmulld {{.*}}(%rip), %xmm0
; SSE41-NEXT:    paddd {{.*}}(%rip), %xmm0
; SSE41-NEXT:    retq
;
; AVX-LABEL: combine_vec_shl_add1:
; AVX:       # %bb.0:
; AVX-NEXT:    vpsllvd {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT:    vpaddd {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = add <4 x i32> %x, <i32 5, i32 6, i32 7, i32 8>
  %2 = shl <4 x i32> %1, <i32 1, i32 2, i32 3, i32 4>
  ret <4 x i32> %2
}

; fold (shl (or x, c1), c2) -> (or (shl x, c2), c1 << c2)
define <4 x i32> @combine_vec_shl_or0(<4 x i32> %x) {
; SSE-LABEL: combine_vec_shl_or0:
; SSE:       # %bb.0:
; SSE-NEXT:    pslld $2, %xmm0
; SSE-NEXT:    por {{.*}}(%rip), %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_shl_or0:
; AVX:       # %bb.0:
; AVX-NEXT:    vpslld $2, %xmm0, %xmm0
; AVX-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [20,20,20,20]
; AVX-NEXT:    vpor %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = or <4 x i32> %x, <i32 5, i32 5, i32 5, i32 5>
  %2 = shl <4 x i32> %1, <i32 2, i32 2, i32 2, i32 2>
  ret <4 x i32> %2
}

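; non-uniform or and shift constants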
define <4 x i32> @combine_vec_shl_or1(<4 x i32> %x) {
; SSE2-LABEL: combine_vec_shl_or1:
; SSE2:       # %bb.0:
; SSE2-NEXT:    movdqa {{.*#+}} xmm1 = [2,4,8,16]
; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; SSE2-NEXT:    pmuludq %xmm1, %xmm0
; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
; SSE2-NEXT:    pmuludq %xmm2, %xmm1
; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; SSE2-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE2-NEXT:    por {{.*}}(%rip), %xmm0
; SSE2-NEXT:    retq
;
; SSE41-LABEL: combine_vec_shl_or1:
; SSE41:       # %bb.0:
; SSE41-NEXT:    pmulld {{.*}}(%rip), %xmm0
; SSE41-NEXT:    por {{.*}}(%rip), %xmm0
; SSE41-NEXT:    retq
;
; AVX-LABEL: combine_vec_shl_or1:
; AVX:       # %bb.0:
; AVX-NEXT:    vpsllvd {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT:    vpor {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = or <4 x i32> %x, <i32 5, i32 6, i32 7, i32 8>
  %2 = shl <4 x i32> %1, <i32 1, i32 2, i32 3, i32 4>
  ret <4 x i32> %2
}

; fold (shl (mul x, c1), c2) -> (mul x, c1 << c2)
define <4 x i32> @combine_vec_shl_mul0(<4 x i32> %x) {
; SSE2-LABEL: combine_vec_shl_mul0:
; SSE2:       # %bb.0:
; SSE2-NEXT:    movdqa {{.*#+}} xmm1 = [20,20,20,20]
; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; SSE2-NEXT:    pmuludq %xmm1, %xmm0
; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SSE2-NEXT:    pmuludq %xmm1, %xmm2
; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm2[0,2,2,3]
; SSE2-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE2-NEXT:    retq
;
; SSE41-LABEL: combine_vec_shl_mul0:
; SSE41:       # %bb.0:
; SSE41-NEXT:    pmulld {{.*}}(%rip), %xmm0
; SSE41-NEXT:    retq
;
; AVX-LABEL: combine_vec_shl_mul0:
; AVX:       # %bb.0:
; AVX-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [20,20,20,20]
; AVX-NEXT:    vpmulld %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = mul <4 x i32> %x, <i32 5, i32 5, i32 5, i32 5>
  %2 = shl <4 x i32> %1, <i32 2, i32 2, i32 2, i32 2>
  ret <4 x i32> %2
}

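; non-uniform constants: <5,6,7,8> shifted by <1,2,3,4> becomes a mul by <10,24,56,128>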
define <4 x i32> @combine_vec_shl_mul1(<4 x i32> %x) {
; SSE2-LABEL: combine_vec_shl_mul1:
; SSE2:       # %bb.0:
; SSE2-NEXT:    movdqa {{.*#+}} xmm1 = [10,24,56,128]
; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; SSE2-NEXT:    pmuludq %xmm1, %xmm0
; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
; SSE2-NEXT:    pmuludq %xmm2, %xmm1
; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; SSE2-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE2-NEXT:    retq
;
; SSE41-LABEL: combine_vec_shl_mul1:
; SSE41:       # %bb.0:
; SSE41-NEXT:    pmulld {{.*}}(%rip), %xmm0
; SSE41-NEXT:    retq
;
; AVX-LABEL: combine_vec_shl_mul1:
; AVX:       # %bb.0:
; AVX-NEXT:    vpmulld {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = mul <4 x i32> %x, <i32 5, i32 6, i32 7, i32 8>
  %2 = shl <4 x i32> %1, <i32 1, i32 2, i32 3, i32 4>