; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=CHECK,SSE,SSE2
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefixes=CHECK,SSE,SSE41
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=CHECK,AVX,AVX-SLOW
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2,+fast-variable-shuffle | FileCheck %s --check-prefixes=CHECK,AVX,AVX-FAST
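
; fold (shl 0, x) -> 0
; Shifting an all-zeros vector left by any amount still yields zero, so this
; is expected to fold to a constant zero (a single register-clearing xor).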
define <4 x i32> @combine_vec_shl_zero(<4 x i32> %x) {
; SSE-LABEL: combine_vec_shl_zero:
; SSE: # %bb.0:
; SSE-NEXT: xorps %xmm0, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_shl_zero:
; AVX: # %bb.0:
; AVX-NEXT: vxorps %xmm0, %xmm0, %xmm0
; AVX-NEXT: retq
  %1 = shl <4 x i32> zeroinitializer, %x
  ret <4 x i32> %1
}

; fold (shl x, c >= size(x)) -> undef
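; A shift amount of 32 or more on an i32 element is out of range (undef/poison
; per the LangRef), so the combiner may discard the whole computation and
; nothing beyond the return should be emitted.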
define <4 x i32> @combine_vec_shl_outofrange0(<4 x i32> %x) {
; CHECK-LABEL: combine_vec_shl_outofrange0:
; CHECK: # %bb.0:
; CHECK-NEXT: retq
  %1 = shl <4 x i32> %x, <i32 33, i32 33, i32 33, i32 33>
  ret <4 x i32> %1
}

define <4 x i32> @combine_vec_shl_outofrange1(<4 x i32> %x) {
; CHECK-LABEL: combine_vec_shl_outofrange1:
; CHECK: # %bb.0:
; CHECK-NEXT: retq
  %1 = shl <4 x i32> %x, <i32 33, i32 34, i32 35, i32 36>
  ret <4 x i32> %1
}

define <4 x i32> @combine_vec_shl_outofrange2(<4 x i32> %a0) {
; CHECK-LABEL: combine_vec_shl_outofrange2:
; CHECK: # %bb.0:
; CHECK-NEXT: retq
  %1 = and <4 x i32> %a0, <i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647>
  %2 = shl <4 x i32> %1, <i32 33, i32 33, i32 33, i32 33>
  ret <4 x i32> %2
}

define <4 x i32> @combine_vec_shl_outofrange3(<4 x i32> %a0) {
; CHECK-LABEL: combine_vec_shl_outofrange3:
; CHECK: # %bb.0:
; CHECK-NEXT: retq
  %1 = shl <4 x i32> %a0, <i32 33, i32 34, i32 35, i32 undef>
  ret <4 x i32> %1
}

; fold (shl x, 0) -> x
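; Shifting by zero is the identity: the argument already sits in %xmm0, so the
; function should compile to just a return.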
define <4 x i32> @combine_vec_shl_by_zero(<4 x i32> %x) {
; CHECK-LABEL: combine_vec_shl_by_zero:
; CHECK: # %bb.0:
; CHECK-NEXT: retq
  %1 = shl <4 x i32> %x, zeroinitializer
  ret <4 x i32> %1
}

; if (shl x, c) is known to be zero, return 0
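; In the first case every lane is masked with 0xFFFF0000 (4294901760) and then
; shifted left by 16, so every remaining set bit is pushed out of the element
; and the result is known to be zero.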
define <4 x i32> @combine_vec_shl_known_zero0(<4 x i32> %x) {
; SSE-LABEL: combine_vec_shl_known_zero0:
; SSE: # %bb.0:
; SSE-NEXT: xorps %xmm0, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_shl_known_zero0:
; AVX: # %bb.0:
; AVX-NEXT: vxorps %xmm0, %xmm0, %xmm0
; AVX-NEXT: retq
  %1 = and <4 x i32> %x, <i32 4294901760, i32 4294901760, i32 4294901760, i32 4294901760>
  %2 = shl <4 x i32> %1, <i32 16, i32 16, i32 16, i32 16>
  ret <4 x i32> %2
}

define <4 x i32> @combine_vec_shl_known_zero1(<4 x i32> %x) {
; SSE2-LABEL: combine_vec_shl_known_zero1:
; SSE2: # %bb.0:
; SSE2-NEXT: pand {{.*}}(%rip), %xmm0
; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [65536,32768,16384,8192]
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm2, %xmm1
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE2-NEXT: retq
;
; SSE41-LABEL: combine_vec_shl_known_zero1:
; SSE41: # %bb.0:
; SSE41-NEXT: pand {{.*}}(%rip), %xmm0
; SSE41-NEXT: pmulld {{.*}}(%rip), %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: combine_vec_shl_known_zero1:
; AVX: # %bb.0:
; AVX-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: vpsllvd {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: retq
  %1 = and <4 x i32> %x, <i32 4294901760, i32 8589803520, i32 17179607040, i32 34359214080>
  %2 = shl <4 x i32> %1, <i32 16, i32 15, i32 14, i32 13>
  ret <4 x i32> %2
}

; fold (shl x, (trunc (and y, c))) -> (shl x, (and (trunc y), (trunc c))).
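; Only the low bits of a shift amount matter, so the i64 mask can be applied
; after truncation instead: truncate %y to <4 x i32> first, then mask and
; shift entirely in the narrower type.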
define <4 x i32> @combine_vec_shl_trunc_and(<4 x i32> %x, <4 x i64> %y) {
; SSE2-LABEL: combine_vec_shl_trunc_and:
; SSE2: # %bb.0:
; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2],xmm2[0,2]
; SSE2-NEXT: andps {{.*}}(%rip), %xmm1
; SSE2-NEXT: pslld $23, %xmm1
; SSE2-NEXT: paddd {{.*}}(%rip), %xmm1
; SSE2-NEXT: cvttps2dq %xmm1, %xmm1
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm2, %xmm1
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE2-NEXT: retq
;
; SSE41-LABEL: combine_vec_shl_trunc_and:
; SSE41: # %bb.0:
; SSE41-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2],xmm2[0,2]
; SSE41-NEXT: andps {{.*}}(%rip), %xmm1
; SSE41-NEXT: pslld $23, %xmm1
; SSE41-NEXT: paddd {{.*}}(%rip), %xmm1
; SSE41-NEXT: cvttps2dq %xmm1, %xmm1
; SSE41-NEXT: pmulld %xmm1, %xmm0
; SSE41-NEXT: retq
;
; AVX-SLOW-LABEL: combine_vec_shl_trunc_and:
; AVX-SLOW: # %bb.0:
; AVX-SLOW-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[0,2,2,3,4,6,6,7]
; AVX-SLOW-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,2,2,3]
; AVX-SLOW-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
; AVX-SLOW-NEXT: vpsllvd %xmm1, %xmm0, %xmm0
; AVX-SLOW-NEXT: vzeroupper
; AVX-SLOW-NEXT: retq
;
; AVX-FAST-LABEL: combine_vec_shl_trunc_and:
; AVX-FAST: # %bb.0:
; AVX-FAST-NEXT: vmovdqa {{.*#+}} ymm2 = [0,2,4,6,4,6,6,7]
; AVX-FAST-NEXT: vpermd %ymm1, %ymm2, %ymm1
; AVX-FAST-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
; AVX-FAST-NEXT: vpsllvd %xmm1, %xmm0, %xmm0
; AVX-FAST-NEXT: vzeroupper
; AVX-FAST-NEXT: retq
  %1 = and <4 x i64> %y, <i64 15, i64 255, i64 4095, i64 65535>
  %2 = trunc <4 x i64> %1 to <4 x i32>
  %3 = shl <4 x i32> %x, %2
  ret <4 x i32> %3
}

; fold (shl (shl x, c1), c2) -> (shl x, (add c1, c2))
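; Two constant shifts merge into one: (x << 2) << 4 == x << 6 for every i32 x,
; hence the single pslld $6 below.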
define <4 x i32> @combine_vec_shl_shl0(<4 x i32> %x) {
; SSE-LABEL: combine_vec_shl_shl0:
; SSE: # %bb.0:
; SSE-NEXT: pslld $6, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_shl_shl0:
; AVX: # %bb.0:
; AVX-NEXT: vpslld $6, %xmm0, %xmm0
; AVX-NEXT: retq
  %1 = shl <4 x i32> %x, <i32 2, i32 2, i32 2, i32 2>
  %2 = shl <4 x i32> %1, <i32 4, i32 4, i32 4, i32 4>
  ret <4 x i32> %2
}

define <4 x i32> @combine_vec_shl_shl1(<4 x i32> %x) {
; SSE2-LABEL: combine_vec_shl_shl1:
; SSE2: # %bb.0:
; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [16,64,256,1024]
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm2, %xmm1
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE2-NEXT: retq
;
; SSE41-LABEL: combine_vec_shl_shl1:
; SSE41: # %bb.0:
; SSE41-NEXT: pmulld {{.*}}(%rip), %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: combine_vec_shl_shl1:
; AVX: # %bb.0:
; AVX-NEXT: vpsllvd {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: retq
  %1 = shl <4 x i32> %x, <i32 0, i32 1, i32 2, i32 3>
  %2 = shl <4 x i32> %1, <i32 4, i32 5, i32 6, i32 7>
  ret <4 x i32> %2
}

; fold (shl (shl x, c1), c2) -> 0 if c1 + c2 >= size(x)
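; Here the combined shift amounts (16 + 20, and 17..20 + 25..28) are all at
; least 32, so every bit is shifted out and the result is the zero vector.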
define <4 x i32> @combine_vec_shl_shl_zero0(<4 x i32> %x) {
; SSE-LABEL: combine_vec_shl_shl_zero0:
; SSE: # %bb.0:
; SSE-NEXT: xorps %xmm0, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_shl_shl_zero0:
; AVX: # %bb.0:
; AVX-NEXT: vxorps %xmm0, %xmm0, %xmm0
; AVX-NEXT: retq
  %1 = shl <4 x i32> %x, <i32 16, i32 16, i32 16, i32 16>
  %2 = shl <4 x i32> %1, <i32 20, i32 20, i32 20, i32 20>
  ret <4 x i32> %2
}

define <4 x i32> @combine_vec_shl_shl_zero1(<4 x i32> %x) {
; SSE-LABEL: combine_vec_shl_shl_zero1:
; SSE: # %bb.0:
; SSE-NEXT: xorps %xmm0, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_shl_shl_zero1:
; AVX: # %bb.0:
; AVX-NEXT: vxorps %xmm0, %xmm0, %xmm0
; AVX-NEXT: retq
  %1 = shl <4 x i32> %x, <i32 17, i32 18, i32 19, i32 20>
  %2 = shl <4 x i32> %1, <i32 25, i32 26, i32 27, i32 28>
  ret <4 x i32> %2
}

; fold (shl (ext (shl x, c1)), c2) -> (ext (shl x, (add c1, c2)))
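; The inner i16 shift and the outer i32 shift combine into one shift after the
; extension: 4 + 16 = 20 below. Since the combined shift is at least 16 bits,
; the sign-extended bits are discarded anyway, which is why a zero extend
; (pmovzxwd) suffices in place of the sext.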
define <8 x i32> @combine_vec_shl_ext_shl0(<8 x i16> %x) {
; SSE2-LABEL: combine_vec_shl_ext_shl0:
; SSE2: # %bb.0:
; SSE2-NEXT: movdqa %xmm0, %xmm1
; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
; SSE2-NEXT: pslld $20, %xmm0
; SSE2-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
; SSE2-NEXT: pslld $20, %xmm1
; SSE2-NEXT: retq
;
; SSE41-LABEL: combine_vec_shl_ext_shl0:
; SSE41: # %bb.0:
; SSE41-NEXT: movdqa %xmm0, %xmm1
; SSE41-NEXT: pmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; SSE41-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
; SSE41-NEXT: pslld $20, %xmm1
; SSE41-NEXT: pslld $20, %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: combine_vec_shl_ext_shl0:
; AVX: # %bb.0:
; AVX-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX-NEXT: vpslld $20, %ymm0, %ymm0
; AVX-NEXT: retq
  %1 = shl <8 x i16> %x, <i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4>
  %2 = sext <8 x i16> %1 to <8 x i32>
  %3 = shl <8 x i32> %2, <i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16>
  ret <8 x i32> %3
}

define <8 x i32> @combine_vec_shl_ext_shl1(<8 x i16> %x) {
; SSE2-LABEL: combine_vec_shl_ext_shl1:
; SSE2: # %bb.0:
; SSE2-NEXT: pmullw {{.*}}(%rip), %xmm0
; SSE2-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
; SSE2-NEXT: psrad $16, %xmm1
; SSE2-NEXT: movdqa %xmm1, %xmm2
; SSE2-NEXT: pslld $29, %xmm2
; SSE2-NEXT: pslld $28, %xmm1
; SSE2-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
; SSE2-NEXT: pslld $30, %xmm0
; SSE2-NEXT: xorpd %xmm2, %xmm2
; SSE2-NEXT: movsd {{.*#+}} xmm0 = xmm2[0],xmm0[1]
; SSE2-NEXT: retq
;
; SSE41-LABEL: combine_vec_shl_ext_shl1:
; SSE41: # %bb.0:
; SSE41-NEXT: pmullw {{.*}}(%rip), %xmm0
; SSE41-NEXT: pmovsxwd %xmm0, %xmm0
; SSE41-NEXT: pslld $30, %xmm0
; SSE41-NEXT: pxor %xmm1, %xmm1
; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7]
; SSE41-NEXT: pxor %xmm1, %xmm1
; SSE41-NEXT: retq
;
; AVX-LABEL: combine_vec_shl_ext_shl1:
; AVX: # %bb.0:
; AVX-NEXT: vpmullw {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: vpmovsxwd %xmm0, %ymm0
; AVX-NEXT: vpsllvd {{.*}}(%rip), %ymm0, %ymm0
; AVX-NEXT: retq
  %1 = shl <8 x i16> %x, <i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8>
  %2 = sext <8 x i16> %1 to <8 x i32>
  %3 = shl <8 x i32> %2, <i32 31, i32 31, i32 30, i32 30, i32 29, i32 29, i32 28, i32 28>
  ret <8 x i32> %3
}

; fold (shl (zext (srl x, C)), C) -> (zext (shl (srl x, C), C))
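; Shifting right then left by the same amount within the narrow type is just a
; mask: in i16, (x >> 4) << 4 == x & 0xFFF0, so this lowers to pand + pmovzx.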
define <8 x i32> @combine_vec_shl_zext_lshr0(<8 x i16> %x) {
; SSE2-LABEL: combine_vec_shl_zext_lshr0:
; SSE2: # %bb.0:
; SSE2-NEXT: movdqa %xmm0, %xmm1
; SSE2-NEXT: pand {{.*}}(%rip), %xmm1
; SSE2-NEXT: pxor %xmm2, %xmm2
; SSE2-NEXT: movdqa %xmm1, %xmm0
; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
; SSE2-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
; SSE2-NEXT: retq
;
; SSE41-LABEL: combine_vec_shl_zext_lshr0:
; SSE41: # %bb.0:
; SSE41-NEXT: pand {{.*}}(%rip), %xmm0
; SSE41-NEXT: pmovzxwd {{.*#+}} xmm2 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
; SSE41-NEXT: pmovzxwd {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; SSE41-NEXT: movdqa %xmm2, %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: combine_vec_shl_zext_lshr0:
; AVX: # %bb.0:
; AVX-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX-NEXT: retq
  %1 = lshr <8 x i16> %x, <i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4>
  %2 = zext <8 x i16> %1 to <8 x i32>
  %3 = shl <8 x i32> %2, <i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4>
  ret <8 x i32> %3
}

define <8 x i32> @combine_vec_shl_zext_lshr1(<8 x i16> %x) {
; SSE2-LABEL: combine_vec_shl_zext_lshr1:
; SSE2: # %bb.0:
; SSE2-NEXT: pmulhuw {{.*}}(%rip), %xmm0
; SSE2-NEXT: pxor %xmm1, %xmm1
; SSE2-NEXT: movdqa %xmm0, %xmm2
; SSE2-NEXT: punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [2,4,8,16]
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm3, %xmm1
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [32,64,128,256]
; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm2[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm3, %xmm2
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm2[0,2,2,3]
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm3[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm4, %xmm2
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
; SSE2-NEXT: retq
;
; SSE41-LABEL: combine_vec_shl_zext_lshr1:
; SSE41: # %bb.0:
; SSE41-NEXT: pmulhuw {{.*}}(%rip), %xmm0
; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; SSE41-NEXT: pmovzxwd {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
; SSE41-NEXT: pmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; SSE41-NEXT: pmulld {{.*}}(%rip), %xmm0
; SSE41-NEXT: pmulld {{.*}}(%rip), %xmm1
; SSE41-NEXT: retq
;
; AVX-LABEL: combine_vec_shl_zext_lshr1:
; AVX: # %bb.0:
; AVX-NEXT: vpmulhuw {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX-NEXT: vpsllvd {{.*}}(%rip), %ymm0, %ymm0
; AVX-NEXT: retq
  %1 = lshr <8 x i16> %x, <i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8>
  %2 = zext <8 x i16> %1 to <8 x i32>
  %3 = shl <8 x i32> %2, <i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8>
  ret <8 x i32> %3
}

; fold (shl (sr[la] exact X, C1), C2) -> (shl X, (C2-C1)) if C1 <= C2
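; "exact" guarantees the right shift drops no set bits, so the two shifts
; compose exactly: (X >>exact 3) << 5 == X << 2, a single pslld $2.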
define <4 x i32> @combine_vec_shl_ge_ashr_exact0(<4 x i32> %x) {
; SSE-LABEL: combine_vec_shl_ge_ashr_exact0:
; SSE: # %bb.0:
; SSE-NEXT: pslld $2, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_shl_ge_ashr_exact0:
; AVX: # %bb.0:
; AVX-NEXT: vpslld $2, %xmm0, %xmm0
; AVX-NEXT: retq
  %1 = ashr exact <4 x i32> %x, <i32 3, i32 3, i32 3, i32 3>
  %2 = shl <4 x i32> %1, <i32 5, i32 5, i32 5, i32 5>
  ret <4 x i32> %2
}

define <4 x i32> @combine_vec_shl_ge_ashr_exact1(<4 x i32> %x) {
; SSE2-LABEL: combine_vec_shl_ge_ashr_exact1:
; SSE2: # %bb.0:
; SSE2-NEXT: movdqa %xmm0, %xmm1
; SSE2-NEXT: psrad $5, %xmm1
; SSE2-NEXT: movdqa %xmm0, %xmm2
; SSE2-NEXT: psrad $3, %xmm2
; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,3],xmm1[2,3]
; SSE2-NEXT: movdqa %xmm0, %xmm1
; SSE2-NEXT: psrad $8, %xmm1
; SSE2-NEXT: psrad $4, %xmm0
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1],xmm1[3,3]
; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [32,64,128,256]
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm1[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm0, %xmm3
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,2,2,3]
; SSE2-NEXT: pmuludq %xmm1, %xmm2
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm2[0,2,2,3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
; SSE2-NEXT: retq
;
; SSE41-LABEL: combine_vec_shl_ge_ashr_exact1:
; SSE41: # %bb.0:
; SSE41-NEXT: movdqa %xmm0, %xmm1
; SSE41-NEXT: psrad $8, %xmm1
; SSE41-NEXT: movdqa %xmm0, %xmm2
; SSE41-NEXT: psrad $4, %xmm2
; SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm1[4,5,6,7]
; SSE41-NEXT: movdqa %xmm0, %xmm1
; SSE41-NEXT: psrad $5, %xmm1
; SSE41-NEXT: psrad $3, %xmm0
; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
; SSE41-NEXT: pmulld {{.*}}(%rip), %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: combine_vec_shl_ge_ashr_exact1:
; AVX: # %bb.0:
; AVX-NEXT: vpsravd {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: vpsllvd {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: retq
  %1 = ashr exact <4 x i32> %x, <i32 3, i32 4, i32 5, i32 8>
  %2 = shl <4 x i32> %1, <i32 5, i32 6, i32 7, i32 8>
  ret <4 x i32> %2
}

; fold (shl (sr[la] exact X, C1), C2) -> (sr[la] X, (C1-C2)) if C1 > C2
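; With the right shift larger than the left shift, the pair collapses to a net
; right shift: (X >>exact 5) << 3 == X >> 2, hence the single psrad $2.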
define <4 x i32> @combine_vec_shl_lt_ashr_exact0(<4 x i32> %x) {
; SSE-LABEL: combine_vec_shl_lt_ashr_exact0:
; SSE: # %bb.0:
; SSE-NEXT: psrad $2, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_shl_lt_ashr_exact0:
; AVX: # %bb.0:
; AVX-NEXT: vpsrad $2, %xmm0, %xmm0
; AVX-NEXT: retq
  %1 = ashr exact <4 x i32> %x, <i32 5, i32 5, i32 5, i32 5>
  %2 = shl <4 x i32> %1, <i32 3, i32 3, i32 3, i32 3>
  ret <4 x i32> %2
}

define <4 x i32> @combine_vec_shl_lt_ashr_exact1(<4 x i32> %x) {
; SSE2-LABEL: combine_vec_shl_lt_ashr_exact1:
; SSE2: # %bb.0:
; SSE2-NEXT: movdqa %xmm0, %xmm1
; SSE2-NEXT: psrad $7, %xmm1
; SSE2-NEXT: movdqa %xmm0, %xmm2
; SSE2-NEXT: psrad $5, %xmm2
; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,3],xmm1[2,3]
; SSE2-NEXT: movdqa %xmm0, %xmm1
; SSE2-NEXT: psrad $8, %xmm1
; SSE2-NEXT: psrad $6, %xmm0
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1],xmm1[3,3]
; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [8,16,32,256]
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm1[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm0, %xmm3
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,2,2,3]
; SSE2-NEXT: pmuludq %xmm1, %xmm2
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm2[0,2,2,3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
; SSE2-NEXT: retq
;
; SSE41-LABEL: combine_vec_shl_lt_ashr_exact1:
; SSE41: # %bb.0:
; SSE41-NEXT: movdqa %xmm0, %xmm1
; SSE41-NEXT: psrad $8, %xmm1
; SSE41-NEXT: movdqa %xmm0, %xmm2
; SSE41-NEXT: psrad $6, %xmm2
; SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm1[4,5,6,7]
; SSE41-NEXT: movdqa %xmm0, %xmm1
; SSE41-NEXT: psrad $7, %xmm1
; SSE41-NEXT: psrad $5, %xmm0
; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
; SSE41-NEXT: pmulld {{.*}}(%rip), %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: combine_vec_shl_lt_ashr_exact1:
; AVX: # %bb.0:
; AVX-NEXT: vpsravd {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: vpsllvd {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: retq
  %1 = ashr exact <4 x i32> %x, <i32 5, i32 6, i32 7, i32 8>
  %2 = shl <4 x i32> %1, <i32 3, i32 4, i32 5, i32 8>
  ret <4 x i32> %2
}

; fold (shl (srl x, c1), c2) -> (and (shl x, (sub c2, c1)), MASK) if C2 > C1
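; Without "exact" the dropped low bits may be nonzero, so a mask is required:
; (x >>u 3) << 5 == (x << 2) & 0xFFFFFFE0 (4294967264), i.e. pslld $2 + pand.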
define <4 x i32> @combine_vec_shl_gt_lshr0(<4 x i32> %x) {
; SSE-LABEL: combine_vec_shl_gt_lshr0:
; SSE: # %bb.0:
; SSE-NEXT: pslld $2, %xmm0
; SSE-NEXT: pand {{.*}}(%rip), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_shl_gt_lshr0:
; AVX: # %bb.0:
; AVX-NEXT: vpbroadcastd {{.*#+}} xmm1 = [4294967264,4294967264,4294967264,4294967264]
; AVX-NEXT: vpslld $2, %xmm0, %xmm0
; AVX-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
  %1 = lshr <4 x i32> %x, <i32 3, i32 3, i32 3, i32 3>
  %2 = shl <4 x i32> %1, <i32 5, i32 5, i32 5, i32 5>
  ret <4 x i32> %2
}

define <4 x i32> @combine_vec_shl_gt_lshr1(<4 x i32> %x) {
; SSE2-LABEL: combine_vec_shl_gt_lshr1:
; SSE2: # %bb.0:
; SSE2-NEXT: movdqa %xmm0, %xmm1
; SSE2-NEXT: psrld $5, %xmm1
; SSE2-NEXT: movdqa %xmm0, %xmm2
; SSE2-NEXT: psrld $3, %xmm2
; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,3],xmm1[2,3]
; SSE2-NEXT: movdqa %xmm0, %xmm1
; SSE2-NEXT: psrld $8, %xmm1
; SSE2-NEXT: psrld $4, %xmm0
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1],xmm1[3,3]
; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [32,64,128,256]
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm1[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm0, %xmm3
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,2,2,3]
; SSE2-NEXT: pmuludq %xmm1, %xmm2
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm2[0,2,2,3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
; SSE2-NEXT: retq
;
; SSE41-LABEL: combine_vec_shl_gt_lshr1:
; SSE41: # %bb.0:
; SSE41-NEXT: movdqa %xmm0, %xmm1
; SSE41-NEXT: psrld $8, %xmm1
; SSE41-NEXT: movdqa %xmm0, %xmm2
; SSE41-NEXT: psrld $4, %xmm2
; SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm1[4,5,6,7]
; SSE41-NEXT: movdqa %xmm0, %xmm1
; SSE41-NEXT: psrld $5, %xmm1
; SSE41-NEXT: psrld $3, %xmm0
; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
; SSE41-NEXT: pmulld {{.*}}(%rip), %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: combine_vec_shl_gt_lshr1:
; AVX: # %bb.0:
; AVX-NEXT: vpsrlvd {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: vpsllvd {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: retq
  %1 = lshr <4 x i32> %x, <i32 3, i32 4, i32 5, i32 8>
  %2 = shl <4 x i32> %1, <i32 5, i32 6, i32 7, i32 8>
  ret <4 x i32> %2
}

; fold (shl (srl x, c1), c2) -> (and (srl x, (sub c1, c2)), MASK) if C1 >= C2
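; Here the net shift is to the right: (x >>u 5) << 3 == (x >>u 2) & 0x3FFFFFF8
; (1073741816), matching the psrld $2 + pand below.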
define <4 x i32> @combine_vec_shl_le_lshr0(<4 x i32> %x) {
; SSE-LABEL: combine_vec_shl_le_lshr0:
; SSE: # %bb.0:
; SSE-NEXT: psrld $2, %xmm0
; SSE-NEXT: pand {{.*}}(%rip), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_shl_le_lshr0:
; AVX: # %bb.0:
; AVX-NEXT: vpbroadcastd {{.*#+}} xmm1 = [1073741816,1073741816,1073741816,1073741816]
; AVX-NEXT: vpsrld $2, %xmm0, %xmm0
; AVX-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
  %1 = lshr <4 x i32> %x, <i32 5, i32 5, i32 5, i32 5>
  %2 = shl <4 x i32> %1, <i32 3, i32 3, i32 3, i32 3>
  ret <4 x i32> %2
}

define <4 x i32> @combine_vec_shl_le_lshr1(<4 x i32> %x) {
; SSE2-LABEL: combine_vec_shl_le_lshr1:
; SSE2: # %bb.0:
; SSE2-NEXT: movdqa %xmm0, %xmm1
; SSE2-NEXT: psrld $7, %xmm1
; SSE2-NEXT: movdqa %xmm0, %xmm2
; SSE2-NEXT: psrld $5, %xmm2
; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,3],xmm1[2,3]
; SSE2-NEXT: movdqa %xmm0, %xmm1
; SSE2-NEXT: psrld $8, %xmm1
; SSE2-NEXT: psrld $6, %xmm0
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1],xmm1[3,3]
; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [8,16,32,256]
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm1[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm0, %xmm3
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,2,2,3]
; SSE2-NEXT: pmuludq %xmm1, %xmm2
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm2[0,2,2,3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
; SSE2-NEXT: retq
;
; SSE41-LABEL: combine_vec_shl_le_lshr1:
; SSE41: # %bb.0:
; SSE41-NEXT: movdqa %xmm0, %xmm1
; SSE41-NEXT: psrld $8, %xmm1
; SSE41-NEXT: movdqa %xmm0, %xmm2
; SSE41-NEXT: psrld $6, %xmm2
; SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm1[4,5,6,7]
; SSE41-NEXT: movdqa %xmm0, %xmm1
; SSE41-NEXT: psrld $7, %xmm1
; SSE41-NEXT: psrld $5, %xmm0
; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
; SSE41-NEXT: pmulld {{.*}}(%rip), %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: combine_vec_shl_le_lshr1:
; AVX: # %bb.0:
; AVX-NEXT: vpsrlvd {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: vpsllvd {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: retq
  %1 = lshr <4 x i32> %x, <i32 5, i32 6, i32 7, i32 8>
  %2 = shl <4 x i32> %1, <i32 3, i32 4, i32 5, i32 8>
  ret <4 x i32> %2
}

; fold (shl (sra x, c1), c1) -> (and x, (shl -1, c1))
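; An arithmetic right shift followed by a left shift of the same amount only
; clears the low bits: (x >>s 5) << 5 == x & 0xFFFFFFE0 (4294967264), so all
; that remains is a single and.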
define <4 x i32> @combine_vec_shl_ashr0(<4 x i32> %x) {
; SSE-LABEL: combine_vec_shl_ashr0:
; SSE: # %bb.0:
; SSE-NEXT: andps {{.*}}(%rip), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_shl_ashr0:
; AVX: # %bb.0:
; AVX-NEXT: vbroadcastss {{.*#+}} xmm1 = [4294967264,4294967264,4294967264,4294967264]
; AVX-NEXT: vandps %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
  %1 = ashr <4 x i32> %x, <i32 5, i32 5, i32 5, i32 5>
  %2 = shl <4 x i32> %1, <i32 5, i32 5, i32 5, i32 5>
  ret <4 x i32> %2
}

define <4 x i32> @combine_vec_shl_ashr1(<4 x i32> %x) {
; SSE-LABEL: combine_vec_shl_ashr1:
; SSE: # %bb.0:
; SSE-NEXT: andps {{.*}}(%rip), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_shl_ashr1:
; AVX: # %bb.0:
; AVX-NEXT: vandps {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: retq
  %1 = ashr <4 x i32> %x, <i32 5, i32 6, i32 7, i32 8>
  %2 = shl <4 x i32> %1, <i32 5, i32 6, i32 7, i32 8>
  ret <4 x i32> %2
}

; fold (shl (add x, c1), c2) -> (add (shl x, c2), c1 << c2)
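; The shift distributes over addition by a constant: (x + 5) << 2 ==
; (x << 2) + 20, which is why the broadcast constant below is 20 rather than 5.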
define <4 x i32> @combine_vec_shl_add0(<4 x i32> %x) {
; SSE-LABEL: combine_vec_shl_add0:
; SSE: # %bb.0:
; SSE-NEXT: pslld $2, %xmm0
; SSE-NEXT: paddd {{.*}}(%rip), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_shl_add0:
; AVX: # %bb.0:
; AVX-NEXT: vpslld $2, %xmm0, %xmm0
; AVX-NEXT: vpbroadcastd {{.*#+}} xmm1 = [20,20,20,20]
; AVX-NEXT: vpaddd %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
  %1 = add <4 x i32> %x, <i32 5, i32 5, i32 5, i32 5>
  %2 = shl <4 x i32> %1, <i32 2, i32 2, i32 2, i32 2>
  ret <4 x i32> %2
}

define <4 x i32> @combine_vec_shl_add1(<4 x i32> %x) {
; SSE2-LABEL: combine_vec_shl_add1:
; SSE2: # %bb.0:
; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [2,4,8,16]
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm2, %xmm1
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE2-NEXT: paddd {{.*}}(%rip), %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: combine_vec_shl_add1:
; SSE41: # %bb.0:
; SSE41-NEXT: pmulld {{.*}}(%rip), %xmm0
; SSE41-NEXT: paddd {{.*}}(%rip), %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: combine_vec_shl_add1:
; AVX: # %bb.0:
; AVX-NEXT: vpsllvd {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: vpaddd {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: retq
  %1 = add <4 x i32> %x, <i32 5, i32 6, i32 7, i32 8>
  %2 = shl <4 x i32> %1, <i32 1, i32 2, i32 3, i32 4>
  ret <4 x i32> %2
}

; fold (shl (or x, c1), c2) -> (or (shl x, c2), c1 << c2)
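; Shifting distributes over or in the same way: (x | 5) << 2 == (x << 2) | 20.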
define <4 x i32> @combine_vec_shl_or0(<4 x i32> %x) {
; SSE-LABEL: combine_vec_shl_or0:
; SSE: # %bb.0:
; SSE-NEXT: pslld $2, %xmm0
; SSE-NEXT: por {{.*}}(%rip), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_shl_or0:
; AVX: # %bb.0:
; AVX-NEXT: vpslld $2, %xmm0, %xmm0
; AVX-NEXT: vpbroadcastd {{.*#+}} xmm1 = [20,20,20,20]
; AVX-NEXT: vpor %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
  %1 = or <4 x i32> %x, <i32 5, i32 5, i32 5, i32 5>
  %2 = shl <4 x i32> %1, <i32 2, i32 2, i32 2, i32 2>
  ret <4 x i32> %2
}

define <4 x i32> @combine_vec_shl_or1(<4 x i32> %x) {
; SSE2-LABEL: combine_vec_shl_or1:
; SSE2: # %bb.0:
; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [2,4,8,16]
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm2, %xmm1
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE2-NEXT: por {{.*}}(%rip), %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: combine_vec_shl_or1:
; SSE41: # %bb.0:
; SSE41-NEXT: pmulld {{.*}}(%rip), %xmm0
; SSE41-NEXT: por {{.*}}(%rip), %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: combine_vec_shl_or1:
; AVX: # %bb.0:
; AVX-NEXT: vpsllvd {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: vpor {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: retq
  %1 = or <4 x i32> %x, <i32 5, i32 6, i32 7, i32 8>
  %2 = shl <4 x i32> %1, <i32 1, i32 2, i32 3, i32 4>
  ret <4 x i32> %2
}

; fold (shl (mul x, c1), c2) -> (mul x, c1 << c2)
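; The shift folds into the multiply constant: (x * 5) << 2 == x * 20, so a
; single multiply by [20,20,20,20] is enough.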
define <4 x i32> @combine_vec_shl_mul0(<4 x i32> %x) {
; SSE2-LABEL: combine_vec_shl_mul0:
; SSE2: # %bb.0:
; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [20,20,20,20]
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SSE2-NEXT: pmuludq %xmm1, %xmm2
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm2[0,2,2,3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE2-NEXT: retq
;
; SSE41-LABEL: combine_vec_shl_mul0:
; SSE41: # %bb.0:
; SSE41-NEXT: pmulld {{.*}}(%rip), %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: combine_vec_shl_mul0:
; AVX: # %bb.0:
; AVX-NEXT: vpbroadcastd {{.*#+}} xmm1 = [20,20,20,20]
; AVX-NEXT: vpmulld %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
  %1 = mul <4 x i32> %x, <i32 5, i32 5, i32 5, i32 5>
  %2 = shl <4 x i32> %1, <i32 2, i32 2, i32 2, i32 2>
  ret <4 x i32> %2
}

define <4 x i32> @combine_vec_shl_mul1(<4 x i32> %x) {
; SSE2-LABEL: combine_vec_shl_mul1:
; SSE2: # %bb.0:
; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [10,24,56,128]
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm2, %xmm1
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE2-NEXT: retq
;
; SSE41-LABEL: combine_vec_shl_mul1:
; SSE41: # %bb.0:
; SSE41-NEXT: pmulld {{.*}}(%rip), %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: combine_vec_shl_mul1:
; AVX: # %bb.0:
; AVX-NEXT: vpmulld {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: retq
  %1 = mul <4 x i32> %x, <i32 5, i32 6, i32 7, i32 8>
  %2 = shl <4 x i32> %1, <i32 1, i32 2, i32 3, i32 4>
  ret <4 x i32> %2
}