; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=SSE
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=AVX

; fold (srl 0, x) -> 0
define <4 x i32> @combine_vec_lshr_zero(<4 x i32> %x) {
; SSE-LABEL: combine_vec_lshr_zero:
; SSE:       # %bb.0:
; SSE-NEXT:    xorps %xmm0, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_lshr_zero:
; AVX:       # %bb.0:
; AVX-NEXT:    vxorps %xmm0, %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = lshr <4 x i32> zeroinitializer, %x
  ret <4 x i32> %1
}

; fold (srl x, c >= size(x)) -> undef
define <4 x i32> @combine_vec_lshr_outofrange0(<4 x i32> %x) {
; SSE-LABEL: combine_vec_lshr_outofrange0:
; SSE:       # %bb.0:
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_lshr_outofrange0:
; AVX:       # %bb.0:
; AVX-NEXT:    retq
  %1 = lshr <4 x i32> %x, <i32 33, i32 33, i32 33, i32 33>
  ret <4 x i32> %1
}

define <4 x i32> @combine_vec_lshr_outofrange1(<4 x i32> %x) {
; SSE-LABEL: combine_vec_lshr_outofrange1:
; SSE:       # %bb.0:
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_lshr_outofrange1:
; AVX:       # %bb.0:
; AVX-NEXT:    retq
  %1 = lshr <4 x i32> %x, <i32 33, i32 34, i32 35, i32 36>
  ret <4 x i32> %1
}

; fold (srl x, 0) -> x
define <4 x i32> @combine_vec_lshr_by_zero(<4 x i32> %x) {
; SSE-LABEL: combine_vec_lshr_by_zero:
; SSE:       # %bb.0:
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_lshr_by_zero:
; AVX:       # %bb.0:
; AVX-NEXT:    retq
  %1 = lshr <4 x i32> %x, zeroinitializer
  ret <4 x i32> %1
}

; if (srl x, c) is known to be zero, return 0
define <4 x i32> @combine_vec_lshr_known_zero0(<4 x i32> %x) {
; SSE-LABEL: combine_vec_lshr_known_zero0:
; SSE:       # %bb.0:
; SSE-NEXT:    xorps %xmm0, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_lshr_known_zero0:
; AVX:       # %bb.0:
; AVX-NEXT:    vxorps %xmm0, %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = and <4 x i32> %x, <i32 15, i32 15, i32 15, i32 15>
  %2 = lshr <4 x i32> %1, <i32 4, i32 4, i32 4, i32 4>
  ret <4 x i32> %2
}

define <4 x i32> @combine_vec_lshr_known_zero1(<4 x i32> %x) {
; SSE-LABEL: combine_vec_lshr_known_zero1:
; SSE:       # %bb.0:
; SSE-NEXT:    pand {{.*}}(%rip), %xmm0
; SSE-NEXT:    movdqa %xmm0, %xmm1
; SSE-NEXT:    psrld $11, %xmm1
; SSE-NEXT:    movdqa %xmm0, %xmm2
; SSE-NEXT:    psrld $9, %xmm2
; SSE-NEXT:    pblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm1[4,5,6,7]
; SSE-NEXT:    movdqa %xmm0, %xmm1
; SSE-NEXT:    psrld $10, %xmm1
; SSE-NEXT:    psrld $8, %xmm0
; SSE-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
; SSE-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_lshr_known_zero1:
; AVX:       # %bb.0:
; AVX-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [15,15,15,15]
; AVX-NEXT:    vpand %xmm1, %xmm0, %xmm0
; AVX-NEXT:    vpsrlvd {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = and <4 x i32> %x, <i32 15, i32 15, i32 15, i32 15>
  %2 = lshr <4 x i32> %1, <i32 8, i32 9, i32 10, i32 11>
  ret <4 x i32> %2
}

; fold (srl (srl x, c1), c2) -> (srl x, (add c1, c2))
define <4 x i32> @combine_vec_lshr_lshr0(<4 x i32> %x) {
; SSE-LABEL: combine_vec_lshr_lshr0:
; SSE:       # %bb.0:
; SSE-NEXT:    psrld $6, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_lshr_lshr0:
; AVX:       # %bb.0:
; AVX-NEXT:    vpsrld $6, %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = lshr <4 x i32> %x, <i32 2, i32 2, i32 2, i32 2>
  %2 = lshr <4 x i32> %1, <i32 4, i32 4, i32 4, i32 4>
  ret <4 x i32> %2
}

define <4 x i32> @combine_vec_lshr_lshr1(<4 x i32> %x) {
; SSE-LABEL: combine_vec_lshr_lshr1:
; SSE:       # %bb.0:
; SSE-NEXT:    movdqa %xmm0, %xmm1
; SSE-NEXT:    psrld $10, %xmm1
; SSE-NEXT:    movdqa %xmm0, %xmm2
; SSE-NEXT:    psrld $6, %xmm2
; SSE-NEXT:    pblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm1[4,5,6,7]
; SSE-NEXT:    movdqa %xmm0, %xmm1
; SSE-NEXT:    psrld $8, %xmm1
; SSE-NEXT:    psrld $4, %xmm0
; SSE-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
; SSE-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_lshr_lshr1:
; AVX:       # %bb.0:
; AVX-NEXT:    vpsrlvd {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = lshr <4 x i32> %x, <i32 0, i32 1, i32 2, i32 3>
  %2 = lshr <4 x i32> %1, <i32 4, i32 5, i32 6, i32 7>
  ret <4 x i32> %2
}

; fold (srl (srl x, c1), c2) -> 0
define <4 x i32> @combine_vec_lshr_lshr_zero0(<4 x i32> %x) {
; SSE-LABEL: combine_vec_lshr_lshr_zero0:
; SSE:       # %bb.0:
; SSE-NEXT:    xorps %xmm0, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_lshr_lshr_zero0:
; AVX:       # %bb.0:
; AVX-NEXT:    vxorps %xmm0, %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = lshr <4 x i32> %x, <i32 16, i32 16, i32 16, i32 16>
  %2 = lshr <4 x i32> %1, <i32 20, i32 20, i32 20, i32 20>
  ret <4 x i32> %2
}

define <4 x i32> @combine_vec_lshr_lshr_zero1(<4 x i32> %x) {
; SSE-LABEL: combine_vec_lshr_lshr_zero1:
; SSE:       # %bb.0:
; SSE-NEXT:    xorps %xmm0, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_lshr_lshr_zero1:
; AVX:       # %bb.0:
; AVX-NEXT:    vxorps %xmm0, %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = lshr <4 x i32> %x, <i32 17, i32 18, i32 19, i32 20>
  %2 = lshr <4 x i32> %1, <i32 25, i32 26, i32 27, i32 28>
  ret <4 x i32> %2
}

; fold (srl (trunc (srl x, c1)), c2) -> (trunc (srl x, (add c1, c2)))
define <4 x i32> @combine_vec_lshr_trunc_lshr0(<4 x i64> %x) {
; SSE-LABEL: combine_vec_lshr_trunc_lshr0:
; SSE:       # %bb.0:
; SSE-NEXT:    psrlq $48, %xmm1
; SSE-NEXT:    psrlq $48, %xmm0
; SSE-NEXT:    packusdw %xmm1, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_lshr_trunc_lshr0:
; AVX:       # %bb.0:
; AVX-NEXT:    vpsrlq $48, %ymm0, %ymm0
; AVX-NEXT:    vextracti128 $1, %ymm0, %xmm1
; AVX-NEXT:    vpackusdw %xmm1, %xmm0, %xmm0
; AVX-NEXT:    vzeroupper
; AVX-NEXT:    retq
  %1 = lshr <4 x i64> %x, <i64 32, i64 32, i64 32, i64 32>
  %2 = trunc <4 x i64> %1 to <4 x i32>
  %3 = lshr <4 x i32> %2, <i32 16, i32 16, i32 16, i32 16>
  ret <4 x i32> %3
}

define <4 x i32> @combine_vec_lshr_trunc_lshr1(<4 x i64> %x) {
; SSE-LABEL: combine_vec_lshr_trunc_lshr1:
; SSE:       # %bb.0:
; SSE-NEXT:    movdqa %xmm1, %xmm2
; SSE-NEXT:    psrlq $35, %xmm2
; SSE-NEXT:    psrlq $34, %xmm1
; SSE-NEXT:    pblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm2[4,5,6,7]
; SSE-NEXT:    movdqa %xmm0, %xmm2
; SSE-NEXT:    psrlq $33, %xmm2
; SSE-NEXT:    psrlq $32, %xmm0
; SSE-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm2[4,5,6,7]
; SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
; SSE-NEXT:    movaps %xmm0, %xmm1
; SSE-NEXT:    psrld $19, %xmm1
; SSE-NEXT:    movaps %xmm0, %xmm2
; SSE-NEXT:    psrld $17, %xmm2
; SSE-NEXT:    pblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm1[4,5,6,7]
; SSE-NEXT:    movaps %xmm0, %xmm1
; SSE-NEXT:    psrld $18, %xmm1
; SSE-NEXT:    psrld $16, %xmm0
; SSE-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
; SSE-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_lshr_trunc_lshr1:
; AVX:       # %bb.0:
; AVX-NEXT:    vpsrlvq {{.*}}(%rip), %ymm0, %ymm0
; AVX-NEXT:    vpshufd {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
; AVX-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
; AVX-NEXT:    vpsrlvd {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT:    vzeroupper
; AVX-NEXT:    retq
  %1 = lshr <4 x i64> %x, <i64 32, i64 33, i64 34, i64 35>
  %2 = trunc <4 x i64> %1 to <4 x i32>
  %3 = lshr <4 x i32> %2, <i32 16, i32 17, i32 18, i32 19>
  ret <4 x i32> %3
}

; fold (srl (trunc (srl x, c1)), c2) -> 0
define <4 x i32> @combine_vec_lshr_trunc_lshr_zero0(<4 x i64> %x) {
; SSE-LABEL: combine_vec_lshr_trunc_lshr_zero0:
; SSE:       # %bb.0:
; SSE-NEXT:    xorps %xmm0, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_lshr_trunc_lshr_zero0:
; AVX:       # %bb.0:
; AVX-NEXT:    vxorps %xmm0, %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = lshr <4 x i64> %x, <i64 48, i64 48, i64 48, i64 48>
  %2 = trunc <4 x i64> %1 to <4 x i32>
  %3 = lshr <4 x i32> %2, <i32 24, i32 24, i32 24, i32 24>
  ret <4 x i32> %3
}

define <4 x i32> @combine_vec_lshr_trunc_lshr_zero1(<4 x i64> %x) {
; SSE-LABEL: combine_vec_lshr_trunc_lshr_zero1:
; SSE:       # %bb.0:
; SSE-NEXT:    movdqa %xmm1, %xmm2
; SSE-NEXT:    psrlq $51, %xmm2
; SSE-NEXT:    psrlq $50, %xmm1
; SSE-NEXT:    pblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm2[4,5,6,7]
; SSE-NEXT:    movdqa %xmm0, %xmm2
; SSE-NEXT:    psrlq $49, %xmm2
; SSE-NEXT:    psrlq $48, %xmm0
; SSE-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm2[4,5,6,7]
; SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
; SSE-NEXT:    movaps %xmm0, %xmm1
; SSE-NEXT:    psrld $27, %xmm1
; SSE-NEXT:    movaps %xmm0, %xmm2
; SSE-NEXT:    psrld $25, %xmm2
; SSE-NEXT:    pblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm1[4,5,6,7]
; SSE-NEXT:    movaps %xmm0, %xmm1
; SSE-NEXT:    psrld $26, %xmm1
; SSE-NEXT:    psrld $24, %xmm0
; SSE-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
; SSE-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_lshr_trunc_lshr_zero1:
; AVX:       # %bb.0:
; AVX-NEXT:    vpsrlvq {{.*}}(%rip), %ymm0, %ymm0
; AVX-NEXT:    vpshufd {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
; AVX-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
; AVX-NEXT:    vpsrlvd {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT:    vzeroupper
; AVX-NEXT:    retq
  %1 = lshr <4 x i64> %x, <i64 48, i64 49, i64 50, i64 51>
  %2 = trunc <4 x i64> %1 to <4 x i32>
  %3 = lshr <4 x i32> %2, <i32 24, i32 25, i32 26, i32 27>
  ret <4 x i32> %3
}

; fold (srl (shl x, c), c) -> (and x, cst2)
define <4 x i32> @combine_vec_lshr_shl_mask0(<4 x i32> %x) {
; SSE-LABEL: combine_vec_lshr_shl_mask0:
; SSE:       # %bb.0:
; SSE-NEXT:    andps {{.*}}(%rip), %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_lshr_shl_mask0:
; AVX:       # %bb.0:
; AVX-NEXT:    vbroadcastss {{.*#+}} xmm1 = [1073741823,1073741823,1073741823,1073741823]
; AVX-NEXT:    vandps %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = shl <4 x i32> %x, <i32 2, i32 2, i32 2, i32 2>
  %2 = lshr <4 x i32> %1, <i32 2, i32 2, i32 2, i32 2>
  ret <4 x i32> %2
}

define <4 x i32> @combine_vec_lshr_shl_mask1(<4 x i32> %x) {
; SSE-LABEL: combine_vec_lshr_shl_mask1:
; SSE:       # %bb.0:
; SSE-NEXT:    andps {{.*}}(%rip), %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_lshr_shl_mask1:
; AVX:       # %bb.0:
; AVX-NEXT:    vandps {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = shl <4 x i32> %x, <i32 2, i32 3, i32 4, i32 5>
  %2 = lshr <4 x i32> %1, <i32 2, i32 3, i32 4, i32 5>
  ret <4 x i32> %2
}

; fold (srl (sra X, Y), 31) -> (srl X, 31)
define <4 x i32> @combine_vec_lshr_ashr_sign(<4 x i32> %x, <4 x i32> %y) {
; SSE-LABEL: combine_vec_lshr_ashr_sign:
; SSE:       # %bb.0:
; SSE-NEXT:    psrld $31, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_lshr_ashr_sign:
; AVX:       # %bb.0:
; AVX-NEXT:    vpsrld $31, %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = ashr <4 x i32> %x, %y
  %2 = lshr <4 x i32> %1, <i32 31, i32 31, i32 31, i32 31>
  ret <4 x i32> %2
}

; fold (srl (ctlz x), "5") -> x iff x has one bit set (the low bit).
define <4 x i32> @combine_vec_lshr_lzcnt_bit0(<4 x i32> %x) {
; SSE-LABEL: combine_vec_lshr_lzcnt_bit0:
; SSE:       # %bb.0:
; SSE-NEXT:    pand {{.*}}(%rip), %xmm0
; SSE-NEXT:    psrld $4, %xmm0
; SSE-NEXT:    pxor {{.*}}(%rip), %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_lshr_lzcnt_bit0:
; AVX:       # %bb.0:
; AVX-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [16,16,16,16]
; AVX-NEXT:    vpand %xmm1, %xmm0, %xmm0
; AVX-NEXT:    vpsrld $4, %xmm0, %xmm0
; AVX-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [1,1,1,1]
; AVX-NEXT:    vpxor %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = and <4 x i32> %x, <i32 16, i32 16, i32 16, i32 16>
  %2 = call <4 x i32> @llvm.ctlz.v4i32(<4 x i32> %1, i1 0)
  %3 = lshr <4 x i32> %2, <i32 5, i32 5, i32 5, i32 5>
  ret <4 x i32> %3
}

define <4 x i32> @combine_vec_lshr_lzcnt_bit1(<4 x i32> %x) {
; SSE-LABEL: combine_vec_lshr_lzcnt_bit1:
; SSE:       # %bb.0:
; SSE-NEXT:    pand {{.*}}(%rip), %xmm0
; SSE-NEXT:    movdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; SSE-NEXT:    movdqa %xmm0, %xmm1
; SSE-NEXT:    pand %xmm2, %xmm1
; SSE-NEXT:    movdqa {{.*#+}} xmm3 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
; SSE-NEXT:    movdqa %xmm3, %xmm4
; SSE-NEXT:    pshufb %xmm1, %xmm4
; SSE-NEXT:    movdqa %xmm0, %xmm1
; SSE-NEXT:    psrlw $4, %xmm1
; SSE-NEXT:    pand %xmm2, %xmm1
; SSE-NEXT:    pxor %xmm2, %xmm2
; SSE-NEXT:    pshufb %xmm1, %xmm3
; SSE-NEXT:    pcmpeqb %xmm2, %xmm1
; SSE-NEXT:    pand %xmm4, %xmm1
; SSE-NEXT:    paddb %xmm3, %xmm1
; SSE-NEXT:    movdqa %xmm0, %xmm3
; SSE-NEXT:    pcmpeqb %xmm2, %xmm3
; SSE-NEXT:    psrlw $8, %xmm3
; SSE-NEXT:    pand %xmm1, %xmm3
; SSE-NEXT:    psrlw $8, %xmm1
; SSE-NEXT:    paddw %xmm3, %xmm1
; SSE-NEXT:    pcmpeqw %xmm2, %xmm0
; SSE-NEXT:    psrld $16, %xmm0
; SSE-NEXT:    pand %xmm1, %xmm0
; SSE-NEXT:    psrld $16, %xmm1
; SSE-NEXT:    paddd %xmm0, %xmm1
; SSE-NEXT:    psrld $5, %xmm1
; SSE-NEXT:    movdqa %xmm1, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_lshr_lzcnt_bit1:
; AVX:       # %bb.0:
; AVX-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT:    vmovdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX-NEXT:    vpand %xmm1, %xmm0, %xmm2
; AVX-NEXT:    vmovdqa {{.*#+}} xmm3 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
; AVX-NEXT:    vpshufb %xmm2, %xmm3, %xmm2
; AVX-NEXT:    vpsrlw $4, %xmm0, %xmm4
; AVX-NEXT:    vpand %xmm1, %xmm4, %xmm1
; AVX-NEXT:    vpxor %xmm4, %xmm4, %xmm4
; AVX-NEXT:    vpcmpeqb %xmm4, %xmm1, %xmm5
; AVX-NEXT:    vpand %xmm5, %xmm2, %xmm2
; AVX-NEXT:    vpshufb %xmm1, %xmm3, %xmm1
; AVX-NEXT:    vpaddb %xmm1, %xmm2, %xmm1
; AVX-NEXT:    vpcmpeqb %xmm4, %xmm0, %xmm2
; AVX-NEXT:    vpsrlw $8, %xmm2, %xmm2
; AVX-NEXT:    vpand %xmm2, %xmm1, %xmm2
; AVX-NEXT:    vpsrlw $8, %xmm1, %xmm1
; AVX-NEXT:    vpaddw %xmm2, %xmm1, %xmm1
; AVX-NEXT:    vpcmpeqw %xmm4, %xmm0, %xmm0
; AVX-NEXT:    vpsrld $16, %xmm0, %xmm0
; AVX-NEXT:    vpand %xmm0, %xmm1, %xmm0
; AVX-NEXT:    vpsrld $16, %xmm1, %xmm1
; AVX-NEXT:    vpaddd %xmm0, %xmm1, %xmm0
; AVX-NEXT:    vpsrld $5, %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = and <4 x i32> %x, <i32 4, i32 32, i32 64, i32 128>
  %2 = call <4 x i32> @llvm.ctlz.v4i32(<4 x i32> %1, i1 0)
  %3 = lshr <4 x i32> %2, <i32 5, i32 5, i32 5, i32 5>
  ret <4 x i32> %3
}
declare <4 x i32> @llvm.ctlz.v4i32(<4 x i32>, i1)

; fold (srl x, (trunc (and y, c))) -> (srl x, (and (trunc y), (trunc c))).
define <4 x i32> @combine_vec_lshr_trunc_and(<4 x i32> %x, <4 x i64> %y) {
; SSE-LABEL: combine_vec_lshr_trunc_and:
; SSE:       # %bb.0:
; SSE-NEXT:    shufps {{.*#+}} xmm1 = xmm1[0,2],xmm2[0,2]
; SSE-NEXT:    andps {{.*}}(%rip), %xmm1
; SSE-NEXT:    movaps %xmm1, %xmm2
; SSE-NEXT:    psrldq {{.*#+}} xmm2 = xmm2[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; SSE-NEXT:    movdqa %xmm0, %xmm3
; SSE-NEXT:    psrld %xmm2, %xmm3
; SSE-NEXT:    movaps %xmm1, %xmm2
; SSE-NEXT:    psrlq $32, %xmm2
; SSE-NEXT:    movdqa %xmm0, %xmm4
; SSE-NEXT:    psrld %xmm2, %xmm4
; SSE-NEXT:    pblendw {{.*#+}} xmm4 = xmm4[0,1,2,3],xmm3[4,5,6,7]
; SSE-NEXT:    pxor %xmm2, %xmm2
; SSE-NEXT:    pmovzxdq {{.*#+}} xmm3 = xmm1[0],zero,xmm1[1],zero
; SSE-NEXT:    punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm2[2],xmm1[3],xmm2[3]
; SSE-NEXT:    movdqa %xmm0, %xmm2
; SSE-NEXT:    psrld %xmm1, %xmm2
; SSE-NEXT:    psrld %xmm3, %xmm0
; SSE-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm2[4,5,6,7]
; SSE-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm4[2,3],xmm0[4,5],xmm4[6,7]
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_lshr_trunc_and:
; AVX:       # %bb.0:
; AVX-NEXT:    vpshufd {{.*#+}} ymm1 = ymm1[0,2,2,3,4,6,6,7]
; AVX-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[0,2,2,3]
; AVX-NEXT:    vpand {{.*}}(%rip), %xmm1, %xmm1
; AVX-NEXT:    vpsrlvd %xmm1, %xmm0, %xmm0
; AVX-NEXT:    vzeroupper
; AVX-NEXT:    retq
  %1 = and <4 x i64> %y, <i64 15, i64 255, i64 4095, i64 65535>
  %2 = trunc <4 x i64> %1 to <4 x i32>
  %3 = lshr <4 x i32> %x, %2
  ret <4 x i32> %3
}