; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefixes=CHECK,SSE
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=CHECK,AVX,AVX2-SLOW
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=CHECK,AVX,AVX2-FAST-ALL
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=CHECK,AVX,AVX2-FAST-PERLANE
; fold (srl 0, x) -> 0
define <4 x i32> @combine_vec_lshr_zero(<4 x i32> %x) {
; SSE-LABEL: combine_vec_lshr_zero:
; SSE: # %bb.0:
; SSE-NEXT: xorps %xmm0, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_lshr_zero:
; AVX: # %bb.0:
; AVX-NEXT: vxorps %xmm0, %xmm0, %xmm0
; AVX-NEXT: retq
  %1 = lshr <4 x i32> zeroinitializer, %x
  ret <4 x i32> %1
}

; fold (srl x, c >= size(x)) -> undef
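; With i32 elements the bit width is 32, so a constant shift amount of 33 or more is out of
; range and the result is undefined (poison); the three tests below expect no shift
; instructions at all, just a return of the untouched input.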
define <4 x i32> @combine_vec_lshr_outofrange0(<4 x i32> %x) {
; CHECK-LABEL: combine_vec_lshr_outofrange0:
; CHECK: # %bb.0:
; CHECK-NEXT: retq
  %1 = lshr <4 x i32> %x, <i32 33, i32 33, i32 33, i32 33>
  ret <4 x i32> %1
}

define <4 x i32> @combine_vec_lshr_outofrange1(<4 x i32> %x) {
; CHECK-LABEL: combine_vec_lshr_outofrange1:
; CHECK: # %bb.0:
; CHECK-NEXT: retq
  %1 = lshr <4 x i32> %x, <i32 33, i32 34, i32 35, i32 36>
  ret <4 x i32> %1
}

define <4 x i32> @combine_vec_lshr_outofrange2(<4 x i32> %x) {
; CHECK-LABEL: combine_vec_lshr_outofrange2:
; CHECK: # %bb.0:
; CHECK-NEXT: retq
  %1 = lshr <4 x i32> %x, <i32 33, i32 34, i32 35, i32 undef>
  ret <4 x i32> %1
}

; fold (srl x, 0) -> x
define <4 x i32> @combine_vec_lshr_by_zero(<4 x i32> %x) {
; CHECK-LABEL: combine_vec_lshr_by_zero:
; CHECK: # %bb.0:
; CHECK-NEXT: retq
  %1 = lshr <4 x i32> %x, zeroinitializer
  ret <4 x i32> %1
}

; if (srl x, c) is known to be zero, return 0
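; Masking with 15 leaves at most the low 4 bits set, so a further shift right by 4 or more is
; known to be zero. Both lowerings fold the uniform case to zero; for the non-uniform amounts
; the SSE path still folds to zero while the AVX2 path, as its checks show, keeps the mask and
; variable shift.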
define <4 x i32> @combine_vec_lshr_known_zero0(<4 x i32> %x) {
; SSE-LABEL: combine_vec_lshr_known_zero0:
; SSE: # %bb.0:
; SSE-NEXT: xorps %xmm0, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_lshr_known_zero0:
; AVX: # %bb.0:
; AVX-NEXT: vxorps %xmm0, %xmm0, %xmm0
; AVX-NEXT: retq
  %1 = and <4 x i32> %x, <i32 15, i32 15, i32 15, i32 15>
  %2 = lshr <4 x i32> %1, <i32 4, i32 4, i32 4, i32 4>
  ret <4 x i32> %2
}

define <4 x i32> @combine_vec_lshr_known_zero1(<4 x i32> %x) {
; SSE-LABEL: combine_vec_lshr_known_zero1:
; SSE: # %bb.0:
; SSE-NEXT: xorps %xmm0, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_lshr_known_zero1:
; AVX: # %bb.0:
; AVX-NEXT: vpbroadcastd {{.*#+}} xmm1 = [15,15,15,15]
; AVX-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX-NEXT: vpsrlvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; AVX-NEXT: retq
  %1 = and <4 x i32> %x, <i32 15, i32 15, i32 15, i32 15>
  %2 = lshr <4 x i32> %1, <i32 8, i32 9, i32 10, i32 11>
  ret <4 x i32> %2
}

; fold (srl (srl x, c1), c2) -> (srl x, (add c1, c2))
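; For example, (x >> 2) >> 4 becomes a single x >> 6 (psrld/vpsrld $6). In the non-uniform
; test the amounts <0,1,2,3> and <4,5,6,7> combine into one shift by <4,6,8,10>, done with a
; single vpsrlvd on AVX2 or a psrld/pblendw sequence on SSE.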
define <4 x i32> @combine_vec_lshr_lshr0(<4 x i32> %x) {
; SSE-LABEL: combine_vec_lshr_lshr0:
; SSE: # %bb.0:
; SSE-NEXT: psrld $6, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_lshr_lshr0:
; AVX: # %bb.0:
; AVX-NEXT: vpsrld $6, %xmm0, %xmm0
; AVX-NEXT: retq
  %1 = lshr <4 x i32> %x, <i32 2, i32 2, i32 2, i32 2>
  %2 = lshr <4 x i32> %1, <i32 4, i32 4, i32 4, i32 4>
  ret <4 x i32> %2
}

define <4 x i32> @combine_vec_lshr_lshr1(<4 x i32> %x) {
; SSE-LABEL: combine_vec_lshr_lshr1:
; SSE: # %bb.0:
; SSE-NEXT: movdqa %xmm0, %xmm1
; SSE-NEXT: psrld $10, %xmm1
; SSE-NEXT: movdqa %xmm0, %xmm2
; SSE-NEXT: psrld $6, %xmm2
; SSE-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm1[4,5,6,7]
; SSE-NEXT: movdqa %xmm0, %xmm1
; SSE-NEXT: psrld $8, %xmm1
; SSE-NEXT: psrld $4, %xmm0
; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_lshr_lshr1:
; AVX: # %bb.0:
; AVX-NEXT: vpsrlvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; AVX-NEXT: retq
  %1 = lshr <4 x i32> %x, <i32 0, i32 1, i32 2, i32 3>
  %2 = lshr <4 x i32> %1, <i32 4, i32 5, i32 6, i32 7>
  ret <4 x i32> %2
}

; fold (srl (srl x, c1), c2) -> 0
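; Every combined shift amount here (16+20, and 17..20 plus 25..28) is at least 32, the full
; element width, so the result is known to be zero in all lanes.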
define <4 x i32> @combine_vec_lshr_lshr_zero0(<4 x i32> %x) {
; SSE-LABEL: combine_vec_lshr_lshr_zero0:
; SSE: # %bb.0:
; SSE-NEXT: xorps %xmm0, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_lshr_lshr_zero0:
; AVX: # %bb.0:
; AVX-NEXT: vxorps %xmm0, %xmm0, %xmm0
; AVX-NEXT: retq
  %1 = lshr <4 x i32> %x, <i32 16, i32 16, i32 16, i32 16>
  %2 = lshr <4 x i32> %1, <i32 20, i32 20, i32 20, i32 20>
  ret <4 x i32> %2
}

define <4 x i32> @combine_vec_lshr_lshr_zero1(<4 x i32> %x) {
; SSE-LABEL: combine_vec_lshr_lshr_zero1:
; SSE: # %bb.0:
; SSE-NEXT: xorps %xmm0, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_lshr_lshr_zero1:
; AVX: # %bb.0:
; AVX-NEXT: vxorps %xmm0, %xmm0, %xmm0
; AVX-NEXT: retq
  %1 = lshr <4 x i32> %x, <i32 17, i32 18, i32 19, i32 20>
  %2 = lshr <4 x i32> %1, <i32 25, i32 26, i32 27, i32 28>
  ret <4 x i32> %2
}

; fold (srl (trunc (srl x, c1)), c2) -> (trunc (srl x, (add c1, c2)))
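; Shifting the i64 lanes by 32 and then the truncated i32 lanes by 16 is equivalent to a
; single 48-bit shift of the i64 lanes before truncation, hence the psrlq $48 / packusdw
; sequence in the uniform case below.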
define <4 x i32> @combine_vec_lshr_trunc_lshr0(<4 x i64> %x) {
; SSE-LABEL: combine_vec_lshr_trunc_lshr0:
; SSE: # %bb.0:
; SSE-NEXT: psrlq $48, %xmm1
; SSE-NEXT: psrlq $48, %xmm0
; SSE-NEXT: packusdw %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_lshr_trunc_lshr0:
; AVX: # %bb.0:
; AVX-NEXT: vpsrlq $48, %ymm0, %ymm0
; AVX-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX-NEXT: vpackusdw %xmm1, %xmm0, %xmm0
; AVX-NEXT: vzeroupper
; AVX-NEXT: retq
  %1 = lshr <4 x i64> %x, <i64 32, i64 32, i64 32, i64 32>
  %2 = trunc <4 x i64> %1 to <4 x i32>
  %3 = lshr <4 x i32> %2, <i32 16, i32 16, i32 16, i32 16>
  ret <4 x i32> %3
}

define <4 x i32> @combine_vec_lshr_trunc_lshr1(<4 x i64> %x) {
; SSE-LABEL: combine_vec_lshr_trunc_lshr1:
; SSE: # %bb.0:
; SSE-NEXT: movdqa %xmm1, %xmm2
; SSE-NEXT: psrlq $35, %xmm2
; SSE-NEXT: psrlq $34, %xmm1
; SSE-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm2[4,5,6,7]
; SSE-NEXT: movdqa %xmm0, %xmm2
; SSE-NEXT: psrlq $33, %xmm2
; SSE-NEXT: psrlq $32, %xmm0
; SSE-NEXT: pblendw {{.*#+}} xmm2 = xmm0[0,1,2,3],xmm2[4,5,6,7]
; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2],xmm1[0,2]
; SSE-NEXT: movaps %xmm2, %xmm1
; SSE-NEXT: psrld $19, %xmm1
; SSE-NEXT: movaps %xmm2, %xmm3
; SSE-NEXT: psrld $17, %xmm3
; SSE-NEXT: pblendw {{.*#+}} xmm3 = xmm3[0,1,2,3],xmm1[4,5,6,7]
; SSE-NEXT: psrld $18, %xmm2
; SSE-NEXT: psrld $16, %xmm0
; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm2[4,5,6,7]
; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm3[2,3],xmm0[4,5],xmm3[6,7]
; SSE-NEXT: retq
;
; AVX2-SLOW-LABEL: combine_vec_lshr_trunc_lshr1:
; AVX2-SLOW: # %bb.0:
; AVX2-SLOW-NEXT: vpsrlvq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
; AVX2-SLOW-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
; AVX2-SLOW-NEXT: vpsrlvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; AVX2-SLOW-NEXT: vzeroupper
; AVX2-SLOW-NEXT: retq
;
; AVX2-FAST-ALL-LABEL: combine_vec_lshr_trunc_lshr1:
; AVX2-FAST-ALL: # %bb.0:
; AVX2-FAST-ALL-NEXT: vpsrlvq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
; AVX2-FAST-ALL-NEXT: vmovdqa {{.*#+}} ymm1 = <0,2,4,6,u,u,u,u>
; AVX2-FAST-ALL-NEXT: vpermd %ymm0, %ymm1, %ymm0
; AVX2-FAST-ALL-NEXT: vpsrlvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; AVX2-FAST-ALL-NEXT: vzeroupper
; AVX2-FAST-ALL-NEXT: retq
;
; AVX2-FAST-PERLANE-LABEL: combine_vec_lshr_trunc_lshr1:
; AVX2-FAST-PERLANE: # %bb.0:
; AVX2-FAST-PERLANE-NEXT: vpsrlvq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
; AVX2-FAST-PERLANE-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
; AVX2-FAST-PERLANE-NEXT: vpsrlvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; AVX2-FAST-PERLANE-NEXT: vzeroupper
; AVX2-FAST-PERLANE-NEXT: retq
  %1 = lshr <4 x i64> %x, <i64 32, i64 33, i64 34, i64 35>
  %2 = trunc <4 x i64> %1 to <4 x i32>
  %3 = lshr <4 x i32> %2, <i32 16, i32 17, i32 18, i32 19>
  ret <4 x i32> %3
}

; fold (srl (trunc (srl x, c1)), c2) -> 0
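; After an i64 shift right by 48, at most 16 bits survive the truncation to i32, so a further
; shift by 24 clears every lane. SSE folds both tests to zero; for the non-uniform amounts the
; AVX2 lowering, as its checks show, still emits the shift/pack/shift sequence.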
define <4 x i32> @combine_vec_lshr_trunc_lshr_zero0(<4 x i64> %x) {
; SSE-LABEL: combine_vec_lshr_trunc_lshr_zero0:
; SSE: # %bb.0:
; SSE-NEXT: xorps %xmm0, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_lshr_trunc_lshr_zero0:
; AVX: # %bb.0:
; AVX-NEXT: vxorps %xmm0, %xmm0, %xmm0
; AVX-NEXT: retq
  %1 = lshr <4 x i64> %x, <i64 48, i64 48, i64 48, i64 48>
  %2 = trunc <4 x i64> %1 to <4 x i32>
  %3 = lshr <4 x i32> %2, <i32 24, i32 24, i32 24, i32 24>
  ret <4 x i32> %3
}

define <4 x i32> @combine_vec_lshr_trunc_lshr_zero1(<4 x i64> %x) {
; SSE-LABEL: combine_vec_lshr_trunc_lshr_zero1:
; SSE: # %bb.0:
; SSE-NEXT: xorps %xmm0, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_lshr_trunc_lshr_zero1:
; AVX: # %bb.0:
; AVX-NEXT: vpsrlvq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
; AVX-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX-NEXT: vpackusdw %xmm1, %xmm0, %xmm0
; AVX-NEXT: vpsrlvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; AVX-NEXT: vzeroupper
; AVX-NEXT: retq
  %1 = lshr <4 x i64> %x, <i64 48, i64 49, i64 50, i64 51>
  %2 = trunc <4 x i64> %1 to <4 x i32>
  %3 = lshr <4 x i32> %2, <i32 24, i32 25, i32 26, i32 27>
  ret <4 x i32> %3
}

; fold (srl (shl x, c), c) -> (and x, cst2)
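; Shifting left and then right by the same amount only clears the bits shifted out at the top,
; so the pair becomes an AND: for a shift of 2 the mask is 0x3fffffff (1073741823), matching
; the constant in the checks below.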
define <4 x i32> @combine_vec_lshr_shl_mask0(<4 x i32> %x) {
; SSE-LABEL: combine_vec_lshr_shl_mask0:
; SSE: # %bb.0:
; SSE-NEXT: andps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_lshr_shl_mask0:
; AVX: # %bb.0:
; AVX-NEXT: vbroadcastss {{.*#+}} xmm1 = [1073741823,1073741823,1073741823,1073741823]
; AVX-NEXT: vandps %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
  %1 = shl <4 x i32> %x, <i32 2, i32 2, i32 2, i32 2>
  %2 = lshr <4 x i32> %1, <i32 2, i32 2, i32 2, i32 2>
  ret <4 x i32> %2
}

define <4 x i32> @combine_vec_lshr_shl_mask1(<4 x i32> %x) {
; SSE-LABEL: combine_vec_lshr_shl_mask1:
; SSE: # %bb.0:
; SSE-NEXT: andps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_lshr_shl_mask1:
; AVX: # %bb.0:
; AVX-NEXT: vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; AVX-NEXT: retq
  %1 = shl <4 x i32> %x, <i32 2, i32 3, i32 4, i32 5>
  %2 = lshr <4 x i32> %1, <i32 2, i32 3, i32 4, i32 5>
  ret <4 x i32> %2
}

; fold (srl (sra X, Y), 31) -> (srl X, 31)
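; A shift right by 31 keeps only the sign bit, and an arithmetic shift never changes the sign
; bit, so the ashr is dropped and a single psrld $31 of %x remains.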
define <4 x i32> @combine_vec_lshr_ashr_sign(<4 x i32> %x, <4 x i32> %y) {
; SSE-LABEL: combine_vec_lshr_ashr_sign:
; SSE: # %bb.0:
; SSE-NEXT: psrld $31, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_lshr_ashr_sign:
; AVX: # %bb.0:
; AVX-NEXT: vpsrld $31, %xmm0, %xmm0
; AVX-NEXT: retq
  %1 = ashr <4 x i32> %x, %y
  %2 = lshr <4 x i32> %1, <i32 31, i32 31, i32 31, i32 31>
  ret <4 x i32> %2
}

; fold (srl (ctlz x), "5") -> x  iff x has one bit set (the low bit).
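; After masking with 16, each lane is either 0 or 16, so ctlz is 32 or 27 and (ctlz >> 5) is
; 1 or 0. That reduces to ((x & 16) >> 4) ^ 1, which is the pand/psrld/pxor sequence checked
; below. The second test masks a different bit in each lane and, as its checks show, still
; goes through the full vector ctlz expansion before the final shift by 5.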
define <4 x i32> @combine_vec_lshr_lzcnt_bit0(<4 x i32> %x) {
; SSE-LABEL: combine_vec_lshr_lzcnt_bit0:
; SSE: # %bb.0:
; SSE-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE-NEXT: psrld $4, %xmm0
; SSE-NEXT: pxor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_lshr_lzcnt_bit0:
; AVX: # %bb.0:
; AVX-NEXT: vpbroadcastd {{.*#+}} xmm1 = [16,16,16,16]
; AVX-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX-NEXT: vpsrld $4, %xmm0, %xmm0
; AVX-NEXT: vpbroadcastd {{.*#+}} xmm1 = [1,1,1,1]
; AVX-NEXT: vpxor %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
  %1 = and <4 x i32> %x, <i32 16, i32 16, i32 16, i32 16>
  %2 = call <4 x i32> @llvm.ctlz.v4i32(<4 x i32> %1, i1 0)
  %3 = lshr <4 x i32> %2, <i32 5, i32 5, i32 5, i32 5>
  ret <4 x i32> %3
}

define <4 x i32> @combine_vec_lshr_lzcnt_bit1(<4 x i32> %x) {
; SSE-LABEL: combine_vec_lshr_lzcnt_bit1:
; SSE: # %bb.0:
; SSE-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE-NEXT: movdqa {{.*#+}} xmm2 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
; SSE-NEXT: movdqa %xmm2, %xmm3
; SSE-NEXT: pshufb %xmm0, %xmm3
; SSE-NEXT: movdqa %xmm0, %xmm1
; SSE-NEXT: psrlw $4, %xmm1
; SSE-NEXT: pxor %xmm4, %xmm4
; SSE-NEXT: pshufb %xmm1, %xmm2
; SSE-NEXT: pcmpeqb %xmm4, %xmm1
; SSE-NEXT: pand %xmm3, %xmm1
; SSE-NEXT: paddb %xmm2, %xmm1
; SSE-NEXT: movdqa %xmm0, %xmm2
; SSE-NEXT: pcmpeqb %xmm4, %xmm2
; SSE-NEXT: psrlw $8, %xmm2
; SSE-NEXT: pand %xmm1, %xmm2
; SSE-NEXT: psrlw $8, %xmm1
; SSE-NEXT: paddw %xmm2, %xmm1
; SSE-NEXT: pcmpeqw %xmm4, %xmm0
; SSE-NEXT: psrld $16, %xmm0
; SSE-NEXT: pand %xmm1, %xmm0
; SSE-NEXT: psrld $16, %xmm1
; SSE-NEXT: paddd %xmm0, %xmm1
; SSE-NEXT: psrld $5, %xmm1
; SSE-NEXT: movdqa %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_lshr_lzcnt_bit1:
; AVX: # %bb.0:
; AVX-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; AVX-NEXT: vmovdqa {{.*#+}} xmm1 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
; AVX-NEXT: vpshufb %xmm0, %xmm1, %xmm2
; AVX-NEXT: vpsrlw $4, %xmm0, %xmm3
; AVX-NEXT: vpxor %xmm4, %xmm4, %xmm4
; AVX-NEXT: vpcmpeqb %xmm4, %xmm3, %xmm5
; AVX-NEXT: vpand %xmm5, %xmm2, %xmm2
; AVX-NEXT: vpshufb %xmm3, %xmm1, %xmm1
; AVX-NEXT: vpaddb %xmm1, %xmm2, %xmm1
; AVX-NEXT: vpcmpeqb %xmm4, %xmm0, %xmm2
; AVX-NEXT: vpsrlw $8, %xmm2, %xmm2
; AVX-NEXT: vpand %xmm2, %xmm1, %xmm2
; AVX-NEXT: vpsrlw $8, %xmm1, %xmm1
; AVX-NEXT: vpaddw %xmm2, %xmm1, %xmm1
; AVX-NEXT: vpcmpeqw %xmm4, %xmm0, %xmm0
; AVX-NEXT: vpsrld $16, %xmm0, %xmm0
; AVX-NEXT: vpand %xmm0, %xmm1, %xmm0
; AVX-NEXT: vpsrld $16, %xmm1, %xmm1
; AVX-NEXT: vpaddd %xmm0, %xmm1, %xmm0
; AVX-NEXT: vpsrld $5, %xmm0, %xmm0
; AVX-NEXT: retq
  %1 = and <4 x i32> %x, <i32 4, i32 32, i32 64, i32 128>
  %2 = call <4 x i32> @llvm.ctlz.v4i32(<4 x i32> %1, i1 0)
  %3 = lshr <4 x i32> %2, <i32 5, i32 5, i32 5, i32 5>
  ret <4 x i32> %3
}
declare <4 x i32> @llvm.ctlz.v4i32(<4 x i32>, i1)

; fold (srl x, (trunc (and y, c))) -> (srl x, (and (trunc y), (trunc c))).
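; Truncation distributes over the AND, so the i64 mask-and-truncate of %y can be narrowed to
; a 32-bit AND of the truncated value, letting AVX2 feed the shift amounts straight into
; vpsrlvd.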
define <4 x i32> @combine_vec_lshr_trunc_and(<4 x i32> %x, <4 x i64> %y) {
; SSE-LABEL: combine_vec_lshr_trunc_and:
; SSE: # %bb.0:
; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2],xmm2[0,2]
; SSE-NEXT: andps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm1[2,3,3,3,4,5,6,7]
; SSE-NEXT: movdqa %xmm0, %xmm3
; SSE-NEXT: psrld %xmm2, %xmm3
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm1[2,3,2,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm2[2,3,3,3,4,5,6,7]
; SSE-NEXT: movdqa %xmm0, %xmm5
; SSE-NEXT: psrld %xmm4, %xmm5
; SSE-NEXT: pblendw {{.*#+}} xmm5 = xmm3[0,1,2,3],xmm5[4,5,6,7]
; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,1,1,1,4,5,6,7]
; SSE-NEXT: movdqa %xmm0, %xmm3
; SSE-NEXT: psrld %xmm1, %xmm3
; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm2[0,1,1,1,4,5,6,7]
; SSE-NEXT: psrld %xmm1, %xmm0
; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm3[0,1,2,3],xmm0[4,5,6,7]
; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm5[2,3],xmm0[4,5],xmm5[6,7]
; SSE-NEXT: retq
;
; AVX2-SLOW-LABEL: combine_vec_lshr_trunc_and:
; AVX2-SLOW: # %bb.0:
; AVX2-SLOW-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,2],xmm2[0,2]
; AVX2-SLOW-NEXT: vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
; AVX2-SLOW-NEXT: vpsrlvd %xmm1, %xmm0, %xmm0
; AVX2-SLOW-NEXT: vzeroupper
; AVX2-SLOW-NEXT: retq
;
; AVX2-FAST-ALL-LABEL: combine_vec_lshr_trunc_and:
; AVX2-FAST-ALL: # %bb.0:
; AVX2-FAST-ALL-NEXT: vmovdqa {{.*#+}} ymm2 = <0,2,4,6,u,u,u,u>
; AVX2-FAST-ALL-NEXT: vpermd %ymm1, %ymm2, %ymm1
; AVX2-FAST-ALL-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
; AVX2-FAST-ALL-NEXT: vpsrlvd %xmm1, %xmm0, %xmm0
; AVX2-FAST-ALL-NEXT: vzeroupper
; AVX2-FAST-ALL-NEXT: retq
;
; AVX2-FAST-PERLANE-LABEL: combine_vec_lshr_trunc_and:
; AVX2-FAST-PERLANE: # %bb.0:
; AVX2-FAST-PERLANE-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,2],xmm2[0,2]
; AVX2-FAST-PERLANE-NEXT: vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
; AVX2-FAST-PERLANE-NEXT: vpsrlvd %xmm1, %xmm0, %xmm0
; AVX2-FAST-PERLANE-NEXT: vzeroupper
; AVX2-FAST-PERLANE-NEXT: retq
  %1 = and <4 x i64> %y, <i64 15, i64 255, i64 4095, i64 65535>
  %2 = trunc <4 x i64> %1 to <4 x i32>
  %3 = lshr <4 x i32> %x, %2
  ret <4 x i32> %3
}