; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefixes=CHECK,SSE
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=CHECK,AVX,AVX2-SLOW
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=CHECK,AVX,AVX2-FAST-ALL
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=CHECK,AVX,AVX2-FAST-PERLANE

; fold (srl 0, x) -> 0
define <4 x i32> @combine_vec_lshr_zero(<4 x i32> %x) {
; SSE-LABEL: combine_vec_lshr_zero:
; SSE:       # %bb.0:
; SSE-NEXT:    xorps %xmm0, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_lshr_zero:
; AVX:       # %bb.0:
; AVX-NEXT:    vxorps %xmm0, %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = lshr <4 x i32> zeroinitializer, %x
  ret <4 x i32> %1
}

; fold (srl x, c >= size(x)) -> undef
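; (shift amounts of 33+ meet or exceed the 32-bit element width, so the
; result is undefined and each test just expects a bare retq)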
define <4 x i32> @combine_vec_lshr_outofrange0(<4 x i32> %x) {
; CHECK-LABEL: combine_vec_lshr_outofrange0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    retq
  %1 = lshr <4 x i32> %x, <i32 33, i32 33, i32 33, i32 33>
  ret <4 x i32> %1
}

define <4 x i32> @combine_vec_lshr_outofrange1(<4 x i32> %x) {
; CHECK-LABEL: combine_vec_lshr_outofrange1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    retq
  %1 = lshr <4 x i32> %x, <i32 33, i32 34, i32 35, i32 36>
  ret <4 x i32> %1
}

define <4 x i32> @combine_vec_lshr_outofrange2(<4 x i32> %x) {
; CHECK-LABEL: combine_vec_lshr_outofrange2:
; CHECK:       # %bb.0:
; CHECK-NEXT:    retq
  %1 = lshr <4 x i32> %x, <i32 33, i32 34, i32 35, i32 undef>
  ret <4 x i32> %1
}

; fold (srl x, 0) -> x
define <4 x i32> @combine_vec_lshr_by_zero(<4 x i32> %x) {
; CHECK-LABEL: combine_vec_lshr_by_zero:
; CHECK:       # %bb.0:
; CHECK-NEXT:    retq
  %1 = lshr <4 x i32> %x, zeroinitializer
  ret <4 x i32> %1
}

; if (srl x, c) is known to be zero, return 0
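; (e.g. (x & 15) has no bits above bit 3, so any lshr by 4 or more is
; provably zero and both tests fold to an all-zeros vector)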
define <4 x i32> @combine_vec_lshr_known_zero0(<4 x i32> %x) {
; SSE-LABEL: combine_vec_lshr_known_zero0:
; SSE:       # %bb.0:
; SSE-NEXT:    xorps %xmm0, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_lshr_known_zero0:
; AVX:       # %bb.0:
; AVX-NEXT:    vxorps %xmm0, %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = and <4 x i32> %x, <i32 15, i32 15, i32 15, i32 15>
  %2 = lshr <4 x i32> %1, <i32 4, i32 4, i32 4, i32 4>
  ret <4 x i32> %2
}

define <4 x i32> @combine_vec_lshr_known_zero1(<4 x i32> %x) {
; SSE-LABEL: combine_vec_lshr_known_zero1:
; SSE:       # %bb.0:
; SSE-NEXT:    xorps %xmm0, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_lshr_known_zero1:
; AVX:       # %bb.0:
; AVX-NEXT:    vxorps %xmm0, %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = and <4 x i32> %x, <i32 15, i32 15, i32 15, i32 15>
  %2 = lshr <4 x i32> %1, <i32 8, i32 9, i32 10, i32 11>
  ret <4 x i32> %2
}

; fold (srl (srl x, c1), c2) -> (srl x, (add c1, c2))
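; (e.g. (x >> 2) >> 4 becomes x >> 6, a single psrld/vpsrld, since the
; combined amount 2+4 = 6 is still below the element width)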
define <4 x i32> @combine_vec_lshr_lshr0(<4 x i32> %x) {
; SSE-LABEL: combine_vec_lshr_lshr0:
; SSE:       # %bb.0:
; SSE-NEXT:    psrld $6, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_lshr_lshr0:
; AVX:       # %bb.0:
; AVX-NEXT:    vpsrld $6, %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = lshr <4 x i32> %x, <i32 2, i32 2, i32 2, i32 2>
  %2 = lshr <4 x i32> %1, <i32 4, i32 4, i32 4, i32 4>
  ret <4 x i32> %2
}

define <4 x i32> @combine_vec_lshr_lshr1(<4 x i32> %x) {
; SSE-LABEL: combine_vec_lshr_lshr1:
; SSE:       # %bb.0:
; SSE-NEXT:    movdqa %xmm0, %xmm1
; SSE-NEXT:    psrld $10, %xmm1
; SSE-NEXT:    movdqa %xmm0, %xmm2
; SSE-NEXT:    psrld $6, %xmm2
; SSE-NEXT:    pblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm1[4,5,6,7]
; SSE-NEXT:    movdqa %xmm0, %xmm1
; SSE-NEXT:    psrld $8, %xmm1
; SSE-NEXT:    psrld $4, %xmm0
; SSE-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
; SSE-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_lshr_lshr1:
; AVX:       # %bb.0:
; AVX-NEXT:    vpsrlvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = lshr <4 x i32> %x, <i32 0, i32 1, i32 2, i32 3>
  %2 = lshr <4 x i32> %1, <i32 4, i32 5, i32 6, i32 7>
  ret <4 x i32> %2
}

; fold (srl (srl x, c1), c2) -> 0
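; (the combined amounts, e.g. 16+20 = 36, meet or exceed the 32-bit
; element width, so the result is known zero)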
define <4 x i32> @combine_vec_lshr_lshr_zero0(<4 x i32> %x) {
; SSE-LABEL: combine_vec_lshr_lshr_zero0:
; SSE:       # %bb.0:
; SSE-NEXT:    xorps %xmm0, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_lshr_lshr_zero0:
; AVX:       # %bb.0:
; AVX-NEXT:    vxorps %xmm0, %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = lshr <4 x i32> %x, <i32 16, i32 16, i32 16, i32 16>
  %2 = lshr <4 x i32> %1, <i32 20, i32 20, i32 20, i32 20>
  ret <4 x i32> %2
}

define <4 x i32> @combine_vec_lshr_lshr_zero1(<4 x i32> %x) {
; SSE-LABEL: combine_vec_lshr_lshr_zero1:
; SSE:       # %bb.0:
; SSE-NEXT:    xorps %xmm0, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_lshr_lshr_zero1:
; AVX:       # %bb.0:
; AVX-NEXT:    vxorps %xmm0, %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = lshr <4 x i32> %x, <i32 17, i32 18, i32 19, i32 20>
  %2 = lshr <4 x i32> %1, <i32 25, i32 26, i32 27, i32 28>
  ret <4 x i32> %2
}

; fold (srl (trunc (srl x, c1)), c2) -> (trunc (srl x, (add c1, c2)))
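; (e.g. (trunc (x >> 32)) >> 16 becomes trunc(x >> 48): the single
; psrlq $48 below performs the whole shift before the truncation)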
define <4 x i32> @combine_vec_lshr_trunc_lshr0(<4 x i64> %x) {
; SSE-LABEL: combine_vec_lshr_trunc_lshr0:
; SSE:       # %bb.0:
; SSE-NEXT:    psrlq $48, %xmm1
; SSE-NEXT:    psrlq $48, %xmm0
; SSE-NEXT:    packusdw %xmm1, %xmm0
; SSE-NEXT:    retq
;
; AVX2-SLOW-LABEL: combine_vec_lshr_trunc_lshr0:
; AVX2-SLOW:       # %bb.0:
; AVX2-SLOW-NEXT:    vpsrlq $48, %ymm0, %ymm0
; AVX2-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm1
; AVX2-SLOW-NEXT:    vpackusdw %xmm1, %xmm0, %xmm0
; AVX2-SLOW-NEXT:    vzeroupper
; AVX2-SLOW-NEXT:    retq
;
; AVX2-FAST-ALL-LABEL: combine_vec_lshr_trunc_lshr0:
; AVX2-FAST-ALL:       # %bb.0:
; AVX2-FAST-ALL-NEXT:    vpsrlq $48, %ymm0, %ymm0
; AVX2-FAST-ALL-NEXT:    vbroadcasti128 {{.*#+}} ymm1 = [0,2,4,6,0,2,4,6]
; AVX2-FAST-ALL-NEXT:    # ymm1 = mem[0,1,0,1]
; AVX2-FAST-ALL-NEXT:    vpermd %ymm0, %ymm1, %ymm0
; AVX2-FAST-ALL-NEXT:    # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-FAST-ALL-NEXT:    vzeroupper
; AVX2-FAST-ALL-NEXT:    retq
;
; AVX2-FAST-PERLANE-LABEL: combine_vec_lshr_trunc_lshr0:
; AVX2-FAST-PERLANE:       # %bb.0:
; AVX2-FAST-PERLANE-NEXT:    vpsrlq $48, %ymm0, %ymm0
; AVX2-FAST-PERLANE-NEXT:    vextracti128 $1, %ymm0, %xmm1
; AVX2-FAST-PERLANE-NEXT:    vpackusdw %xmm1, %xmm0, %xmm0
; AVX2-FAST-PERLANE-NEXT:    vzeroupper
; AVX2-FAST-PERLANE-NEXT:    retq
  %1 = lshr <4 x i64> %x, <i64 32, i64 32, i64 32, i64 32>
  %2 = trunc <4 x i64> %1 to <4 x i32>
  %3 = lshr <4 x i32> %2, <i32 16, i32 16, i32 16, i32 16>
  ret <4 x i32> %3
}

define <4 x i32> @combine_vec_lshr_trunc_lshr1(<4 x i64> %x) {
; SSE-LABEL: combine_vec_lshr_trunc_lshr1:
; SSE:       # %bb.0:
; SSE-NEXT:    movdqa %xmm1, %xmm2
; SSE-NEXT:    psrlq $35, %xmm2
; SSE-NEXT:    psrlq $34, %xmm1
; SSE-NEXT:    pblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm2[4,5,6,7]
; SSE-NEXT:    movdqa %xmm0, %xmm2
; SSE-NEXT:    psrlq $33, %xmm2
; SSE-NEXT:    psrlq $32, %xmm0
; SSE-NEXT:    pblendw {{.*#+}} xmm2 = xmm0[0,1,2,3],xmm2[4,5,6,7]
; SSE-NEXT:    shufps {{.*#+}} xmm2 = xmm2[0,2],xmm1[0,2]
; SSE-NEXT:    movaps %xmm2, %xmm1
; SSE-NEXT:    psrld $19, %xmm1
; SSE-NEXT:    movaps %xmm2, %xmm3
; SSE-NEXT:    psrld $17, %xmm3
; SSE-NEXT:    pblendw {{.*#+}} xmm3 = xmm3[0,1,2,3],xmm1[4,5,6,7]
; SSE-NEXT:    psrld $18, %xmm2
; SSE-NEXT:    psrld $16, %xmm0
; SSE-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm2[4,5,6,7]
; SSE-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm3[2,3],xmm0[4,5],xmm3[6,7]
; SSE-NEXT:    retq
;
; AVX2-SLOW-LABEL: combine_vec_lshr_trunc_lshr1:
; AVX2-SLOW:       # %bb.0:
; AVX2-SLOW-NEXT:    vpsrlvq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
; AVX2-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm1
; AVX2-SLOW-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
; AVX2-SLOW-NEXT:    vpsrlvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; AVX2-SLOW-NEXT:    vzeroupper
; AVX2-SLOW-NEXT:    retq
;
; AVX2-FAST-ALL-LABEL: combine_vec_lshr_trunc_lshr1:
; AVX2-FAST-ALL:       # %bb.0:
; AVX2-FAST-ALL-NEXT:    vpsrlvq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
; AVX2-FAST-ALL-NEXT:    vbroadcasti128 {{.*#+}} ymm1 = [0,2,4,6,0,2,4,6]
; AVX2-FAST-ALL-NEXT:    # ymm1 = mem[0,1,0,1]
; AVX2-FAST-ALL-NEXT:    vpermd %ymm0, %ymm1, %ymm0
; AVX2-FAST-ALL-NEXT:    vpsrlvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; AVX2-FAST-ALL-NEXT:    vzeroupper
; AVX2-FAST-ALL-NEXT:    retq
;
; AVX2-FAST-PERLANE-LABEL: combine_vec_lshr_trunc_lshr1:
; AVX2-FAST-PERLANE:       # %bb.0:
; AVX2-FAST-PERLANE-NEXT:    vpsrlvq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
; AVX2-FAST-PERLANE-NEXT:    vextracti128 $1, %ymm0, %xmm1
; AVX2-FAST-PERLANE-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
; AVX2-FAST-PERLANE-NEXT:    vpsrlvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; AVX2-FAST-PERLANE-NEXT:    vzeroupper
; AVX2-FAST-PERLANE-NEXT:    retq
  %1 = lshr <4 x i64> %x, <i64 32, i64 33, i64 34, i64 35>
  %2 = trunc <4 x i64> %1 to <4 x i32>
  %3 = lshr <4 x i32> %2, <i32 16, i32 17, i32 18, i32 19>
  ret <4 x i32> %3
}

; fold (srl (trunc (srl x, c1)), c2) -> 0
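; (an i64 shift by 48 leaves at most 16 significant bits after the
; truncation, and the following i32 shift by 24 clears those too)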
define <4 x i32> @combine_vec_lshr_trunc_lshr_zero0(<4 x i64> %x) {
; SSE-LABEL: combine_vec_lshr_trunc_lshr_zero0:
; SSE:       # %bb.0:
; SSE-NEXT:    xorps %xmm0, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_lshr_trunc_lshr_zero0:
; AVX:       # %bb.0:
; AVX-NEXT:    vxorps %xmm0, %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = lshr <4 x i64> %x, <i64 48, i64 48, i64 48, i64 48>
  %2 = trunc <4 x i64> %1 to <4 x i32>
  %3 = lshr <4 x i32> %2, <i32 24, i32 24, i32 24, i32 24>
  ret <4 x i32> %3
}

define <4 x i32> @combine_vec_lshr_trunc_lshr_zero1(<4 x i64> %x) {
; SSE-LABEL: combine_vec_lshr_trunc_lshr_zero1:
; SSE:       # %bb.0:
; SSE-NEXT:    xorps %xmm0, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_lshr_trunc_lshr_zero1:
; AVX:       # %bb.0:
; AVX-NEXT:    vxorps %xmm0, %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = lshr <4 x i64> %x, <i64 48, i64 49, i64 50, i64 51>
  %2 = trunc <4 x i64> %1 to <4 x i32>
  %3 = lshr <4 x i32> %2, <i32 24, i32 25, i32 26, i32 27>
  ret <4 x i32> %3
}

; fold (srl (shl x, c), c) -> (and x, cst2)
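; (e.g. (x << 2) >> 2 keeps the low 30 bits, i.e. an and with
; (1 << 30) - 1 = 1073741823)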
define <4 x i32> @combine_vec_lshr_shl_mask0(<4 x i32> %x) {
; SSE-LABEL: combine_vec_lshr_shl_mask0:
; SSE:       # %bb.0:
; SSE-NEXT:    andps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_lshr_shl_mask0:
; AVX:       # %bb.0:
; AVX-NEXT:    vbroadcastss {{.*#+}} xmm1 = [1073741823,1073741823,1073741823,1073741823]
; AVX-NEXT:    vandps %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = shl <4 x i32> %x, <i32 2, i32 2, i32 2, i32 2>
  %2 = lshr <4 x i32> %1, <i32 2, i32 2, i32 2, i32 2>
  ret <4 x i32> %2
}

define <4 x i32> @combine_vec_lshr_shl_mask1(<4 x i32> %x) {
; SSE-LABEL: combine_vec_lshr_shl_mask1:
; SSE:       # %bb.0:
; SSE-NEXT:    andps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_lshr_shl_mask1:
; AVX:       # %bb.0:
; AVX-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = shl <4 x i32> %x, <i32 2, i32 3, i32 4, i32 5>
  %2 = lshr <4 x i32> %1, <i32 2, i32 3, i32 4, i32 5>
  ret <4 x i32> %2
}

; fold (srl (sra X, Y), 31) -> (srl X, 31)
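; (lshr by 31 extracts the sign bit, which ashr preserves for any shift
; amount, so the inner ashr can be dropped)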
define <4 x i32> @combine_vec_lshr_ashr_sign(<4 x i32> %x, <4 x i32> %y) {
; SSE-LABEL: combine_vec_lshr_ashr_sign:
; SSE:       # %bb.0:
; SSE-NEXT:    psrld $31, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_lshr_ashr_sign:
; AVX:       # %bb.0:
; AVX-NEXT:    vpsrld $31, %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = ashr <4 x i32> %x, %y
  %2 = lshr <4 x i32> %1, <i32 31, i32 31, i32 31, i32 31>
  ret <4 x i32> %2
}

; fold (srl (ctlz x), "5") -> x iff x has one bit set (the low bit).
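; (in bit0 each lane is 0 or 16, so ctlz is 32 or 27 and the shift by 5
; yields 1 or 0, i.e. the inverse of bit 4, hence the psrld+pandn; in
; bit1 several bit positions are possible, so the ctlz expansion remains)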
define <4 x i32> @combine_vec_lshr_lzcnt_bit0(<4 x i32> %x) {
; SSE-LABEL: combine_vec_lshr_lzcnt_bit0:
; SSE:       # %bb.0:
; SSE-NEXT:    psrld $4, %xmm0
; SSE-NEXT:    pandn {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_lshr_lzcnt_bit0:
; AVX:       # %bb.0:
; AVX-NEXT:    vpsrld $4, %xmm0, %xmm0
; AVX-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [1,1,1,1]
; AVX-NEXT:    vpandn %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = and <4 x i32> %x, <i32 16, i32 16, i32 16, i32 16>
  %2 = call <4 x i32> @llvm.ctlz.v4i32(<4 x i32> %1, i1 0)
  %3 = lshr <4 x i32> %2, <i32 5, i32 5, i32 5, i32 5>
  ret <4 x i32> %3
}

define <4 x i32> @combine_vec_lshr_lzcnt_bit1(<4 x i32> %x) {
; SSE-LABEL: combine_vec_lshr_lzcnt_bit1:
; SSE:       # %bb.0:
; SSE-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE-NEXT:    movdqa {{.*#+}} xmm1 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
; SSE-NEXT:    movdqa %xmm1, %xmm2
; SSE-NEXT:    pshufb %xmm0, %xmm2
; SSE-NEXT:    psrlw $4, %xmm0
; SSE-NEXT:    pxor %xmm3, %xmm3
; SSE-NEXT:    pshufb %xmm0, %xmm1
; SSE-NEXT:    pcmpeqb %xmm3, %xmm0
; SSE-NEXT:    pand %xmm2, %xmm0
; SSE-NEXT:    paddb %xmm1, %xmm0
; SSE-NEXT:    movdqa {{.*#+}} xmm1 = [255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0]
; SSE-NEXT:    pand %xmm0, %xmm1
; SSE-NEXT:    psrlw $8, %xmm0
; SSE-NEXT:    paddw %xmm1, %xmm0
; SSE-NEXT:    pblendw {{.*#+}} xmm3 = xmm0[0],xmm3[1],xmm0[2],xmm3[3],xmm0[4],xmm3[5],xmm0[6],xmm3[7]
; SSE-NEXT:    psrld $16, %xmm0
; SSE-NEXT:    paddd %xmm3, %xmm0
; SSE-NEXT:    psrld $5, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_lshr_lzcnt_bit1:
; AVX:       # %bb.0:
; AVX-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; AVX-NEXT:    vmovdqa {{.*#+}} xmm1 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
; AVX-NEXT:    vpshufb %xmm0, %xmm1, %xmm2
; AVX-NEXT:    vpsrlw $4, %xmm0, %xmm0
; AVX-NEXT:    vpxor %xmm3, %xmm3, %xmm3
; AVX-NEXT:    vpcmpeqb %xmm3, %xmm0, %xmm4
; AVX-NEXT:    vpand %xmm4, %xmm2, %xmm2
; AVX-NEXT:    vpshufb %xmm0, %xmm1, %xmm0
; AVX-NEXT:    vpaddb %xmm0, %xmm2, %xmm0
; AVX-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
; AVX-NEXT:    vpsrlw $8, %xmm0, %xmm0
; AVX-NEXT:    vpaddw %xmm1, %xmm0, %xmm0
; AVX-NEXT:    vpblendw {{.*#+}} xmm1 = xmm0[0],xmm3[1],xmm0[2],xmm3[3],xmm0[4],xmm3[5],xmm0[6],xmm3[7]
; AVX-NEXT:    vpsrld $16, %xmm0, %xmm0
; AVX-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
; AVX-NEXT:    vpsrld $5, %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = and <4 x i32> %x, <i32 4, i32 32, i32 64, i32 128>
  %2 = call <4 x i32> @llvm.ctlz.v4i32(<4 x i32> %1, i1 0)
  %3 = lshr <4 x i32> %2, <i32 5, i32 5, i32 5, i32 5>
  ret <4 x i32> %3
}
declare <4 x i32> @llvm.ctlz.v4i32(<4 x i32>, i1)

; fold (srl x, (trunc (and y, c))) -> (srl x, (and (trunc y), (trunc c))).
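; (the and already limits every shift amount to fit in 32 bits, so the
; mask can be applied to the truncated amounts and AVX2 can use a
; 128-bit vpsrlvd)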
define <4 x i32> @combine_vec_lshr_trunc_and(<4 x i32> %x, <4 x i64> %y) {
; SSE-LABEL: combine_vec_lshr_trunc_and:
; SSE:       # %bb.0:
; SSE-NEXT:    shufps {{.*#+}} xmm1 = xmm1[0,2],xmm2[0,2]
; SSE-NEXT:    andps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
; SSE-NEXT:    pshuflw {{.*#+}} xmm2 = xmm1[2,3,3,3,4,5,6,7]
; SSE-NEXT:    movdqa %xmm0, %xmm3
; SSE-NEXT:    psrld %xmm2, %xmm3
; SSE-NEXT:    pshufd {{.*#+}} xmm2 = xmm1[2,3,2,3]
; SSE-NEXT:    pshuflw {{.*#+}} xmm4 = xmm2[2,3,3,3,4,5,6,7]
; SSE-NEXT:    movdqa %xmm0, %xmm5
; SSE-NEXT:    psrld %xmm4, %xmm5
; SSE-NEXT:    pblendw {{.*#+}} xmm5 = xmm3[0,1,2,3],xmm5[4,5,6,7]
; SSE-NEXT:    pshuflw {{.*#+}} xmm1 = xmm1[0,1,1,1,4,5,6,7]
; SSE-NEXT:    movdqa %xmm0, %xmm3
; SSE-NEXT:    psrld %xmm1, %xmm3
; SSE-NEXT:    pshuflw {{.*#+}} xmm1 = xmm2[0,1,1,1,4,5,6,7]
; SSE-NEXT:    psrld %xmm1, %xmm0
; SSE-NEXT:    pblendw {{.*#+}} xmm0 = xmm3[0,1,2,3],xmm0[4,5,6,7]
; SSE-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm5[2,3],xmm0[4,5],xmm5[6,7]
; SSE-NEXT:    retq
;
; AVX2-SLOW-LABEL: combine_vec_lshr_trunc_and:
; AVX2-SLOW:       # %bb.0:
; AVX2-SLOW-NEXT:    vextractf128 $1, %ymm1, %xmm2
; AVX2-SLOW-NEXT:    vshufps {{.*#+}} xmm1 = xmm1[0,2],xmm2[0,2]
; AVX2-SLOW-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
; AVX2-SLOW-NEXT:    vpsrlvd %xmm1, %xmm0, %xmm0
; AVX2-SLOW-NEXT:    vzeroupper
; AVX2-SLOW-NEXT:    retq
;
; AVX2-FAST-ALL-LABEL: combine_vec_lshr_trunc_and:
; AVX2-FAST-ALL:       # %bb.0:
; AVX2-FAST-ALL-NEXT:    vbroadcasti128 {{.*#+}} ymm2 = [0,2,4,6,0,2,4,6]
; AVX2-FAST-ALL-NEXT:    # ymm2 = mem[0,1,0,1]
; AVX2-FAST-ALL-NEXT:    vpermd %ymm1, %ymm2, %ymm1
; AVX2-FAST-ALL-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
; AVX2-FAST-ALL-NEXT:    vpsrlvd %xmm1, %xmm0, %xmm0
; AVX2-FAST-ALL-NEXT:    vzeroupper
; AVX2-FAST-ALL-NEXT:    retq
;
; AVX2-FAST-PERLANE-LABEL: combine_vec_lshr_trunc_and:
; AVX2-FAST-PERLANE:       # %bb.0:
; AVX2-FAST-PERLANE-NEXT:    vextractf128 $1, %ymm1, %xmm2
; AVX2-FAST-PERLANE-NEXT:    vshufps {{.*#+}} xmm1 = xmm1[0,2],xmm2[0,2]
; AVX2-FAST-PERLANE-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
; AVX2-FAST-PERLANE-NEXT:    vpsrlvd %xmm1, %xmm0, %xmm0
; AVX2-FAST-PERLANE-NEXT:    vzeroupper
; AVX2-FAST-PERLANE-NEXT:    retq
  %1 = and <4 x i64> %y, <i64 15, i64 255, i64 4095, i64 65535>
  %2 = trunc <4 x i64> %1 to <4 x i32>
  %3 = lshr <4 x i32> %x, %2
  ret <4 x i32> %3
}