; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=SSE
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=AVX

; fold (sra 0, x) -> 0
define <4 x i32> @combine_vec_ashr_zero(<4 x i32> %x) {
; SSE-LABEL: combine_vec_ashr_zero:
; SSE:       # BB#0:
; SSE-NEXT:    xorps %xmm0, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_ashr_zero:
; AVX:       # BB#0:
; AVX-NEXT:    vxorps %xmm0, %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = ashr <4 x i32> zeroinitializer, %x
  ret <4 x i32> %1
}
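
; With a zero left operand the result is known to be zero regardless of %x,
; since per lane (0 >>s n) == 0 for any n, so only the zeroing idiom
; (xorps/vxorps) survives.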

; fold (sra -1, x) -> -1
define <4 x i32> @combine_vec_ashr_allones(<4 x i32> %x) {
; SSE-LABEL: combine_vec_ashr_allones:
; SSE:       # BB#0:
; SSE-NEXT:    pcmpeqd %xmm0, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_ashr_allones:
; AVX:       # BB#0:
; AVX-NEXT:    vpcmpeqd %xmm0, %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = ashr <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, %x
  ret <4 x i32> %1
}
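
; An arithmetic shift copies the sign bit back in, so an all-ones lane stays
; all ones for any in-range amount: (-1 >>s n) == -1. pcmpeqd of a register
; with itself is the usual all-ones idiom.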

; fold (sra x, c >= size(x)) -> undef
define <4 x i32> @combine_vec_ashr_outofrange0(<4 x i32> %x) {
; SSE-LABEL: combine_vec_ashr_outofrange0:
; SSE:       # BB#0:
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_ashr_outofrange0:
; AVX:       # BB#0:
; AVX-NEXT:    retq
  %1 = ashr <4 x i32> %x, <i32 33, i32 33, i32 33, i32 33>
  ret <4 x i32> %1
}

define <4 x i32> @combine_vec_ashr_outofrange1(<4 x i32> %x) {
; SSE-LABEL: combine_vec_ashr_outofrange1:
; SSE:       # BB#0:
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_ashr_outofrange1:
; AVX:       # BB#0:
; AVX-NEXT:    retq
  %1 = ashr <4 x i32> %x, <i32 33, i32 34, i32 35, i32 36>
  ret <4 x i32> %1
}
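
; LLVM IR leaves ashr undefined when a lane's shift amount is not less than
; the bit width, so both out-of-range tests may fold to undef and return
; %xmm0 untouched (no instructions before retq).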

; fold (sra x, 0) -> x
define <4 x i32> @combine_vec_ashr_by_zero(<4 x i32> %x) {
; SSE-LABEL: combine_vec_ashr_by_zero:
; SSE:       # BB#0:
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_ashr_by_zero:
; AVX:       # BB#0:
; AVX-NEXT:    retq
  %1 = ashr <4 x i32> %x, zeroinitializer
  ret <4 x i32> %1
}
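
; A zero shift amount is the identity, so %x passes straight through and no
; shift instructions are emitted.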

; fold (sra (sra x, c1), c2) -> (sra x, (add c1, c2))
define <4 x i32> @combine_vec_ashr_ashr0(<4 x i32> %x) {
; SSE-LABEL: combine_vec_ashr_ashr0:
; SSE:       # BB#0:
; SSE-NEXT:    psrad $6, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_ashr_ashr0:
; AVX:       # BB#0:
; AVX-NEXT:    vpsrad $6, %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = ashr <4 x i32> %x, <i32 2, i32 2, i32 2, i32 2>
  %2 = ashr <4 x i32> %1, <i32 4, i32 4, i32 4, i32 4>
  ret <4 x i32> %2
}
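
; A sketch of the fold with these constants:
;   %1 = ashr <4 x i32> %x, <i32 2, i32 2, i32 2, i32 2>
;   %2 = ashr <4 x i32> %1, <i32 4, i32 4, i32 4, i32 4>
; is combined, in effect, into
;   %2 = ashr <4 x i32> %x, <i32 6, i32 6, i32 6, i32 6>
; which matches the single psrad/vpsrad $6 checked above.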

define <4 x i32> @combine_vec_ashr_ashr1(<4 x i32> %x) {
; SSE-LABEL: combine_vec_ashr_ashr1:
; SSE:       # BB#0:
; SSE-NEXT:    movdqa %xmm0, %xmm1
; SSE-NEXT:    psrad $10, %xmm1
; SSE-NEXT:    movdqa %xmm0, %xmm2
; SSE-NEXT:    psrad $6, %xmm2
; SSE-NEXT:    pblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm1[4,5,6,7]
; SSE-NEXT:    movdqa %xmm0, %xmm1
; SSE-NEXT:    psrad $8, %xmm1
; SSE-NEXT:    psrad $4, %xmm0
; SSE-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
; SSE-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_ashr_ashr1:
; AVX:       # BB#0:
; AVX-NEXT:    vpsravd {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = ashr <4 x i32> %x, <i32 0, i32 1, i32 2, i32 3>
  %2 = ashr <4 x i32> %1, <i32 4, i32 5, i32 6, i32 7>
  ret <4 x i32> %2
}
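
; The fold also applies lane-wise: <0,1,2,3> + <4,5,6,7> gives combined
; amounts <4,6,8,10>. AVX2 handles that with one vpsravd; SSE4.1 has no
; per-lane variable arithmetic shift, so it expands into psrad + pblendw.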

define <4 x i32> @combine_vec_ashr_ashr2(<4 x i32> %x) {
; SSE-LABEL: combine_vec_ashr_ashr2:
; SSE:       # BB#0:
; SSE-NEXT:    psrad $31, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_ashr_ashr2:
; AVX:       # BB#0:
; AVX-NEXT:    vpsrad $31, %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = ashr <4 x i32> %x, <i32 17, i32 18, i32 19, i32 20>
  %2 = ashr <4 x i32> %1, <i32 25, i32 26, i32 27, i32 28>
  ret <4 x i32> %2
}
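
; Every combined amount here (42..48) is >= 32, so the sum is clamped to 31
; and the result is just the sign splat (psrad $31).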

define <4 x i32> @combine_vec_ashr_ashr3(<4 x i32> %x) {
; SSE-LABEL: combine_vec_ashr_ashr3:
; SSE:       # BB#0:
; SSE-NEXT:    movdqa %xmm0, %xmm1
; SSE-NEXT:    psrad $27, %xmm1
; SSE-NEXT:    movdqa %xmm0, %xmm2
; SSE-NEXT:    psrad $5, %xmm2
; SSE-NEXT:    pblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm1[4,5,6,7]
; SSE-NEXT:    movdqa %xmm0, %xmm1
; SSE-NEXT:    psrad $31, %xmm1
; SSE-NEXT:    psrad $1, %xmm0
; SSE-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
; SSE-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
; SSE-NEXT:    movdqa %xmm0, %xmm1
; SSE-NEXT:    psrad $10, %xmm1
; SSE-NEXT:    pblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm0[4,5,6,7]
; SSE-NEXT:    psrad $31, %xmm0
; SSE-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7]
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_ashr_ashr3:
; AVX:       # BB#0:
; AVX-NEXT:    vpsravd {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT:    vpsravd {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = ashr <4 x i32> %x, <i32 1, i32 5, i32 50, i32 27>
  %2 = ashr <4 x i32> %1, <i32 33, i32 10, i32 33, i32 0>
  ret <4 x i32> %2
}
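
; Note: the shifts are not merged here, likely because the lanes shifted by
; 50 and 33 are already out of range (undefined in IR); AVX2 accordingly
; still emits both vpsravd.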

; fold (sra x, (trunc (and y, c))) -> (sra x, (and (trunc y), (trunc c))).
define <4 x i32> @combine_vec_ashr_trunc_and(<4 x i32> %x, <4 x i64> %y) {
; SSE-LABEL: combine_vec_ashr_trunc_and:
; SSE:       # BB#0:
; SSE-NEXT:    shufps {{.*#+}} xmm1 = xmm1[0,2],xmm2[0,2]
; SSE-NEXT:    andps {{.*}}(%rip), %xmm1
; SSE-NEXT:    movaps %xmm1, %xmm2
; SSE-NEXT:    psrldq {{.*#+}} xmm2 = xmm2[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; SSE-NEXT:    movdqa %xmm0, %xmm3
; SSE-NEXT:    psrad %xmm2, %xmm3
; SSE-NEXT:    movaps %xmm1, %xmm2
; SSE-NEXT:    psrlq $32, %xmm2
; SSE-NEXT:    movdqa %xmm0, %xmm4
; SSE-NEXT:    psrad %xmm2, %xmm4
; SSE-NEXT:    pblendw {{.*#+}} xmm4 = xmm4[0,1,2,3],xmm3[4,5,6,7]
; SSE-NEXT:    pxor %xmm2, %xmm2
; SSE-NEXT:    pmovzxdq {{.*#+}} xmm3 = xmm1[0],zero,xmm1[1],zero
; SSE-NEXT:    punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm2[2],xmm1[3],xmm2[3]
; SSE-NEXT:    movdqa %xmm0, %xmm2
; SSE-NEXT:    psrad %xmm1, %xmm2
; SSE-NEXT:    psrad %xmm3, %xmm0
; SSE-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm2[4,5,6,7]
; SSE-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm4[2,3],xmm0[4,5],xmm4[6,7]
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_ashr_trunc_and:
; AVX:       # BB#0:
; AVX-NEXT:    vpshufd {{.*#+}} ymm1 = ymm1[0,2,2,3,4,6,6,7]
; AVX-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[0,2,2,3]
; AVX-NEXT:    vpand {{.*}}(%rip), %xmm1, %xmm1
; AVX-NEXT:    vpsravd %xmm1, %xmm0, %xmm0
; AVX-NEXT:    vzeroupper
; AVX-NEXT:    retq
  %1 = and <4 x i64> %y, <i64 15, i64 255, i64 4095, i64 65535>
  %2 = trunc <4 x i64> %1 to <4 x i32>
  %3 = ashr <4 x i32> %x, %2
  ret <4 x i32> %3
}
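
; In effect the combine moves the mask past the truncate so the whole shift
; amount computation runs at the narrow type, roughly:
;   %t = trunc <4 x i64> %y to <4 x i32>
;   %m = and <4 x i32> %t, <i32 15, i32 255, i32 4095, i32 65535>
;   %3 = ashr <4 x i32> %x, %m
; which lets AVX2 use a 128-bit vpand + vpsravd after truncating %y.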

; fold (sra (trunc (srl x, c1)), c2) -> (trunc (sra x, c1 + c2))
; if c1 is equal to the number of bits the trunc removes
define <4 x i32> @combine_vec_ashr_trunc_lshr(<4 x i64> %x) {
; SSE-LABEL: combine_vec_ashr_trunc_lshr:
; SSE:       # BB#0:
; SSE-NEXT:    psrlq $32, %xmm1
; SSE-NEXT:    psrlq $32, %xmm0
; SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
; SSE-NEXT:    movaps %xmm0, %xmm2
; SSE-NEXT:    movaps %xmm0, %xmm1
; SSE-NEXT:    psrad $2, %xmm1
; SSE-NEXT:    blendpd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
; SSE-NEXT:    psrad $3, %xmm0
; SSE-NEXT:    psrad $1, %xmm2
; SSE-NEXT:    pblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm0[4,5,6,7]
; SSE-NEXT:    pblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7]
; SSE-NEXT:    movdqa %xmm1, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_ashr_trunc_lshr:
; AVX:       # BB#0:
; AVX-NEXT:    vpsrlq $32, %ymm0, %ymm0
; AVX-NEXT:    vpshufd {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
; AVX-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
; AVX-NEXT:    vpsravd {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT:    vzeroupper
; AVX-NEXT:    retq
  %1 = lshr <4 x i64> %x, <i64 32, i64 32, i64 32, i64 32>
  %2 = trunc <4 x i64> %1 to <4 x i32>
  %3 = ashr <4 x i32> %2, <i32 0, i32 1, i32 2, i32 3>
  ret <4 x i32> %3
}
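
; The lshr by 32 keeps only the high i32 half of each i64 lane, so the
; truncation reduces to picking those halves (psrlq $32 + shufps on SSE,
; vpsrlq + shuffles on AVX2) before the per-lane <0,1,2,3> ashr is applied
; at i32 width.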

; fold (sra (trunc (sra x, c1)), c2) -> (trunc (sra x, c1 + c2))
; if c1 is equal to the number of bits the trunc removes
define <4 x i32> @combine_vec_ashr_trunc_ashr(<4 x i64> %x) {
; SSE-LABEL: combine_vec_ashr_trunc_ashr:
; SSE:       # BB#0:
; SSE-NEXT:    pshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
; SSE-NEXT:    psrad $31, %xmm1
; SSE-NEXT:    pblendw {{.*#+}} xmm1 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7]
; SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[1,3],xmm1[0,2]
; SSE-NEXT:    movaps %xmm0, %xmm2
; SSE-NEXT:    movaps %xmm0, %xmm1
; SSE-NEXT:    psrad $2, %xmm1
; SSE-NEXT:    blendpd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
; SSE-NEXT:    psrad $3, %xmm0
; SSE-NEXT:    psrad $1, %xmm2
; SSE-NEXT:    pblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm0[4,5,6,7]
; SSE-NEXT:    pblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7]
; SSE-NEXT:    movdqa %xmm1, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_ashr_trunc_ashr:
; AVX:       # BB#0:
; AVX-NEXT:    vpshufd {{.*#+}} ymm0 = ymm0[1,3,2,3,5,7,6,7]
; AVX-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
; AVX-NEXT:    vpsravd {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT:    vzeroupper
; AVX-NEXT:    retq
  %1 = ashr <4 x i64> %x, <i64 32, i64 32, i64 32, i64 32>
  %2 = trunc <4 x i64> %1 to <4 x i32>
  %3 = ashr <4 x i32> %2, <i32 0, i32 1, i32 2, i32 3>
  ret <4 x i32> %3
}
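
; Same pattern with an arithmetic shift: the high halves are extracted with
; the sign bits intact, and the remaining <0,1,2,3> ashr is again performed
; at i32 width.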

; If the sign bit is known to be zero, switch this to a SRL.
define <4 x i32> @combine_vec_ashr_positive(<4 x i32> %x, <4 x i32> %y) {
; SSE-LABEL: combine_vec_ashr_positive:
; SSE:       # BB#0:
; SSE-NEXT:    pand {{.*}}(%rip), %xmm0
; SSE-NEXT:    movdqa %xmm1, %xmm2
; SSE-NEXT:    psrldq {{.*#+}} xmm2 = xmm2[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; SSE-NEXT:    movdqa %xmm0, %xmm3
; SSE-NEXT:    psrld %xmm2, %xmm3
; SSE-NEXT:    movdqa %xmm1, %xmm2
; SSE-NEXT:    psrlq $32, %xmm2
; SSE-NEXT:    movdqa %xmm0, %xmm4
; SSE-NEXT:    psrld %xmm2, %xmm4
; SSE-NEXT:    pblendw {{.*#+}} xmm4 = xmm4[0,1,2,3],xmm3[4,5,6,7]
; SSE-NEXT:    pxor %xmm2, %xmm2
; SSE-NEXT:    pmovzxdq {{.*#+}} xmm3 = xmm1[0],zero,xmm1[1],zero
; SSE-NEXT:    punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm2[2],xmm1[3],xmm2[3]
; SSE-NEXT:    movdqa %xmm0, %xmm2
; SSE-NEXT:    psrld %xmm1, %xmm2
; SSE-NEXT:    psrld %xmm3, %xmm0
; SSE-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm2[4,5,6,7]
; SSE-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm4[2,3],xmm0[4,5],xmm4[6,7]
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_ashr_positive:
; AVX:       # BB#0:
; AVX-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT:    vpsrlvd %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = and <4 x i32> %x, <i32 15, i32 255, i32 4095, i32 65535>
  %2 = ashr <4 x i32> %1, %y
  ret <4 x i32> %2
}
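
; The mask <15,255,4095,65535> clears every sign bit, so the arithmetic
; shift behaves exactly like a logical one and llc can select psrld /
; vpsrlvd instead.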

define <4 x i32> @combine_vec_ashr_positive_splat(<4 x i32> %x, <4 x i32> %y) {
; SSE-LABEL: combine_vec_ashr_positive_splat:
; SSE:       # BB#0:
; SSE-NEXT:    xorps %xmm0, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_ashr_positive_splat:
; AVX:       # BB#0:
; AVX-NEXT:    vxorps %xmm0, %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = and <4 x i32> %x, <i32 1023, i32 1023, i32 1023, i32 1023>
  %2 = ashr <4 x i32> %1, <i32 10, i32 10, i32 10, i32 10>
  ret <4 x i32> %2
}
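
; The and keeps only the low 10 bits and the shift then discards all of them
; (1023 >> 10 == 0), so the result is known zero and a zeroing idiom
; suffices.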