; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=CHECK,SSE2
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx,+xop | FileCheck %s --check-prefixes=CHECK,XOP
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=CHECK,AVX2
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512vl | FileCheck %s --check-prefixes=CHECK,AVX512

; fold (rot (rot x, c1), c2) -> rot x, c1+c2
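; e.g. below, rotl by <31,30,29,28> then rotl by <20,19,18,17> combines to a
; single rotl by <(31+20)%32,(30+19)%32,(29+18)%32,(28+17)%32> = <19,17,15,13>,
; hence the SSE2 multiply by <2^19,2^17,2^15,2^13> = [524288,131072,32768,8192].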
define <4 x i32> @combine_vec_rot_rot(<4 x i32> %x) {
; SSE2-LABEL: combine_vec_rot_rot:
; SSE2:       # %bb.0:
; SSE2-NEXT:    movdqa {{.*#+}} xmm1 = [524288,131072,32768,8192]
; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; SSE2-NEXT:    pmuludq %xmm1, %xmm0
; SSE2-NEXT:    pshufd {{.*#+}} xmm3 = xmm0[1,3,2,3]
; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
; SSE2-NEXT:    pmuludq %xmm2, %xmm1
; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm1[1,3,2,3]
; SSE2-NEXT:    punpckldq {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; SSE2-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE2-NEXT:    por %xmm3, %xmm0
; SSE2-NEXT:    retq
;
; XOP-LABEL: combine_vec_rot_rot:
; XOP:       # %bb.0:
; XOP-NEXT:    vprotd {{.*}}(%rip), %xmm0, %xmm0
; XOP-NEXT:    retq
;
; AVX2-LABEL: combine_vec_rot_rot:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vpsrlvd {{.*}}(%rip), %xmm0, %xmm1
; AVX2-NEXT:    vpsllvd {{.*}}(%rip), %xmm0, %xmm0
; AVX2-NEXT:    vpor %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    retq
;
; AVX512-LABEL: combine_vec_rot_rot:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vprolvd {{.*}}(%rip), %xmm0, %xmm0
; AVX512-NEXT:    retq
  %1 = lshr <4 x i32> %x, <i32 1, i32 2, i32 3, i32 4>
  %2 = shl <4 x i32> %x, <i32 31, i32 30, i32 29, i32 28>
  %3 = or <4 x i32> %1, %2
  %4 = lshr <4 x i32> %3, <i32 12, i32 13, i32 14, i32 15>
  %5 = shl <4 x i32> %3, <i32 20, i32 19, i32 18, i32 17>
  %6 = or <4 x i32> %4, %5
  ret <4 x i32> %6
}

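; Splat variant of the same fold: rotl 29 then rotl 10 combines to
; rotl ((29+10) % 32) = rotl 7 on every target.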
define <4 x i32> @combine_vec_rot_rot_splat(<4 x i32> %x) {
; SSE2-LABEL: combine_vec_rot_rot_splat:
; SSE2:       # %bb.0:
; SSE2-NEXT:    movdqa %xmm0, %xmm1
; SSE2-NEXT:    psrld $25, %xmm1
; SSE2-NEXT:    pslld $7, %xmm0
; SSE2-NEXT:    por %xmm1, %xmm0
; SSE2-NEXT:    retq
;
; XOP-LABEL: combine_vec_rot_rot_splat:
; XOP:       # %bb.0:
; XOP-NEXT:    vprotd $7, %xmm0, %xmm0
; XOP-NEXT:    retq
;
; AVX2-LABEL: combine_vec_rot_rot_splat:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vpsrld $25, %xmm0, %xmm1
; AVX2-NEXT:    vpslld $7, %xmm0, %xmm0
; AVX2-NEXT:    vpor %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    retq
;
; AVX512-LABEL: combine_vec_rot_rot_splat:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vprold $7, %xmm0, %xmm0
; AVX512-NEXT:    retq
  %1 = lshr <4 x i32> %x, <i32 3, i32 3, i32 3, i32 3>
  %2 = shl <4 x i32> %x, <i32 29, i32 29, i32 29, i32 29>
  %3 = or <4 x i32> %1, %2
  %4 = lshr <4 x i32> %3, <i32 22, i32 22, i32 22, i32 22>
  %5 = shl <4 x i32> %3, <i32 10, i32 10, i32 10, i32 10>
  %6 = or <4 x i32> %4, %5
  ret <4 x i32> %6
}

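; Splat variant where the amounts cancel: rotl 31 then rotl 1 is
; rotl (32 % 32) = rotl 0, a no-op, so this folds to a plain return.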
define <4 x i32> @combine_vec_rot_rot_splat_zero(<4 x i32> %x) {
; CHECK-LABEL: combine_vec_rot_rot_splat_zero:
; CHECK:       # %bb.0:
; CHECK-NEXT:    retq
  %1 = lshr <4 x i32> %x, <i32 1, i32 1, i32 1, i32 1>
  %2 = shl <4 x i32> %x, <i32 31, i32 31, i32 31, i32 31>
  %3 = or <4 x i32> %1, %2
  %4 = lshr <4 x i32> %3, <i32 31, i32 31, i32 31, i32 31>
  %5 = shl <4 x i32> %3, <i32 1, i32 1, i32 1, i32 1>
  %6 = or <4 x i32> %4, %5
  ret <4 x i32> %6
}

; TODO - fold (select (icmp eq c, 0), x, (rot x, c)) -> rot x, c
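; Since rot x, 0 == x, the select always picks the rotate's result and is
; redundant; for now we still emit the rotate plus a cmov/blend.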
define i32 @combine_rot_select_zero(i32, i32) {
; CHECK-LABEL: combine_rot_select_zero:
; CHECK:       # %bb.0:
; CHECK-NEXT:    movl %esi, %ecx
; CHECK-NEXT:    movl %edi, %eax
; CHECK-NEXT:    roll %cl, %eax
; CHECK-NEXT:    testl %esi, %esi
; CHECK-NEXT:    cmovel %edi, %eax
; CHECK-NEXT:    retq
  %3 = and i32 %1, 31
  %4 = shl i32 %0, %3
  %5 = sub i32 0, %1
  %6 = and i32 %5, 31
  %7 = lshr i32 %0, %6
  %8 = or i32 %4, %7
  %9 = icmp eq i32 %1, 0
  %10 = select i1 %9, i32 %0, i32 %8
  ret i32 %10
}

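; Vector version of the redundant-select pattern above: the select survives
; as a blendv (XOP/AVX2) or a masked move (AVX512).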
define <4 x i32> @combine_vec_rot_select_zero(<4 x i32>, <4 x i32>) {
; SSE2-LABEL: combine_vec_rot_select_zero:
; SSE2:       # %bb.0:
; SSE2-NEXT:    movdqa {{.*#+}} xmm2 = [31,31,31,31]
; SSE2-NEXT:    pand %xmm1, %xmm2
; SSE2-NEXT:    pxor %xmm3, %xmm3
; SSE2-NEXT:    pslld $23, %xmm2
; SSE2-NEXT:    paddd {{.*}}(%rip), %xmm2
; SSE2-NEXT:    cvttps2dq %xmm2, %xmm2
; SSE2-NEXT:    movdqa %xmm0, %xmm4
; SSE2-NEXT:    pmuludq %xmm2, %xmm4
; SSE2-NEXT:    pshufd {{.*#+}} xmm5 = xmm4[1,3,2,3]
; SSE2-NEXT:    pshufd {{.*#+}} xmm6 = xmm0[1,1,3,3]
; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
; SSE2-NEXT:    pmuludq %xmm6, %xmm2
; SSE2-NEXT:    pshufd {{.*#+}} xmm6 = xmm2[1,3,2,3]
; SSE2-NEXT:    punpckldq {{.*#+}} xmm5 = xmm5[0],xmm6[0],xmm5[1],xmm6[1]
; SSE2-NEXT:    pshufd {{.*#+}} xmm4 = xmm4[0,2,2,3]
; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
; SSE2-NEXT:    punpckldq {{.*#+}} xmm4 = xmm4[0],xmm2[0],xmm4[1],xmm2[1]
; SSE2-NEXT:    por %xmm5, %xmm4
; SSE2-NEXT:    pcmpeqd %xmm1, %xmm3
; SSE2-NEXT:    pand %xmm3, %xmm0
; SSE2-NEXT:    pandn %xmm4, %xmm3
; SSE2-NEXT:    por %xmm3, %xmm0
; SSE2-NEXT:    retq
;
; XOP-LABEL: combine_vec_rot_select_zero:
; XOP:       # %bb.0:
; XOP-NEXT:    vpand {{.*}}(%rip), %xmm1, %xmm2
; XOP-NEXT:    vpxor %xmm3, %xmm3, %xmm3
; XOP-NEXT:    vprotd %xmm2, %xmm0, %xmm2
; XOP-NEXT:    vpcomeqd %xmm3, %xmm1, %xmm1
; XOP-NEXT:    vblendvps %xmm1, %xmm0, %xmm2, %xmm0
; XOP-NEXT:    retq
;
; AVX2-LABEL: combine_vec_rot_select_zero:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm2 = [31,31,31,31]
; AVX2-NEXT:    vpand %xmm2, %xmm1, %xmm2
; AVX2-NEXT:    vpxor %xmm3, %xmm3, %xmm3
; AVX2-NEXT:    vpsllvd %xmm2, %xmm0, %xmm4
; AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm5 = [32,32,32,32]
; AVX2-NEXT:    vpsubd %xmm2, %xmm5, %xmm2
; AVX2-NEXT:    vpsrlvd %xmm2, %xmm0, %xmm2
; AVX2-NEXT:    vpor %xmm2, %xmm4, %xmm2
; AVX2-NEXT:    vpcmpeqd %xmm3, %xmm1, %xmm1
; AVX2-NEXT:    vblendvps %xmm1, %xmm0, %xmm2, %xmm0
; AVX2-NEXT:    retq
;
; AVX512-LABEL: combine_vec_rot_select_zero:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vpandd {{.*}}(%rip){1to4}, %xmm1, %xmm2
; AVX512-NEXT:    vprolvd %xmm2, %xmm0, %xmm2
; AVX512-NEXT:    vptestnmd %xmm1, %xmm1, %k1
; AVX512-NEXT:    vmovdqa32 %xmm0, %xmm2 {%k1}
; AVX512-NEXT:    vmovdqa %xmm2, %xmm0
; AVX512-NEXT:    retq
  %3 = and <4 x i32> %1, <i32 31, i32 31, i32 31, i32 31>
  %4 = shl <4 x i32> %0, %3
  %5 = sub <4 x i32> zeroinitializer, %1
  %6 = and <4 x i32> %5, <i32 31, i32 31, i32 31, i32 31>
  %7 = lshr <4 x i32> %0, %6
  %8 = or <4 x i32> %4, %7
  %9 = icmp eq <4 x i32> %1, zeroinitializer
  %10 = select <4 x i1> %9, <4 x i32> %0, <4 x i32> %8
  ret <4 x i32> %10
}

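; Only the low 5 bits of a 32-bit rotate amount are demanded, so masking both
; shift amounts with 30 (which keeps them even) still matches a rotate:
; (0 - amt) & 30 == (32 - amt) % 32 for any even amt in [0,31].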
define <4 x i32> @rotate_demanded_bits(<4 x i32>, <4 x i32>) {
; SSE2-LABEL: rotate_demanded_bits:
; SSE2:       # %bb.0:
; SSE2-NEXT:    pand {{.*}}(%rip), %xmm1
; SSE2-NEXT:    pslld $23, %xmm1
; SSE2-NEXT:    paddd {{.*}}(%rip), %xmm1
; SSE2-NEXT:    cvttps2dq %xmm1, %xmm1
; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; SSE2-NEXT:    pmuludq %xmm1, %xmm0
; SSE2-NEXT:    pshufd {{.*#+}} xmm3 = xmm0[1,3,2,3]
; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
; SSE2-NEXT:    pmuludq %xmm2, %xmm1
; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm1[1,3,2,3]
; SSE2-NEXT:    punpckldq {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; SSE2-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE2-NEXT:    por %xmm3, %xmm0
; SSE2-NEXT:    retq
;
; XOP-LABEL: rotate_demanded_bits:
; XOP:       # %bb.0:
; XOP-NEXT:    vpand {{.*}}(%rip), %xmm1, %xmm1
; XOP-NEXT:    vprotd %xmm1, %xmm0, %xmm0
; XOP-NEXT:    retq
;
; AVX2-LABEL: rotate_demanded_bits:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm2 = [30,30,30,30]
; AVX2-NEXT:    vpand %xmm2, %xmm1, %xmm1
; AVX2-NEXT:    vpsllvd %xmm1, %xmm0, %xmm2
; AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm3 = [32,32,32,32]
; AVX2-NEXT:    vpsubd %xmm1, %xmm3, %xmm1
; AVX2-NEXT:    vpsrlvd %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    vpor %xmm0, %xmm2, %xmm0
; AVX2-NEXT:    retq
;
; AVX512-LABEL: rotate_demanded_bits:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vpandd {{.*}}(%rip){1to4}, %xmm1, %xmm1
; AVX512-NEXT:    vprolvd %xmm1, %xmm0, %xmm0
; AVX512-NEXT:    retq
  %3 = and <4 x i32> %1, <i32 30, i32 30, i32 30, i32 30>
  %4 = shl <4 x i32> %0, %3
  %5 = sub nsw <4 x i32> zeroinitializer, %3
  %6 = and <4 x i32> %5, <i32 30, i32 30, i32 30, i32 30>
  %7 = lshr <4 x i32> %0, %6
  %8 = or <4 x i32> %7, %4
  ret <4 x i32> %8
}

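; Same idea with asymmetric masks: the shl amount is c & 23 and the lshr
; amount is (0 - (c & 23)) & 31 == (32 - (c & 23)) % 32, so the pair still
; forms a rotate by c & 23.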
define <4 x i32> @rotate_demanded_bits_2(<4 x i32>, <4 x i32>) {
; SSE2-LABEL: rotate_demanded_bits_2:
; SSE2:       # %bb.0:
; SSE2-NEXT:    pand {{.*}}(%rip), %xmm1
; SSE2-NEXT:    pslld $23, %xmm1
; SSE2-NEXT:    paddd {{.*}}(%rip), %xmm1
; SSE2-NEXT:    cvttps2dq %xmm1, %xmm1
; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; SSE2-NEXT:    pmuludq %xmm1, %xmm0
; SSE2-NEXT:    pshufd {{.*#+}} xmm3 = xmm0[1,3,2,3]
; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
; SSE2-NEXT:    pmuludq %xmm2, %xmm1
; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm1[1,3,2,3]
; SSE2-NEXT:    punpckldq {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; SSE2-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE2-NEXT:    por %xmm3, %xmm0
; SSE2-NEXT:    retq
;
; XOP-LABEL: rotate_demanded_bits_2:
; XOP:       # %bb.0:
; XOP-NEXT:    vpand {{.*}}(%rip), %xmm1, %xmm1
; XOP-NEXT:    vprotd %xmm1, %xmm0, %xmm0
; XOP-NEXT:    retq
;
; AVX2-LABEL: rotate_demanded_bits_2:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm2 = [23,23,23,23]
; AVX2-NEXT:    vpand %xmm2, %xmm1, %xmm1
; AVX2-NEXT:    vpsllvd %xmm1, %xmm0, %xmm2
; AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm3 = [32,32,32,32]
; AVX2-NEXT:    vpsubd %xmm1, %xmm3, %xmm1
; AVX2-NEXT:    vpsrlvd %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    vpor %xmm0, %xmm2, %xmm0
; AVX2-NEXT:    retq
;
; AVX512-LABEL: rotate_demanded_bits_2:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vpandd {{.*}}(%rip){1to4}, %xmm1, %xmm1
; AVX512-NEXT:    vprolvd %xmm1, %xmm0, %xmm0
; AVX512-NEXT:    retq
  %3 = and <4 x i32> %1, <i32 23, i32 23, i32 23, i32 23>
  %4 = shl <4 x i32> %0, %3
  %5 = sub nsw <4 x i32> zeroinitializer, %3
  %6 = and <4 x i32> %5, <i32 31, i32 31, i32 31, i32 31>
  %7 = lshr <4 x i32> %0, %6
  %8 = or <4 x i32> %7, %4
  ret <4 x i32> %8
}

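; Here the amount is doubled first, so it is always even and the & 30 masks
; on both shift amounts reduce to plain mod 32: a rotate by (2*c) % 32.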
define <4 x i32> @rotate_demanded_bits_3(<4 x i32>, <4 x i32>) {
; SSE2-LABEL: rotate_demanded_bits_3:
; SSE2:       # %bb.0:
; SSE2-NEXT:    paddd %xmm1, %xmm1
; SSE2-NEXT:    pand {{.*}}(%rip), %xmm1
; SSE2-NEXT:    pslld $23, %xmm1
; SSE2-NEXT:    paddd {{.*}}(%rip), %xmm1
; SSE2-NEXT:    cvttps2dq %xmm1, %xmm1
; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; SSE2-NEXT:    pmuludq %xmm1, %xmm0
; SSE2-NEXT:    pshufd {{.*#+}} xmm3 = xmm0[1,3,2,3]
; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
; SSE2-NEXT:    pmuludq %xmm2, %xmm1
; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm1[1,3,2,3]
; SSE2-NEXT:    punpckldq {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; SSE2-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE2-NEXT:    por %xmm3, %xmm0
; SSE2-NEXT:    retq
;
; XOP-LABEL: rotate_demanded_bits_3:
; XOP:       # %bb.0:
; XOP-NEXT:    vpaddd %xmm1, %xmm1, %xmm1
; XOP-NEXT:    vpand {{.*}}(%rip), %xmm1, %xmm1
; XOP-NEXT:    vprotd %xmm1, %xmm0, %xmm0
; XOP-NEXT:    retq
;
; AVX2-LABEL: rotate_demanded_bits_3:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vpaddd %xmm1, %xmm1, %xmm1
; AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm2 = [30,30,30,30]
; AVX2-NEXT:    vpand %xmm2, %xmm1, %xmm1
; AVX2-NEXT:    vpsllvd %xmm1, %xmm0, %xmm2
; AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm3 = [32,32,32,32]
; AVX2-NEXT:    vpsubd %xmm1, %xmm3, %xmm1
; AVX2-NEXT:    vpsrlvd %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    vpor %xmm0, %xmm2, %xmm0
; AVX2-NEXT:    retq
;
; AVX512-LABEL: rotate_demanded_bits_3:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vpaddd %xmm1, %xmm1, %xmm1
; AVX512-NEXT:    vpandd {{.*}}(%rip){1to4}, %xmm1, %xmm1
; AVX512-NEXT:    vprolvd %xmm1, %xmm0, %xmm0
; AVX512-NEXT:    retq
  %3 = shl <4 x i32> %1, <i32 1, i32 1, i32 1, i32 1>
  %4 = and <4 x i32> %3, <i32 30, i32 30, i32 30, i32 30>
  %5 = shl <4 x i32> %0, %4
  %6 = sub <4 x i32> zeroinitializer, %3
  %7 = and <4 x i32> %6, <i32 30, i32 30, i32 30, i32 30>
  %8 = lshr <4 x i32> %0, %7
  %9 = or <4 x i32> %5, %8
  ret <4 x i32> %9
}

; OSS Fuzz: https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=9935
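; 549755813887 is 2^39-1, whose low 32 bits are all ones, so the trunc below
; constant-folds to i32 -1 (hence the movl $-1).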
define i32 @fuzz9935() {
; CHECK-LABEL: fuzz9935:
; CHECK:       # %bb.0:
; CHECK-NEXT:    movl $-1, %eax
; CHECK-NEXT:    retq
  %1 = trunc i40 549755813887 to i32