; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=CHECK,SSE2
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx,+xop | FileCheck %s --check-prefixes=CHECK,XOP
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=CHECK,AVX2
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512vl | FileCheck %s --check-prefixes=CHECK,AVX512

; fold (rot (rot x, c1), c2) -> rot x, c1+c2
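; In the test below, the first lshr/shl pair is rotr by <1,2,3,4> and the
; second is rotr by <12,13,14,15>, so the whole sequence folds to a single
; rotate by <13,15,17,19> (one vprotd/vprolvd on XOP/AVX512).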
define <4 x i32> @combine_vec_rot_rot(<4 x i32> %x) {
; SSE2-LABEL: combine_vec_rot_rot:
; SSE2:       # %bb.0:
; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
; SSE2-NEXT:    pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[1,3,2,3]
; SSE2-NEXT:    pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
; SSE2-NEXT:    pshufd {{.*#+}} xmm3 = xmm1[1,3,2,3]
; SSE2-NEXT:    punpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; SSE2-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE2-NEXT:    por %xmm2, %xmm0
; SSE2-NEXT:    retq
;
; XOP-LABEL: combine_vec_rot_rot:
; XOP:       # %bb.0:
; XOP-NEXT:    vprotd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; XOP-NEXT:    retq
;
; AVX2-LABEL: combine_vec_rot_rot:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vpsrlvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
; AVX2-NEXT:    vpsllvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; AVX2-NEXT:    vpor %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    retq
;
; AVX512-LABEL: combine_vec_rot_rot:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vprolvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; AVX512-NEXT:    retq
  %1 = lshr <4 x i32> %x, <i32 1, i32 2, i32 3, i32 4>
  %2 = shl <4 x i32> %x, <i32 31, i32 30, i32 29, i32 28>
  %3 = or <4 x i32> %1, %2
  %4 = lshr <4 x i32> %3, <i32 12, i32 13, i32 14, i32 15>
  %5 = shl <4 x i32> %3, <i32 20, i32 19, i32 18, i32 17>
  %6 = or <4 x i32> %4, %5
  ret <4 x i32> %6
}
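
; Splat amounts: rotr by 3 followed by rotr by 22 is rotr by 25, i.e. rotl
; by 7, matching the vprotd/vprold $7 below.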
define <4 x i32> @combine_vec_rot_rot_splat(<4 x i32> %x) {
; SSE2-LABEL: combine_vec_rot_rot_splat:
; SSE2:       # %bb.0:
; SSE2-NEXT:    movdqa %xmm0, %xmm1
; SSE2-NEXT:    psrld $25, %xmm1
; SSE2-NEXT:    pslld $7, %xmm0
; SSE2-NEXT:    por %xmm1, %xmm0
; SSE2-NEXT:    retq
;
; XOP-LABEL: combine_vec_rot_rot_splat:
; XOP:       # %bb.0:
; XOP-NEXT:    vprotd $7, %xmm0, %xmm0
; XOP-NEXT:    retq
;
; AVX2-LABEL: combine_vec_rot_rot_splat:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vpsrld $25, %xmm0, %xmm1
; AVX2-NEXT:    vpslld $7, %xmm0, %xmm0
; AVX2-NEXT:    vpor %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    retq
;
; AVX512-LABEL: combine_vec_rot_rot_splat:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vprold $7, %xmm0, %xmm0
; AVX512-NEXT:    retq
  %1 = lshr <4 x i32> %x, <i32 3, i32 3, i32 3, i32 3>
  %2 = shl <4 x i32> %x, <i32 29, i32 29, i32 29, i32 29>
  %3 = or <4 x i32> %1, %2
  %4 = lshr <4 x i32> %3, <i32 22, i32 22, i32 22, i32 22>
  %5 = shl <4 x i32> %3, <i32 10, i32 10, i32 10, i32 10>
  %6 = or <4 x i32> %4, %5
  ret <4 x i32> %6
}
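
; The combined amount is 1 + 31 = 32 = 0 (mod 32), so the double rotate is a
; no-op and every target folds it to a plain return.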
define <4 x i32> @combine_vec_rot_rot_splat_zero(<4 x i32> %x) {
; CHECK-LABEL: combine_vec_rot_rot_splat_zero:
; CHECK:       # %bb.0:
; CHECK-NEXT:    retq
  %1 = lshr <4 x i32> %x, <i32 1, i32 1, i32 1, i32 1>
  %2 = shl <4 x i32> %x, <i32 31, i32 31, i32 31, i32 31>
  %3 = or <4 x i32> %1, %2
  %4 = lshr <4 x i32> %3, <i32 31, i32 31, i32 31, i32 31>
  %5 = shl <4 x i32> %3, <i32 1, i32 1, i32 1, i32 1>
  %6 = or <4 x i32> %4, %5
  ret <4 x i32> %6
}

; TODO - fold (select (icmp eq c, 0), x, (rot x, c)) -> rot x, c
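; A rotate by zero returns x unchanged, so the select is redundant; until the
; fold is implemented, scalar codegen still emits the testl/cmovel below.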
define i32 @combine_rot_select_zero(i32, i32) {
; CHECK-LABEL: combine_rot_select_zero:
; CHECK:       # %bb.0:
; CHECK-NEXT:    movl %esi, %ecx
; CHECK-NEXT:    movl %edi, %eax
; CHECK-NEXT:    roll %cl, %eax
; CHECK-NEXT:    testl %esi, %esi
; CHECK-NEXT:    cmovel %edi, %eax
; CHECK-NEXT:    retq
  %3 = and i32 %1, 31
  %4 = shl i32 %0, %3
  %5 = sub i32 0, %1
  %6 = and i32 %5, 31
  %7 = lshr i32 %0, %6
  %8 = or i32 %4, %7
  %9 = icmp eq i32 %1, 0
  %10 = select i1 %9, i32 %0, i32 %8
  ret i32 %10
}
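
; Vector version of the same pattern. AVX512 comes closest to the desired
; fold: vptestmd masks off the zero-amount lanes and the merge-masked vprolvd
; keeps the original operand there, implementing the select for free.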
define <4 x i32> @combine_vec_rot_select_zero(<4 x i32>, <4 x i32>) {
; SSE2-LABEL: combine_vec_rot_select_zero:
; SSE2:       # %bb.0:
; SSE2-NEXT:    pxor %xmm2, %xmm2
; SSE2-NEXT:    pcmpeqd %xmm1, %xmm2
; SSE2-NEXT:    pslld $23, %xmm1
; SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
; SSE2-NEXT:    paddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
; SSE2-NEXT:    cvttps2dq %xmm1, %xmm1
; SSE2-NEXT:    movdqa %xmm0, %xmm3
; SSE2-NEXT:    pmuludq %xmm1, %xmm3
; SSE2-NEXT:    pshufd {{.*#+}} xmm4 = xmm3[1,3,2,3]
; SSE2-NEXT:    pshufd {{.*#+}} xmm5 = xmm0[1,1,3,3]
; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
; SSE2-NEXT:    pmuludq %xmm5, %xmm1
; SSE2-NEXT:    pshufd {{.*#+}} xmm5 = xmm1[1,3,2,3]
; SSE2-NEXT:    punpckldq {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1]
; SSE2-NEXT:    pshufd {{.*#+}} xmm3 = xmm3[0,2,2,3]
; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; SSE2-NEXT:    punpckldq {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1]
; SSE2-NEXT:    por %xmm4, %xmm3
; SSE2-NEXT:    pand %xmm2, %xmm0
; SSE2-NEXT:    pandn %xmm3, %xmm2
; SSE2-NEXT:    por %xmm2, %xmm0
; SSE2-NEXT:    retq
;
; XOP-LABEL: combine_vec_rot_select_zero:
; XOP:       # %bb.0:
; XOP-NEXT:    vpxor %xmm2, %xmm2, %xmm2
; XOP-NEXT:    vprotd %xmm1, %xmm0, %xmm3
; XOP-NEXT:    vpcomeqd %xmm2, %xmm1, %xmm1
; XOP-NEXT:    vblendvps %xmm1, %xmm0, %xmm3, %xmm0
; XOP-NEXT:    retq
;
; AVX2-LABEL: combine_vec_rot_select_zero:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vpxor %xmm2, %xmm2, %xmm2
; AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm3 = [31,31,31,31]
; AVX2-NEXT:    vpand %xmm3, %xmm1, %xmm3
; AVX2-NEXT:    vpsllvd %xmm3, %xmm0, %xmm4
; AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm5 = [32,32,32,32]
; AVX2-NEXT:    vpsubd %xmm3, %xmm5, %xmm3
; AVX2-NEXT:    vpsrlvd %xmm3, %xmm0, %xmm3
; AVX2-NEXT:    vpor %xmm3, %xmm4, %xmm3
; AVX2-NEXT:    vpcmpeqd %xmm2, %xmm1, %xmm1
; AVX2-NEXT:    vblendvps %xmm1, %xmm0, %xmm3, %xmm0
; AVX2-NEXT:    retq
;
; AVX512-LABEL: combine_vec_rot_select_zero:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vptestmd %xmm1, %xmm1, %k1
; AVX512-NEXT:    vprolvd %xmm1, %xmm0, %xmm0 {%k1}
; AVX512-NEXT:    retq
  %3 = and <4 x i32> %1, <i32 31, i32 31, i32 31, i32 31>
  %4 = shl <4 x i32> %0, %3
  %5 = sub <4 x i32> zeroinitializer, %1
  %6 = and <4 x i32> %5, <i32 31, i32 31, i32 31, i32 31>
  %7 = lshr <4 x i32> %0, %6
  %8 = or <4 x i32> %4, %7
  %9 = icmp eq <4 x i32> %1, zeroinitializer
  %10 = select <4 x i1> %9, <4 x i32> %0, <4 x i32> %8
  ret <4 x i32> %10
}
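
; Masking both shift amounts with 30 keeps them even and complementary
; (their sum is 0 mod 32), so the shl/lshr pair is still matched as a rotate.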
define <4 x i32> @rotate_demanded_bits(<4 x i32>, <4 x i32>) {
; SSE2-LABEL: rotate_demanded_bits:
; SSE2:       # %bb.0:
; SSE2-NEXT:    pslld $23, %xmm1
; SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
; SSE2-NEXT:    paddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
; SSE2-NEXT:    cvttps2dq %xmm1, %xmm1
; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; SSE2-NEXT:    pmuludq %xmm1, %xmm0
; SSE2-NEXT:    pshufd {{.*#+}} xmm3 = xmm0[1,3,2,3]
; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
; SSE2-NEXT:    pmuludq %xmm2, %xmm1
; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm1[1,3,2,3]
; SSE2-NEXT:    punpckldq {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; SSE2-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE2-NEXT:    por %xmm3, %xmm0
; SSE2-NEXT:    retq
;
; XOP-LABEL: rotate_demanded_bits:
; XOP:       # %bb.0:
; XOP-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
; XOP-NEXT:    vprotd %xmm1, %xmm0, %xmm0
; XOP-NEXT:    retq
;
; AVX2-LABEL: rotate_demanded_bits:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm2 = [30,30,30,30]
; AVX2-NEXT:    vpand %xmm2, %xmm1, %xmm1
; AVX2-NEXT:    vpsllvd %xmm1, %xmm0, %xmm2
; AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm3 = [32,32,32,32]
; AVX2-NEXT:    vpsubd %xmm1, %xmm3, %xmm1
; AVX2-NEXT:    vpsrlvd %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    vpor %xmm0, %xmm2, %xmm0
; AVX2-NEXT:    retq
;
; AVX512-LABEL: rotate_demanded_bits:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm1, %xmm1
; AVX512-NEXT:    vprolvd %xmm1, %xmm0, %xmm0
; AVX512-NEXT:    retq
  %3 = and <4 x i32> %1, <i32 30, i32 30, i32 30, i32 30>
  %4 = shl <4 x i32> %0, %3
  %5 = sub nsw <4 x i32> zeroinitializer, %3
  %6 = and <4 x i32> %5, <i32 30, i32 30, i32 30, i32 30>
  %7 = lshr <4 x i32> %0, %6
  %8 = or <4 x i32> %7, %4
  ret <4 x i32> %8
}
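
; Asymmetric masks: the lshr amount is the exact negation mod 32 of the
; masked shl amount, so this is still rotl by (c & 23).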
define <4 x i32> @rotate_demanded_bits_2(<4 x i32>, <4 x i32>) {
; SSE2-LABEL: rotate_demanded_bits_2:
; SSE2:       # %bb.0:
; SSE2-NEXT:    pslld $23, %xmm1
; SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
; SSE2-NEXT:    paddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
; SSE2-NEXT:    cvttps2dq %xmm1, %xmm1
; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; SSE2-NEXT:    pmuludq %xmm1, %xmm0
; SSE2-NEXT:    pshufd {{.*#+}} xmm3 = xmm0[1,3,2,3]
; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
; SSE2-NEXT:    pmuludq %xmm2, %xmm1
; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm1[1,3,2,3]
; SSE2-NEXT:    punpckldq {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; SSE2-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE2-NEXT:    por %xmm3, %xmm0
; SSE2-NEXT:    retq
;
; XOP-LABEL: rotate_demanded_bits_2:
; XOP:       # %bb.0:
; XOP-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
; XOP-NEXT:    vprotd %xmm1, %xmm0, %xmm0
; XOP-NEXT:    retq
;
; AVX2-LABEL: rotate_demanded_bits_2:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm2 = [23,23,23,23]
; AVX2-NEXT:    vpand %xmm2, %xmm1, %xmm1
; AVX2-NEXT:    vpsllvd %xmm1, %xmm0, %xmm2
; AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm3 = [32,32,32,32]
; AVX2-NEXT:    vpsubd %xmm1, %xmm3, %xmm1
; AVX2-NEXT:    vpsrlvd %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    vpor %xmm0, %xmm2, %xmm0
; AVX2-NEXT:    retq
;
; AVX512-LABEL: rotate_demanded_bits_2:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm1, %xmm1
; AVX512-NEXT:    vprolvd %xmm1, %xmm0, %xmm0
; AVX512-NEXT:    retq
  %3 = and <4 x i32> %1, <i32 23, i32 23, i32 23, i32 23>
  %4 = shl <4 x i32> %0, %3
  %5 = sub nsw <4 x i32> zeroinitializer, %3
  %6 = and <4 x i32> %5, <i32 31, i32 31, i32 31, i32 31>
  %7 = lshr <4 x i32> %0, %6
  %8 = or <4 x i32> %7, %4
  ret <4 x i32> %8
}
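
; The amount is even by construction (c << 1) and masked by 30 on both sides;
; XOP/AVX512 drop the masks entirely since vprotd/vprolvd already take the
; rotate amount modulo 32.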
define <4 x i32> @rotate_demanded_bits_3(<4 x i32>, <4 x i32>) {
; SSE2-LABEL: rotate_demanded_bits_3:
; SSE2:       # %bb.0:
; SSE2-NEXT:    pslld $24, %xmm1
; SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
; SSE2-NEXT:    paddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
; SSE2-NEXT:    cvttps2dq %xmm1, %xmm1
; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; SSE2-NEXT:    pmuludq %xmm1, %xmm0
; SSE2-NEXT:    pshufd {{.*#+}} xmm3 = xmm0[1,3,2,3]
; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
; SSE2-NEXT:    pmuludq %xmm2, %xmm1
; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm1[1,3,2,3]
; SSE2-NEXT:    punpckldq {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; SSE2-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE2-NEXT:    por %xmm3, %xmm0
; SSE2-NEXT:    retq
;
; XOP-LABEL: rotate_demanded_bits_3:
; XOP:       # %bb.0:
; XOP-NEXT:    vpaddd %xmm1, %xmm1, %xmm1
; XOP-NEXT:    vprotd %xmm1, %xmm0, %xmm0
; XOP-NEXT:    retq
;
; AVX2-LABEL: rotate_demanded_bits_3:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vpaddd %xmm1, %xmm1, %xmm1
; AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm2 = [31,31,31,31]
; AVX2-NEXT:    vpand %xmm2, %xmm1, %xmm1
; AVX2-NEXT:    vpsllvd %xmm1, %xmm0, %xmm2
; AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm3 = [32,32,32,32]
; AVX2-NEXT:    vpsubd %xmm1, %xmm3, %xmm1
; AVX2-NEXT:    vpsrlvd %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    vpor %xmm0, %xmm2, %xmm0
; AVX2-NEXT:    retq
;
; AVX512-LABEL: rotate_demanded_bits_3:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vpaddd %xmm1, %xmm1, %xmm1
; AVX512-NEXT:    vprolvd %xmm1, %xmm0, %xmm0
; AVX512-NEXT:    retq
  %3 = shl <4 x i32> %1, <i32 1, i32 1, i32 1, i32 1>
  %4 = and <4 x i32> %3, <i32 30, i32 30, i32 30, i32 30>
  %5 = shl <4 x i32> %0, %4
  %6 = sub <4 x i32> zeroinitializer, %3
  %7 = and <4 x i32> %6, <i32 30, i32 30, i32 30, i32 30>
  %8 = lshr <4 x i32> %0, %7
  %9 = or <4 x i32> %5, %8
  ret <4 x i32> %9
}
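
; fshl(v, v, c) is a rotate, and the identical lane-reversing shuffles on the
; value, the amount, and the result all cancel, leaving a plain rot(x, c).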
define <4 x i32> @rotl_binop_shuffle(<4 x i32>, <4 x i32>) {
; SSE2-LABEL: rotl_binop_shuffle:
; SSE2:       # %bb.0:
; SSE2-NEXT:    pslld $23, %xmm1
; SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
; SSE2-NEXT:    paddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
; SSE2-NEXT:    cvttps2dq %xmm1, %xmm1
; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; SSE2-NEXT:    pmuludq %xmm1, %xmm0
; SSE2-NEXT:    pshufd {{.*#+}} xmm3 = xmm0[1,3,2,3]
; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
; SSE2-NEXT:    pmuludq %xmm2, %xmm1
; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm1[1,3,2,3]
; SSE2-NEXT:    punpckldq {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; SSE2-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE2-NEXT:    por %xmm3, %xmm0
; SSE2-NEXT:    retq
;
; XOP-LABEL: rotl_binop_shuffle:
; XOP:       # %bb.0:
; XOP-NEXT:    vprotd %xmm1, %xmm0, %xmm0
; XOP-NEXT:    retq
;
; AVX2-LABEL: rotl_binop_shuffle:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm2 = [31,31,31,31]
; AVX2-NEXT:    vpand %xmm2, %xmm1, %xmm1
; AVX2-NEXT:    vpsllvd %xmm1, %xmm0, %xmm2
; AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm3 = [32,32,32,32]
; AVX2-NEXT:    vpsubd %xmm1, %xmm3, %xmm1
; AVX2-NEXT:    vpsrlvd %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    vpor %xmm0, %xmm2, %xmm0
; AVX2-NEXT:    retq
;
; AVX512-LABEL: rotl_binop_shuffle:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vprolvd %xmm1, %xmm0, %xmm0
; AVX512-NEXT:    retq
  %3 = shufflevector <4 x i32> %0, <4 x i32> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
  %4 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
  %5 = call <4 x i32> @llvm.fshl.v4i32(<4 x i32> %3, <4 x i32> %3, <4 x i32> %4)
  %6 = shufflevector <4 x i32> %5, <4 x i32> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
  ret <4 x i32> %6
}
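
; Here the amount is splatted from lane 0, so the rotate is uniform across
; lanes and the reversing shuffles on input and result cancel; AVX512 needs
; only vpbroadcastd plus an unmasked vprolvd.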
define <4 x i32> @rotr_binop_shuffle(<4 x i32>, <4 x i32>) {
; SSE2-LABEL: rotr_binop_shuffle:
; SSE2:       # %bb.0:
; SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[2,2,3,3]
; SSE2-NEXT:    psllq %xmm1, %xmm2
; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
; SSE2-NEXT:    psllq %xmm1, %xmm0
; SSE2-NEXT:    shufps {{.*#+}} xmm0 = xmm0[1,3],xmm2[1,3]
; SSE2-NEXT:    retq
;
; XOP-LABEL: rotr_binop_shuffle:
; XOP:       # %bb.0:
; XOP-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[0,0,0,0]
; XOP-NEXT:    vprotd %xmm1, %xmm0, %xmm0
; XOP-NEXT:    retq
;
; AVX2-LABEL: rotr_binop_shuffle:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
; AVX2-NEXT:    vpshufd {{.*#+}} xmm2 = xmm0[2,2,3,3]
; AVX2-NEXT:    vpsllq %xmm1, %xmm2, %xmm2
; AVX2-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
; AVX2-NEXT:    vpsllq %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[1,3],xmm2[1,3]
; AVX2-NEXT:    retq
;
; AVX512-LABEL: rotr_binop_shuffle:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vpbroadcastd %xmm1, %xmm1
; AVX512-NEXT:    vprolvd %xmm1, %xmm0, %xmm0
; AVX512-NEXT:    retq
  %3 = shufflevector <4 x i32> %0, <4 x i32> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
  %4 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> zeroinitializer
  %5 = call <4 x i32> @llvm.fshl.v4i32(<4 x i32> %3, <4 x i32> %3, <4 x i32> %4)
  %6 = shufflevector <4 x i32> %5, <4 x i32> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
  ret <4 x i32> %6
}

; OSS Fuzz: https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=9935
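; 549755813887 is 0x7FFFFFFFFF (2^39 - 1), whose low 32 bits are all ones,
; so the result constant-folds to -1 (movl $-1, %eax).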
define i32 @fuzz9935() {
; CHECK-LABEL: fuzz9935:
; CHECK:       # %bb.0:
; CHECK-NEXT:    movl $-1, %eax
; CHECK-NEXT:    retq
  %1 = trunc i40 549755813887 to i32
  ret i32 %1
}

; Ensure we normalize the inner rotation amount before adding the two
; rotation amounts together.
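; fshl on i5 takes the amount modulo 5: -1 is 31 as a 5-bit value and
; 31 mod 5 = 1, so this is rotl-by-1 twice, i.e. rotl by 2:
; (x << 2) | ((x & 0b11000) >> 3), as the leal/andb/shrb/orb sequence shows.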
define i5 @rotl_merge_i5(i5 %x) {
; CHECK-LABEL: rotl_merge_i5:
; CHECK:       # %bb.0:
; CHECK-NEXT:    # kill: def $edi killed $edi def $rdi
; CHECK-NEXT:    leal (,%rdi,4), %ecx
; CHECK-NEXT:    movl %edi, %eax
; CHECK-NEXT:    andb $24, %al
; CHECK-NEXT:    shrb $3, %al
; CHECK-NEXT:    orb %cl, %al
; CHECK-NEXT:    retq
  %r1 = call i5 @llvm.fshl.i5(i5 %x, i5 %x, i5 -1)
  %r2 = call i5 @llvm.fshl.i5(i5 %r1, i5 %r1, i5 1)
  ret i5 %r2
}

declare i5 @llvm.fshl.i5(i5, i5, i5)

declare <4 x i32> @llvm.fshl.v4i32(<4 x i32>, <4 x i32>, <4 x i32>)
declare <4 x i32> @llvm.fshr.v4i32(<4 x i32>, <4 x i32>, <4 x i32>)