; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=SSE,SSE2
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefixes=SSE,SSE41
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=AVX,AVX1
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX2
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefixes=AVX512,AVX512F
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512vl | FileCheck %s --check-prefixes=AVX512,AVX512VL
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512bw | FileCheck %s --check-prefixes=AVX512,AVX512BW
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512bw,+avx512vl | FileCheck %s --check-prefixes=AVX512,AVX512VLBW
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+xop,+avx | FileCheck %s --check-prefixes=XOP,XOPAVX1
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+xop,+avx2 | FileCheck %s --check-prefixes=XOP,XOPAVX2

; Just one 32-bit run to make sure we do reasonable things for i64 rotates.
; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=X32-SSE,X32-SSE2

;
; Variable Rotates
;

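; Each test below builds an element rotate from the plain shift pair, i.e. for
; element width w: rot(a, b) = (a shl b) | (a lshr (w - b)), and the checks
; verify how each subtarget lowers that idiom (e.g. to vprolv*/vprot* where
; those instructions are available).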
define <2 x i64> @var_rotate_v2i64(<2 x i64> %a, <2 x i64> %b) nounwind {
; SSE2-LABEL: var_rotate_v2i64:
; SSE2: # %bb.0:
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [64,64]
; SSE2-NEXT: psubq %xmm1, %xmm2
; SSE2-NEXT: movdqa %xmm0, %xmm3
; SSE2-NEXT: psllq %xmm1, %xmm3
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
; SSE2-NEXT: movdqa %xmm0, %xmm4
; SSE2-NEXT: psllq %xmm1, %xmm4
; SSE2-NEXT: movsd {{.*#+}} xmm4 = xmm3[0],xmm4[1]
; SSE2-NEXT: movdqa %xmm0, %xmm1
; SSE2-NEXT: psrlq %xmm2, %xmm1
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,3,0,1]
; SSE2-NEXT: psrlq %xmm2, %xmm0
; SSE2-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; SSE2-NEXT: orpd %xmm4, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: var_rotate_v2i64:
; SSE41: # %bb.0:
; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [64,64]
; SSE41-NEXT: psubq %xmm1, %xmm2
; SSE41-NEXT: movdqa %xmm0, %xmm3
; SSE41-NEXT: psllq %xmm1, %xmm3
; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
; SSE41-NEXT: movdqa %xmm0, %xmm4
; SSE41-NEXT: psllq %xmm1, %xmm4
; SSE41-NEXT: pblendw {{.*#+}} xmm4 = xmm3[0,1,2,3],xmm4[4,5,6,7]
; SSE41-NEXT: movdqa %xmm0, %xmm1
; SSE41-NEXT: psrlq %xmm2, %xmm1
; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,3,0,1]
; SSE41-NEXT: psrlq %xmm2, %xmm0
; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7]
; SSE41-NEXT: por %xmm4, %xmm0
; SSE41-NEXT: retq
;
; AVX1-LABEL: var_rotate_v2i64:
; AVX1: # %bb.0:
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [64,64]
; AVX1-NEXT: vpsubq %xmm1, %xmm2, %xmm2
; AVX1-NEXT: vpsllq %xmm1, %xmm0, %xmm3
; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
; AVX1-NEXT: vpsllq %xmm1, %xmm0, %xmm1
; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm3[0,1,2,3],xmm1[4,5,6,7]
; AVX1-NEXT: vpsrlq %xmm2, %xmm0, %xmm3
; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[2,3,0,1]
; AVX1-NEXT: vpsrlq %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm3[0,1,2,3],xmm0[4,5,6,7]
; AVX1-NEXT: vpor %xmm0, %xmm1, %xmm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: var_rotate_v2i64:
; AVX2: # %bb.0:
; AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = [64,64]
; AVX2-NEXT: vpsubq %xmm1, %xmm2, %xmm2
; AVX2-NEXT: vpsllvq %xmm1, %xmm0, %xmm1
; AVX2-NEXT: vpsrlvq %xmm2, %xmm0, %xmm0
; AVX2-NEXT: vpor %xmm0, %xmm1, %xmm0
; AVX2-NEXT: retq
;
; AVX512F-LABEL: var_rotate_v2i64:
; AVX512F: # %bb.0:
; AVX512F-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512F-NEXT: vprolvq %zmm1, %zmm0, %zmm0
; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: var_rotate_v2i64:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vprolvq %xmm1, %xmm0, %xmm0
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: var_rotate_v2i64:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512BW-NEXT: vprolvq %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
; AVX512VLBW-LABEL: var_rotate_v2i64:
; AVX512VLBW: # %bb.0:
; AVX512VLBW-NEXT: vprolvq %xmm1, %xmm0, %xmm0
; AVX512VLBW-NEXT: retq
;
; XOP-LABEL: var_rotate_v2i64:
; XOP: # %bb.0:
; XOP-NEXT: vprotq %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
;
; X32-SSE-LABEL: var_rotate_v2i64:
; X32-SSE: # %bb.0:
; X32-SSE-NEXT: movdqa {{.*#+}} xmm2 = [64,0,64,0]
; X32-SSE-NEXT: psubq %xmm1, %xmm2
; X32-SSE-NEXT: movdqa %xmm0, %xmm3
; X32-SSE-NEXT: psllq %xmm1, %xmm3
; X32-SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
; X32-SSE-NEXT: movdqa %xmm0, %xmm4
; X32-SSE-NEXT: psllq %xmm1, %xmm4
; X32-SSE-NEXT: movsd {{.*#+}} xmm4 = xmm3[0],xmm4[1]
; X32-SSE-NEXT: movdqa %xmm0, %xmm1
; X32-SSE-NEXT: psrlq %xmm2, %xmm1
; X32-SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,3,0,1]
; X32-SSE-NEXT: psrlq %xmm2, %xmm0
; X32-SSE-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; X32-SSE-NEXT: orpd %xmm4, %xmm0
; X32-SSE-NEXT: retl
  %b64 = sub <2 x i64> <i64 64, i64 64>, %b
  %shl = shl <2 x i64> %a, %b
  %lshr = lshr <2 x i64> %a, %b64
  %or = or <2 x i64> %shl, %lshr
  ret <2 x i64> %or
}

define <4 x i32> @var_rotate_v4i32(<4 x i32> %a, <4 x i32> %b) nounwind {
; SSE2-LABEL: var_rotate_v4i32:
; SSE2: # %bb.0:
; SSE2-NEXT: pand {{.*}}(%rip), %xmm1
; SSE2-NEXT: pslld $23, %xmm1
; SSE2-NEXT: paddd {{.*}}(%rip), %xmm1
; SSE2-NEXT: cvttps2dq %xmm1, %xmm1
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,3,2,3]
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm2, %xmm1
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,3,2,3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE2-NEXT: por %xmm3, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: var_rotate_v4i32:
; SSE41: # %bb.0:
; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; SSE41-NEXT: pand {{.*}}(%rip), %xmm1
; SSE41-NEXT: pslld $23, %xmm1
; SSE41-NEXT: paddd {{.*}}(%rip), %xmm1
; SSE41-NEXT: cvttps2dq %xmm1, %xmm1
; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm1[1,1,3,3]
; SSE41-NEXT: pmuludq %xmm2, %xmm3
; SSE41-NEXT: pmuludq %xmm1, %xmm0
; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1],xmm3[2,3],xmm1[4,5],xmm3[6,7]
; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm3[0,0,2,2]
; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
; SSE41-NEXT: por %xmm1, %xmm0
; SSE41-NEXT: retq
;
; AVX1-LABEL: var_rotate_v4i32:
; AVX1: # %bb.0:
; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; AVX1-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
; AVX1-NEXT: vpslld $23, %xmm1, %xmm1
; AVX1-NEXT: vpaddd {{.*}}(%rip), %xmm1, %xmm1
; AVX1-NEXT: vcvttps2dq %xmm1, %xmm1
; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm1[1,1,3,3]
; AVX1-NEXT: vpmuludq %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vpmuludq %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7]
; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[0,0,2,2]
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
; AVX1-NEXT: vpor %xmm1, %xmm0, %xmm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: var_rotate_v4i32:
; AVX2: # %bb.0:
; AVX2-NEXT: vpbroadcastd {{.*#+}} xmm2 = [31,31,31,31]
; AVX2-NEXT: vpand %xmm2, %xmm1, %xmm1
; AVX2-NEXT: vpsllvd %xmm1, %xmm0, %xmm2
; AVX2-NEXT: vpbroadcastd {{.*#+}} xmm3 = [32,32,32,32]
; AVX2-NEXT: vpsubd %xmm1, %xmm3, %xmm1
; AVX2-NEXT: vpsrlvd %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpor %xmm0, %xmm2, %xmm0
; AVX2-NEXT: retq
;
; AVX512F-LABEL: var_rotate_v4i32:
; AVX512F: # %bb.0:
; AVX512F-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512F-NEXT: vprolvd %zmm1, %zmm0, %zmm0
; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: var_rotate_v4i32:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vprolvd %xmm1, %xmm0, %xmm0
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: var_rotate_v4i32:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512BW-NEXT: vprolvd %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
; AVX512VLBW-LABEL: var_rotate_v4i32:
; AVX512VLBW: # %bb.0:
; AVX512VLBW-NEXT: vprolvd %xmm1, %xmm0, %xmm0
; AVX512VLBW-NEXT: retq
;
; XOP-LABEL: var_rotate_v4i32:
; XOP: # %bb.0:
; XOP-NEXT: vprotd %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
;
; X32-SSE-LABEL: var_rotate_v4i32:
; X32-SSE: # %bb.0:
; X32-SSE-NEXT: pand {{\.LCPI.*}}, %xmm1
; X32-SSE-NEXT: pslld $23, %xmm1
; X32-SSE-NEXT: paddd {{\.LCPI.*}}, %xmm1
; X32-SSE-NEXT: cvttps2dq %xmm1, %xmm1
; X32-SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; X32-SSE-NEXT: pmuludq %xmm1, %xmm0
; X32-SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,3,2,3]
; X32-SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
; X32-SSE-NEXT: pmuludq %xmm2, %xmm1
; X32-SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,3,2,3]
; X32-SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
; X32-SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; X32-SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; X32-SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; X32-SSE-NEXT: por %xmm3, %xmm0
; X32-SSE-NEXT: retl
  %b32 = sub <4 x i32> <i32 32, i32 32, i32 32, i32 32>, %b
  %shl = shl <4 x i32> %a, %b
  %lshr = lshr <4 x i32> %a, %b32
  %or = or <4 x i32> %shl, %lshr
  ret <4 x i32> %or
}

define <8 x i16> @var_rotate_v8i16(<8 x i16> %a, <8 x i16> %b) nounwind {
; SSE2-LABEL: var_rotate_v8i16:
; SSE2: # %bb.0:
; SSE2-NEXT: pand {{.*}}(%rip), %xmm1
; SSE2-NEXT: movdqa %xmm1, %xmm2
; SSE2-NEXT: punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
; SSE2-NEXT: pslld $23, %xmm2
; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [1065353216,1065353216,1065353216,1065353216]
; SSE2-NEXT: paddd %xmm3, %xmm2
; SSE2-NEXT: cvttps2dq %xmm2, %xmm2
; SSE2-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[0,2,2,3,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,6,6,7]
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; SSE2-NEXT: pslld $23, %xmm1
; SSE2-NEXT: paddd %xmm3, %xmm1
; SSE2-NEXT: cvttps2dq %xmm1, %xmm1
; SSE2-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,2,2,3,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,6,6,7]
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; SSE2-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
; SSE2-NEXT: movdqa %xmm0, %xmm2
; SSE2-NEXT: pmulhuw %xmm1, %xmm2
; SSE2-NEXT: pmullw %xmm1, %xmm0
; SSE2-NEXT: por %xmm2, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: var_rotate_v8i16:
; SSE41: # %bb.0:
; SSE41-NEXT: pand {{.*}}(%rip), %xmm1
; SSE41-NEXT: pmovzxwd {{.*#+}} xmm2 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
; SSE41-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
; SSE41-NEXT: pslld $23, %xmm1
; SSE41-NEXT: movdqa {{.*#+}} xmm3 = [1065353216,1065353216,1065353216,1065353216]
; SSE41-NEXT: paddd %xmm3, %xmm1
; SSE41-NEXT: cvttps2dq %xmm1, %xmm1
; SSE41-NEXT: pslld $23, %xmm2
; SSE41-NEXT: paddd %xmm3, %xmm2
; SSE41-NEXT: cvttps2dq %xmm2, %xmm2
; SSE41-NEXT: packusdw %xmm1, %xmm2
; SSE41-NEXT: movdqa %xmm0, %xmm1
; SSE41-NEXT: pmulhuw %xmm2, %xmm1
; SSE41-NEXT: pmullw %xmm2, %xmm0
; SSE41-NEXT: por %xmm1, %xmm0
; SSE41-NEXT: retq
;
; AVX1-LABEL: var_rotate_v8i16:
; AVX1: # %bb.0:
; AVX1-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm2 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
; AVX1-NEXT: vpslld $23, %xmm2, %xmm2
; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [1065353216,1065353216,1065353216,1065353216]
; AVX1-NEXT: vpaddd %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vcvttps2dq %xmm2, %xmm2
; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
; AVX1-NEXT: vpslld $23, %xmm1, %xmm1
; AVX1-NEXT: vpaddd %xmm3, %xmm1, %xmm1
; AVX1-NEXT: vcvttps2dq %xmm1, %xmm1
; AVX1-NEXT: vpackusdw %xmm2, %xmm1, %xmm1
; AVX1-NEXT: vpmulhuw %xmm1, %xmm0, %xmm2
; AVX1-NEXT: vpmullw %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpor %xmm2, %xmm0, %xmm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: var_rotate_v8i16:
; AVX2: # %bb.0:
; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX2-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm2 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
; AVX2-NEXT: vpsllvd %ymm2, %ymm0, %ymm2
; AVX2-NEXT: vmovdqa {{.*#+}} ymm3 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpshufb %ymm3, %ymm2, %ymm2
; AVX2-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,2,2,3]
; AVX2-NEXT: vmovdqa {{.*#+}} xmm4 = [16,16,16,16,16,16,16,16]
; AVX2-NEXT: vpsubw %xmm1, %xmm4, %xmm1
; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
; AVX2-NEXT: vpsrlvd %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpshufb %ymm3, %ymm0, %ymm0
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
; AVX2-NEXT: vpor %xmm0, %xmm2, %xmm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512F-LABEL: var_rotate_v8i16:
; AVX512F: # %bb.0:
; AVX512F-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX512F-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
; AVX512F-NEXT: vpmovzxwd {{.*#+}} ymm2 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
; AVX512F-NEXT: vpsllvd %ymm2, %ymm0, %ymm2
; AVX512F-NEXT: vmovdqa {{.*#+}} xmm3 = [16,16,16,16,16,16,16,16]
; AVX512F-NEXT: vpsubw %xmm1, %xmm3, %xmm1
; AVX512F-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
; AVX512F-NEXT: vpsrlvd %ymm1, %ymm0, %ymm0
; AVX512F-NEXT: vpor %ymm0, %ymm2, %ymm0
; AVX512F-NEXT: vpmovdw %zmm0, %ymm0
; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: var_rotate_v8i16:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX512VL-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
; AVX512VL-NEXT: vpmovzxwd {{.*#+}} ymm2 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
; AVX512VL-NEXT: vpsllvd %ymm2, %ymm0, %ymm2
; AVX512VL-NEXT: vmovdqa {{.*#+}} xmm3 = [16,16,16,16,16,16,16,16]
; AVX512VL-NEXT: vpsubw %xmm1, %xmm3, %xmm1
; AVX512VL-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
; AVX512VL-NEXT: vpsrlvd %ymm1, %ymm0, %ymm0
; AVX512VL-NEXT: vpor %ymm0, %ymm2, %ymm0
; AVX512VL-NEXT: vpmovdw %ymm0, %xmm0
; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: var_rotate_v8i16:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512BW-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
; AVX512BW-NEXT: vpsllvw %zmm1, %zmm0, %zmm2
; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm3 = [16,16,16,16,16,16,16,16]
; AVX512BW-NEXT: vpsubw %xmm1, %xmm3, %xmm1
; AVX512BW-NEXT: vpsrlvw %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: vpor %xmm0, %xmm2, %xmm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
; AVX512VLBW-LABEL: var_rotate_v8i16:
; AVX512VLBW: # %bb.0:
; AVX512VLBW-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
; AVX512VLBW-NEXT: vpsllvw %xmm1, %xmm0, %xmm2
; AVX512VLBW-NEXT: vmovdqa {{.*#+}} xmm3 = [16,16,16,16,16,16,16,16]
; AVX512VLBW-NEXT: vpsubw %xmm1, %xmm3, %xmm1
; AVX512VLBW-NEXT: vpsrlvw %xmm1, %xmm0, %xmm0
; AVX512VLBW-NEXT: vpor %xmm0, %xmm2, %xmm0
; AVX512VLBW-NEXT: retq
;
; XOP-LABEL: var_rotate_v8i16:
; XOP: # %bb.0:
; XOP-NEXT: vprotw %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
;
; X32-SSE-LABEL: var_rotate_v8i16:
; X32-SSE: # %bb.0:
; X32-SSE-NEXT: pand {{\.LCPI.*}}, %xmm1
; X32-SSE-NEXT: movdqa %xmm1, %xmm2
; X32-SSE-NEXT: punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
; X32-SSE-NEXT: pslld $23, %xmm2
; X32-SSE-NEXT: movdqa {{.*#+}} xmm3 = [1065353216,1065353216,1065353216,1065353216]
; X32-SSE-NEXT: paddd %xmm3, %xmm2
; X32-SSE-NEXT: cvttps2dq %xmm2, %xmm2
; X32-SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[0,2,2,3,4,5,6,7]
; X32-SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,6,6,7]
; X32-SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
; X32-SSE-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; X32-SSE-NEXT: pslld $23, %xmm1
; X32-SSE-NEXT: paddd %xmm3, %xmm1
; X32-SSE-NEXT: cvttps2dq %xmm1, %xmm1
; X32-SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,2,2,3,4,5,6,7]
; X32-SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,6,6,7]
; X32-SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; X32-SSE-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
; X32-SSE-NEXT: movdqa %xmm0, %xmm2
; X32-SSE-NEXT: pmulhuw %xmm1, %xmm2
; X32-SSE-NEXT: pmullw %xmm1, %xmm0
; X32-SSE-NEXT: por %xmm2, %xmm0
; X32-SSE-NEXT: retl
  %b16 = sub <8 x i16> <i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16>, %b
  %shl = shl <8 x i16> %a, %b
  %lshr = lshr <8 x i16> %a, %b16
  %or = or <8 x i16> %shl, %lshr
  ret <8 x i16> %or
}

define <16 x i8> @var_rotate_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
; SSE2-LABEL: var_rotate_v16i8:
; SSE2: # %bb.0:
; SSE2-NEXT: movdqa %xmm0, %xmm2
; SSE2-NEXT: psllw $5, %xmm1
; SSE2-NEXT: pxor %xmm0, %xmm0
; SSE2-NEXT: pxor %xmm3, %xmm3
; SSE2-NEXT: pcmpgtb %xmm1, %xmm3
; SSE2-NEXT: movdqa %xmm2, %xmm4
; SSE2-NEXT: psrlw $4, %xmm4
; SSE2-NEXT: pand {{.*}}(%rip), %xmm4
; SSE2-NEXT: movdqa %xmm2, %xmm5
; SSE2-NEXT: psllw $4, %xmm5
; SSE2-NEXT: pand {{.*}}(%rip), %xmm5
; SSE2-NEXT: por %xmm4, %xmm5
; SSE2-NEXT: pand %xmm3, %xmm5
; SSE2-NEXT: pandn %xmm2, %xmm3
; SSE2-NEXT: por %xmm5, %xmm3
; SSE2-NEXT: movdqa %xmm3, %xmm2
; SSE2-NEXT: psrlw $6, %xmm2
; SSE2-NEXT: pand {{.*}}(%rip), %xmm2
; SSE2-NEXT: movdqa %xmm3, %xmm4
; SSE2-NEXT: psllw $2, %xmm4
; SSE2-NEXT: pand {{.*}}(%rip), %xmm4
; SSE2-NEXT: por %xmm2, %xmm4
; SSE2-NEXT: paddb %xmm1, %xmm1
; SSE2-NEXT: pxor %xmm2, %xmm2
; SSE2-NEXT: pcmpgtb %xmm1, %xmm2
; SSE2-NEXT: pand %xmm2, %xmm4
; SSE2-NEXT: pandn %xmm3, %xmm2
; SSE2-NEXT: por %xmm4, %xmm2
; SSE2-NEXT: movdqa %xmm2, %xmm3
; SSE2-NEXT: paddb %xmm2, %xmm3
; SSE2-NEXT: movdqa %xmm2, %xmm4
; SSE2-NEXT: psrlw $7, %xmm4
; SSE2-NEXT: pand {{.*}}(%rip), %xmm4
; SSE2-NEXT: por %xmm3, %xmm4
; SSE2-NEXT: paddb %xmm1, %xmm1
; SSE2-NEXT: pcmpgtb %xmm1, %xmm0
; SSE2-NEXT: pand %xmm0, %xmm4
; SSE2-NEXT: pandn %xmm2, %xmm0
; SSE2-NEXT: por %xmm4, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: var_rotate_v16i8:
; SSE41: # %bb.0:
; SSE41-NEXT: movdqa %xmm1, %xmm2
; SSE41-NEXT: movdqa %xmm0, %xmm1
; SSE41-NEXT: psrlw $4, %xmm0
; SSE41-NEXT: pand {{.*}}(%rip), %xmm0
; SSE41-NEXT: movdqa %xmm1, %xmm3
; SSE41-NEXT: psllw $4, %xmm3
; SSE41-NEXT: pand {{.*}}(%rip), %xmm3
; SSE41-NEXT: por %xmm0, %xmm3
; SSE41-NEXT: psllw $5, %xmm2
; SSE41-NEXT: movdqa %xmm2, %xmm0
; SSE41-NEXT: pblendvb %xmm0, %xmm3, %xmm1
; SSE41-NEXT: movdqa %xmm1, %xmm0
; SSE41-NEXT: psrlw $6, %xmm0
; SSE41-NEXT: pand {{.*}}(%rip), %xmm0
; SSE41-NEXT: movdqa %xmm1, %xmm3
; SSE41-NEXT: psllw $2, %xmm3
; SSE41-NEXT: pand {{.*}}(%rip), %xmm3
; SSE41-NEXT: por %xmm0, %xmm3
; SSE41-NEXT: paddb %xmm2, %xmm2
; SSE41-NEXT: movdqa %xmm2, %xmm0
; SSE41-NEXT: pblendvb %xmm0, %xmm3, %xmm1
; SSE41-NEXT: movdqa %xmm1, %xmm0
; SSE41-NEXT: paddb %xmm1, %xmm0
; SSE41-NEXT: movdqa %xmm1, %xmm3
; SSE41-NEXT: psrlw $7, %xmm3
; SSE41-NEXT: pand {{.*}}(%rip), %xmm3
; SSE41-NEXT: por %xmm0, %xmm3
; SSE41-NEXT: paddb %xmm2, %xmm2
; SSE41-NEXT: movdqa %xmm2, %xmm0
; SSE41-NEXT: pblendvb %xmm0, %xmm3, %xmm1
; SSE41-NEXT: movdqa %xmm1, %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: var_rotate_v16i8:
; AVX: # %bb.0:
; AVX-NEXT: vpsrlw $4, %xmm0, %xmm2
; AVX-NEXT: vpand {{.*}}(%rip), %xmm2, %xmm2
; AVX-NEXT: vpsllw $4, %xmm0, %xmm3
; AVX-NEXT: vpand {{.*}}(%rip), %xmm3, %xmm3
; AVX-NEXT: vpor %xmm2, %xmm3, %xmm2
; AVX-NEXT: vpsllw $5, %xmm1, %xmm1
; AVX-NEXT: vpblendvb %xmm1, %xmm2, %xmm0, %xmm0
; AVX-NEXT: vpsrlw $6, %xmm0, %xmm2
; AVX-NEXT: vpand {{.*}}(%rip), %xmm2, %xmm2
; AVX-NEXT: vpsllw $2, %xmm0, %xmm3
; AVX-NEXT: vpand {{.*}}(%rip), %xmm3, %xmm3
; AVX-NEXT: vpor %xmm2, %xmm3, %xmm2
; AVX-NEXT: vpaddb %xmm1, %xmm1, %xmm1
; AVX-NEXT: vpblendvb %xmm1, %xmm2, %xmm0, %xmm0
; AVX-NEXT: vpaddb %xmm0, %xmm0, %xmm2
; AVX-NEXT: vpsrlw $7, %xmm0, %xmm3
; AVX-NEXT: vpand {{.*}}(%rip), %xmm3, %xmm3
; AVX-NEXT: vpor %xmm3, %xmm2, %xmm2
; AVX-NEXT: vpaddb %xmm1, %xmm1, %xmm1
; AVX-NEXT: vpblendvb %xmm1, %xmm2, %xmm0, %xmm0
; AVX-NEXT: retq
;
; AVX512F-LABEL: var_rotate_v16i8:
; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovdqa {{.*#+}} xmm2 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
; AVX512F-NEXT: vpsubb %xmm1, %xmm2, %xmm2
; AVX512F-NEXT: vpmovzxbd {{.*#+}} zmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero,xmm1[4],zero,zero,zero,xmm1[5],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[7],zero,zero,zero,xmm1[8],zero,zero,zero,xmm1[9],zero,zero,zero,xmm1[10],zero,zero,zero,xmm1[11],zero,zero,zero,xmm1[12],zero,zero,zero,xmm1[13],zero,zero,zero,xmm1[14],zero,zero,zero,xmm1[15],zero,zero,zero
; AVX512F-NEXT: vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
; AVX512F-NEXT: vpsllvd %zmm1, %zmm0, %zmm1
; AVX512F-NEXT: vpmovzxbd {{.*#+}} zmm2 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero,xmm2[2],zero,zero,zero,xmm2[3],zero,zero,zero,xmm2[4],zero,zero,zero,xmm2[5],zero,zero,zero,xmm2[6],zero,zero,zero,xmm2[7],zero,zero,zero,xmm2[8],zero,zero,zero,xmm2[9],zero,zero,zero,xmm2[10],zero,zero,zero,xmm2[11],zero,zero,zero,xmm2[12],zero,zero,zero,xmm2[13],zero,zero,zero,xmm2[14],zero,zero,zero,xmm2[15],zero,zero,zero
; AVX512F-NEXT: vpsrlvd %zmm2, %zmm0, %zmm0
; AVX512F-NEXT: vpord %zmm0, %zmm1, %zmm0
; AVX512F-NEXT: vpmovdb %zmm0, %xmm0
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: var_rotate_v16i8:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa {{.*#+}} xmm2 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
; AVX512VL-NEXT: vpsubb %xmm1, %xmm2, %xmm2
; AVX512VL-NEXT: vpmovzxbd {{.*#+}} zmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero,xmm1[4],zero,zero,zero,xmm1[5],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[7],zero,zero,zero,xmm1[8],zero,zero,zero,xmm1[9],zero,zero,zero,xmm1[10],zero,zero,zero,xmm1[11],zero,zero,zero,xmm1[12],zero,zero,zero,xmm1[13],zero,zero,zero,xmm1[14],zero,zero,zero,xmm1[15],zero,zero,zero
; AVX512VL-NEXT: vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
; AVX512VL-NEXT: vpsllvd %zmm1, %zmm0, %zmm1
; AVX512VL-NEXT: vpmovzxbd {{.*#+}} zmm2 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero,xmm2[2],zero,zero,zero,xmm2[3],zero,zero,zero,xmm2[4],zero,zero,zero,xmm2[5],zero,zero,zero,xmm2[6],zero,zero,zero,xmm2[7],zero,zero,zero,xmm2[8],zero,zero,zero,xmm2[9],zero,zero,zero,xmm2[10],zero,zero,zero,xmm2[11],zero,zero,zero,xmm2[12],zero,zero,zero,xmm2[13],zero,zero,zero,xmm2[14],zero,zero,zero,xmm2[15],zero,zero,zero
; AVX512VL-NEXT: vpsrlvd %zmm2, %zmm0, %zmm0
; AVX512VL-NEXT: vpord %zmm0, %zmm1, %zmm0
; AVX512VL-NEXT: vpmovdb %zmm0, %xmm0
; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: var_rotate_v16i8:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm2 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
; AVX512BW-NEXT: vpsubb %xmm1, %xmm2, %xmm2
; AVX512BW-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
; AVX512BW-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
; AVX512BW-NEXT: vpsllvw %zmm1, %zmm0, %zmm1
; AVX512BW-NEXT: vpmovzxbw {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero,xmm2[8],zero,xmm2[9],zero,xmm2[10],zero,xmm2[11],zero,xmm2[12],zero,xmm2[13],zero,xmm2[14],zero,xmm2[15],zero
; AVX512BW-NEXT: vpsrlvw %zmm2, %zmm0, %zmm0
; AVX512BW-NEXT: vpor %ymm0, %ymm1, %ymm0
; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
; AVX512VLBW-LABEL: var_rotate_v16i8:
; AVX512VLBW: # %bb.0:
; AVX512VLBW-NEXT: vmovdqa {{.*#+}} xmm2 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
; AVX512VLBW-NEXT: vpsubb %xmm1, %xmm2, %xmm2
; AVX512VLBW-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
; AVX512VLBW-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
; AVX512VLBW-NEXT: vpsllvw %ymm1, %ymm0, %ymm1
; AVX512VLBW-NEXT: vpmovzxbw {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero,xmm2[8],zero,xmm2[9],zero,xmm2[10],zero,xmm2[11],zero,xmm2[12],zero,xmm2[13],zero,xmm2[14],zero,xmm2[15],zero
; AVX512VLBW-NEXT: vpsrlvw %ymm2, %ymm0, %ymm0
; AVX512VLBW-NEXT: vpor %ymm0, %ymm1, %ymm0
; AVX512VLBW-NEXT: vpmovwb %ymm0, %xmm0
; AVX512VLBW-NEXT: vzeroupper
; AVX512VLBW-NEXT: retq
;
; XOP-LABEL: var_rotate_v16i8:
; XOP: # %bb.0:
; XOP-NEXT: vprotb %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
;
; X32-SSE-LABEL: var_rotate_v16i8:
; X32-SSE: # %bb.0:
; X32-SSE-NEXT: movdqa %xmm0, %xmm2
; X32-SSE-NEXT: psllw $5, %xmm1
; X32-SSE-NEXT: pxor %xmm0, %xmm0
; X32-SSE-NEXT: pxor %xmm3, %xmm3
; X32-SSE-NEXT: pcmpgtb %xmm1, %xmm3
; X32-SSE-NEXT: movdqa %xmm2, %xmm4
; X32-SSE-NEXT: psrlw $4, %xmm4
; X32-SSE-NEXT: pand {{\.LCPI.*}}, %xmm4
; X32-SSE-NEXT: movdqa %xmm2, %xmm5
; X32-SSE-NEXT: psllw $4, %xmm5
; X32-SSE-NEXT: pand {{\.LCPI.*}}, %xmm5
; X32-SSE-NEXT: por %xmm4, %xmm5
; X32-SSE-NEXT: pand %xmm3, %xmm5
; X32-SSE-NEXT: pandn %xmm2, %xmm3
; X32-SSE-NEXT: por %xmm5, %xmm3
; X32-SSE-NEXT: movdqa %xmm3, %xmm2
; X32-SSE-NEXT: psrlw $6, %xmm2
; X32-SSE-NEXT: pand {{\.LCPI.*}}, %xmm2
; X32-SSE-NEXT: movdqa %xmm3, %xmm4
; X32-SSE-NEXT: psllw $2, %xmm4
; X32-SSE-NEXT: pand {{\.LCPI.*}}, %xmm4
; X32-SSE-NEXT: por %xmm2, %xmm4
; X32-SSE-NEXT: paddb %xmm1, %xmm1
; X32-SSE-NEXT: pxor %xmm2, %xmm2
; X32-SSE-NEXT: pcmpgtb %xmm1, %xmm2
; X32-SSE-NEXT: pand %xmm2, %xmm4
; X32-SSE-NEXT: pandn %xmm3, %xmm2
; X32-SSE-NEXT: por %xmm4, %xmm2
; X32-SSE-NEXT: movdqa %xmm2, %xmm3
; X32-SSE-NEXT: paddb %xmm2, %xmm3
; X32-SSE-NEXT: movdqa %xmm2, %xmm4
; X32-SSE-NEXT: psrlw $7, %xmm4
; X32-SSE-NEXT: pand {{\.LCPI.*}}, %xmm4
; X32-SSE-NEXT: por %xmm3, %xmm4
; X32-SSE-NEXT: paddb %xmm1, %xmm1
; X32-SSE-NEXT: pcmpgtb %xmm1, %xmm0
; X32-SSE-NEXT: pand %xmm0, %xmm4
; X32-SSE-NEXT: pandn %xmm2, %xmm0
; X32-SSE-NEXT: por %xmm4, %xmm0
; X32-SSE-NEXT: retl
  %b8 = sub <16 x i8> <i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8>, %b
  %shl = shl <16 x i8> %a, %b
  %lshr = lshr <16 x i8> %a, %b8
  %or = or <16 x i8> %shl, %lshr
  ret <16 x i8> %or
}

;
; Uniform Variable Rotates
;

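; The splatvar tests use the same shift-pair idiom, but with the rotate
; amount broadcast from lane 0 of %b via a shufflevector splat.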
define <2 x i64> @splatvar_rotate_v2i64(<2 x i64> %a, <2 x i64> %b) nounwind {
; SSE-LABEL: splatvar_rotate_v2i64:
; SSE: # %bb.0:
; SSE-NEXT: movdqa {{.*#+}} xmm2 = [64,64]
; SSE-NEXT: psubq %xmm1, %xmm2
; SSE-NEXT: movdqa %xmm0, %xmm3
; SSE-NEXT: psllq %xmm1, %xmm3
; SSE-NEXT: psrlq %xmm2, %xmm0
; SSE-NEXT: por %xmm3, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: splatvar_rotate_v2i64:
; AVX: # %bb.0:
; AVX-NEXT: vmovdqa {{.*#+}} xmm2 = [64,64]
; AVX-NEXT: vpsubq %xmm1, %xmm2, %xmm2
; AVX-NEXT: vpsllq %xmm1, %xmm0, %xmm1
; AVX-NEXT: vpsrlq %xmm2, %xmm0, %xmm0
; AVX-NEXT: vpor %xmm0, %xmm1, %xmm0
; AVX-NEXT: retq
;
; AVX512F-LABEL: splatvar_rotate_v2i64:
; AVX512F: # %bb.0:
; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512F-NEXT: vpbroadcastq %xmm1, %xmm1
; AVX512F-NEXT: vprolvq %zmm1, %zmm0, %zmm0
; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: splatvar_rotate_v2i64:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpbroadcastq %xmm1, %xmm1
; AVX512VL-NEXT: vprolvq %xmm1, %xmm0, %xmm0
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: splatvar_rotate_v2i64:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512BW-NEXT: vpbroadcastq %xmm1, %xmm1
; AVX512BW-NEXT: vprolvq %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
; AVX512VLBW-LABEL: splatvar_rotate_v2i64:
; AVX512VLBW: # %bb.0:
; AVX512VLBW-NEXT: vpbroadcastq %xmm1, %xmm1
; AVX512VLBW-NEXT: vprolvq %xmm1, %xmm0, %xmm0
; AVX512VLBW-NEXT: retq
;
; XOPAVX1-LABEL: splatvar_rotate_v2i64:
; XOPAVX1: # %bb.0:
; XOPAVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,1,0,1]
; XOPAVX1-NEXT: vprotq %xmm1, %xmm0, %xmm0
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: splatvar_rotate_v2i64:
; XOPAVX2: # %bb.0:
; XOPAVX2-NEXT: vpbroadcastq %xmm1, %xmm1
; XOPAVX2-NEXT: vprotq %xmm1, %xmm0, %xmm0
; XOPAVX2-NEXT: retq
;
; X32-SSE-LABEL: splatvar_rotate_v2i64:
; X32-SSE: # %bb.0:
; X32-SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm1[0,1,0,1]
; X32-SSE-NEXT: movdqa {{.*#+}} xmm3 = [64,0,64,0]
; X32-SSE-NEXT: psubq %xmm2, %xmm3
; X32-SSE-NEXT: movdqa %xmm0, %xmm2
; X32-SSE-NEXT: psllq %xmm1, %xmm2
; X32-SSE-NEXT: movdqa %xmm0, %xmm1
; X32-SSE-NEXT: psrlq %xmm3, %xmm1
; X32-SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[2,3,0,1]
; X32-SSE-NEXT: psrlq %xmm3, %xmm0
; X32-SSE-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; X32-SSE-NEXT: orpd %xmm2, %xmm0
; X32-SSE-NEXT: retl
  %splat = shufflevector <2 x i64> %b, <2 x i64> undef, <2 x i32> zeroinitializer
  %splat64 = sub <2 x i64> <i64 64, i64 64>, %splat
  %shl = shl <2 x i64> %a, %splat
  %lshr = lshr <2 x i64> %a, %splat64
  %or = or <2 x i64> %shl, %lshr
  ret <2 x i64> %or
}

define <4 x i32> @splatvar_rotate_v4i32(<4 x i32> %a, <4 x i32> %b) nounwind {
; SSE2-LABEL: splatvar_rotate_v4i32:
; SSE2: # %bb.0:
; SSE2-NEXT: movd %xmm1, %eax
; SSE2-NEXT: andl $31, %eax
; SSE2-NEXT: movd %eax, %xmm1
; SSE2-NEXT: movdqa %xmm0, %xmm2
; SSE2-NEXT: pslld %xmm1, %xmm2
; SSE2-NEXT: movl $32, %ecx
; SSE2-NEXT: subl %eax, %ecx
; SSE2-NEXT: movd %ecx, %xmm1
; SSE2-NEXT: psrld %xmm1, %xmm0
; SSE2-NEXT: por %xmm2, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: splatvar_rotate_v4i32:
; SSE41: # %bb.0:
; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,0,0]
; SSE41-NEXT: pand {{.*}}(%rip), %xmm1
; SSE41-NEXT: pmovzxdq {{.*#+}} xmm2 = xmm1[0],zero,xmm1[1],zero
; SSE41-NEXT: movdqa %xmm0, %xmm3
; SSE41-NEXT: pslld %xmm2, %xmm3
; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [32,32,32,32]
; SSE41-NEXT: psubd %xmm1, %xmm2
; SSE41-NEXT: pmovzxdq {{.*#+}} xmm1 = xmm2[0],zero,xmm2[1],zero
; SSE41-NEXT: psrld %xmm1, %xmm0
; SSE41-NEXT: por %xmm3, %xmm0
; SSE41-NEXT: retq
;
; AVX1-LABEL: splatvar_rotate_v4i32:
; AVX1: # %bb.0:
; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,0,0]
; AVX1-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm2 = xmm1[0],zero,xmm1[1],zero
; AVX1-NEXT: vpslld %xmm2, %xmm0, %xmm2
; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [32,32,32,32]
; AVX1-NEXT: vpsubd %xmm1, %xmm3, %xmm1
; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
; AVX1-NEXT: vpsrld %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpor %xmm0, %xmm2, %xmm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: splatvar_rotate_v4i32:
; AVX2: # %bb.0:
; AVX2-NEXT: vpbroadcastd %xmm1, %xmm1
; AVX2-NEXT: vpbroadcastd {{.*#+}} xmm2 = [31,31,31,31]
; AVX2-NEXT: vpand %xmm2, %xmm1, %xmm1
; AVX2-NEXT: vpmovzxdq {{.*#+}} xmm2 = xmm1[0],zero,xmm1[1],zero
; AVX2-NEXT: vpslld %xmm2, %xmm0, %xmm2
; AVX2-NEXT: vpbroadcastd {{.*#+}} xmm3 = [32,32,32,32]
; AVX2-NEXT: vpsubd %xmm1, %xmm3, %xmm1
; AVX2-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
; AVX2-NEXT: vpsrld %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpor %xmm0, %xmm2, %xmm0
; AVX2-NEXT: retq
;
; AVX512F-LABEL: splatvar_rotate_v4i32:
; AVX512F: # %bb.0:
; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512F-NEXT: vpbroadcastd %xmm1, %xmm1
; AVX512F-NEXT: vprolvd %zmm1, %zmm0, %zmm0
; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: splatvar_rotate_v4i32:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpbroadcastd %xmm1, %xmm1
; AVX512VL-NEXT: vprolvd %xmm1, %xmm0, %xmm0
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: splatvar_rotate_v4i32:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512BW-NEXT: vpbroadcastd %xmm1, %xmm1
; AVX512BW-NEXT: vprolvd %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
; AVX512VLBW-LABEL: splatvar_rotate_v4i32:
; AVX512VLBW: # %bb.0:
; AVX512VLBW-NEXT: vpbroadcastd %xmm1, %xmm1
; AVX512VLBW-NEXT: vprolvd %xmm1, %xmm0, %xmm0
; AVX512VLBW-NEXT: retq
;
; XOPAVX1-LABEL: splatvar_rotate_v4i32:
; XOPAVX1: # %bb.0:
; XOPAVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,0,0]
; XOPAVX1-NEXT: vprotd %xmm1, %xmm0, %xmm0
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: splatvar_rotate_v4i32:
; XOPAVX2: # %bb.0:
; XOPAVX2-NEXT: vpbroadcastd %xmm1, %xmm1
; XOPAVX2-NEXT: vprotd %xmm1, %xmm0, %xmm0
; XOPAVX2-NEXT: retq
;
; X32-SSE-LABEL: splatvar_rotate_v4i32:
; X32-SSE: # %bb.0:
; X32-SSE-NEXT: movd %xmm1, %eax
; X32-SSE-NEXT: andl $31, %eax
; X32-SSE-NEXT: movd %eax, %xmm1
; X32-SSE-NEXT: movdqa %xmm0, %xmm2
; X32-SSE-NEXT: pslld %xmm1, %xmm2
; X32-SSE-NEXT: movl $32, %ecx
; X32-SSE-NEXT: subl %eax, %ecx
; X32-SSE-NEXT: movd %ecx, %xmm1
; X32-SSE-NEXT: psrld %xmm1, %xmm0
; X32-SSE-NEXT: por %xmm2, %xmm0
; X32-SSE-NEXT: retl
  %splat = shufflevector <4 x i32> %b, <4 x i32> undef, <4 x i32> zeroinitializer
  %splat32 = sub <4 x i32> <i32 32, i32 32, i32 32, i32 32>, %splat
  %shl = shl <4 x i32> %a, %splat
  %lshr = lshr <4 x i32> %a, %splat32
  %or = or <4 x i32> %shl, %lshr
  ret <4 x i32> %or
}

define <8 x i16> @splatvar_rotate_v8i16(<8 x i16> %a, <8 x i16> %b) nounwind {
; SSE2-LABEL: splatvar_rotate_v8i16:
; SSE2: # %bb.0:
; SSE2-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,0,2,3,4,5,6,7]
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,0,0]
; SSE2-NEXT: pand {{.*}}(%rip), %xmm1
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [16,16,16,16,16,16,16,16]
; SSE2-NEXT: psubw %xmm1, %xmm2
; SSE2-NEXT: pslldq {{.*#+}} xmm1 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm1[0,1]
; SSE2-NEXT: psrldq {{.*#+}} xmm1 = xmm1[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; SSE2-NEXT: movdqa %xmm0, %xmm3
; SSE2-NEXT: psllw %xmm1, %xmm3
; SSE2-NEXT: pslldq {{.*#+}} xmm2 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm2[0,1]
; SSE2-NEXT: psrldq {{.*#+}} xmm2 = xmm2[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; SSE2-NEXT: psrlw %xmm2, %xmm0
; SSE2-NEXT: por %xmm3, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: splatvar_rotate_v8i16:
; SSE41: # %bb.0:
; SSE41-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,0,2,3,4,5,6,7]
; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,0,0]
; SSE41-NEXT: pand {{.*}}(%rip), %xmm1
; SSE41-NEXT: pmovzxwq {{.*#+}} xmm2 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
; SSE41-NEXT: movdqa %xmm0, %xmm3
; SSE41-NEXT: psllw %xmm2, %xmm3
; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [16,16,16,16,16,16,16,16]
; SSE41-NEXT: psubw %xmm1, %xmm2
; SSE41-NEXT: pmovzxwq {{.*#+}} xmm1 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero
; SSE41-NEXT: psrlw %xmm1, %xmm0
; SSE41-NEXT: por %xmm3, %xmm0
; SSE41-NEXT: retq
;
; AVX1-LABEL: splatvar_rotate_v8i16:
; AVX1: # %bb.0:
; AVX1-NEXT: vpshuflw {{.*#+}} xmm1 = xmm1[0,0,2,3,4,5,6,7]
; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,0,0]
; AVX1-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
; AVX1-NEXT: vpmovzxwq {{.*#+}} xmm2 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
; AVX1-NEXT: vpsllw %xmm2, %xmm0, %xmm2
; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [16,16,16,16,16,16,16,16]
; AVX1-NEXT: vpsubw %xmm1, %xmm3, %xmm1
; AVX1-NEXT: vpmovzxwq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
; AVX1-NEXT: vpsrlw %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpor %xmm0, %xmm2, %xmm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: splatvar_rotate_v8i16:
; AVX2: # %bb.0:
; AVX2-NEXT: vpbroadcastw %xmm1, %xmm1
; AVX2-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
; AVX2-NEXT: vpmovzxwq {{.*#+}} xmm2 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
; AVX2-NEXT: vpsllw %xmm2, %xmm0, %xmm2
; AVX2-NEXT: vmovdqa {{.*#+}} xmm3 = [16,16,16,16,16,16,16,16]
; AVX2-NEXT: vpsubw %xmm1, %xmm3, %xmm1
; AVX2-NEXT: vpmovzxwq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
; AVX2-NEXT: vpsrlw %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpor %xmm0, %xmm2, %xmm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: splatvar_rotate_v8i16:
; AVX512: # %bb.0:
; AVX512-NEXT: vpbroadcastw %xmm1, %xmm1
; AVX512-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
; AVX512-NEXT: vpmovzxwq {{.*#+}} xmm2 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
; AVX512-NEXT: vpsllw %xmm2, %xmm0, %xmm2
; AVX512-NEXT: vmovdqa {{.*#+}} xmm3 = [16,16,16,16,16,16,16,16]
; AVX512-NEXT: vpsubw %xmm1, %xmm3, %xmm1
; AVX512-NEXT: vpmovzxwq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
; AVX512-NEXT: vpsrlw %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vpor %xmm0, %xmm2, %xmm0
; AVX512-NEXT: retq
;
; XOPAVX1-LABEL: splatvar_rotate_v8i16:
; XOPAVX1: # %bb.0:
; XOPAVX1-NEXT: vpshuflw {{.*#+}} xmm1 = xmm1[0,0,2,3,4,5,6,7]
; XOPAVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,0,0]
; XOPAVX1-NEXT: vprotw %xmm1, %xmm0, %xmm0
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: splatvar_rotate_v8i16:
; XOPAVX2: # %bb.0:
; XOPAVX2-NEXT: vpbroadcastw %xmm1, %xmm1
; XOPAVX2-NEXT: vprotw %xmm1, %xmm0, %xmm0
; XOPAVX2-NEXT: retq
;
; X32-SSE-LABEL: splatvar_rotate_v8i16:
; X32-SSE: # %bb.0:
; X32-SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,0,2,3,4,5,6,7]
; X32-SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,0,0]
; X32-SSE-NEXT: pand {{\.LCPI.*}}, %xmm1
; X32-SSE-NEXT: movdqa {{.*#+}} xmm2 = [16,16,16,16,16,16,16,16]
; X32-SSE-NEXT: psubw %xmm1, %xmm2
; X32-SSE-NEXT: pslldq {{.*#+}} xmm1 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm1[0,1]
; X32-SSE-NEXT: psrldq {{.*#+}} xmm1 = xmm1[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; X32-SSE-NEXT: movdqa %xmm0, %xmm3
; X32-SSE-NEXT: psllw %xmm1, %xmm3
; X32-SSE-NEXT: pslldq {{.*#+}} xmm2 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm2[0,1]
; X32-SSE-NEXT: psrldq {{.*#+}} xmm2 = xmm2[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; X32-SSE-NEXT: psrlw %xmm2, %xmm0
; X32-SSE-NEXT: por %xmm3, %xmm0
; X32-SSE-NEXT: retl
  %splat = shufflevector <8 x i16> %b, <8 x i16> undef, <8 x i32> zeroinitializer
  %splat16 = sub <8 x i16> <i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16>, %splat
  %shl = shl <8 x i16> %a, %splat
  %lshr = lshr <8 x i16> %a, %splat16
  %or = or <8 x i16> %shl, %lshr
  ret <8 x i16> %or
}

define <16 x i8> @splatvar_rotate_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
; SSE2-LABEL: splatvar_rotate_v16i8:
; SSE2: # %bb.0:
; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,0,2,3,4,5,6,7]
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,0,0]
; SSE2-NEXT: pand {{.*}}(%rip), %xmm1
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
; SSE2-NEXT: psubb %xmm1, %xmm2
; SSE2-NEXT: movdqa %xmm1, %xmm3
; SSE2-NEXT: pslldq {{.*#+}} xmm3 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm3[0]
; SSE2-NEXT: psrldq {{.*#+}} xmm3 = xmm3[15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; SSE2-NEXT: movdqa %xmm0, %xmm1
; SSE2-NEXT: psllw %xmm3, %xmm1
; SSE2-NEXT: pcmpeqd %xmm4, %xmm4
; SSE2-NEXT: pcmpeqd %xmm5, %xmm5
; SSE2-NEXT: psllw %xmm3, %xmm5
; SSE2-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT: pshuflw {{.*#+}} xmm3 = xmm5[0,0,2,3,4,5,6,7]
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,0,0,0]
; SSE2-NEXT: pand %xmm3, %xmm1
; SSE2-NEXT: pslldq {{.*#+}} xmm2 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm2[0]
; SSE2-NEXT: psrldq {{.*#+}} xmm2 = xmm2[15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; SSE2-NEXT: psrlw %xmm2, %xmm0
; SSE2-NEXT: psrlw %xmm2, %xmm4
; SSE2-NEXT: psrlw $8, %xmm4
; SSE2-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT: pshuflw {{.*#+}} xmm2 = xmm4[0,0,2,3,4,5,6,7]
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,0,0,0]
; SSE2-NEXT: pand %xmm0, %xmm2
; SSE2-NEXT: por %xmm2, %xmm1
; SSE2-NEXT: movdqa %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: splatvar_rotate_v16i8:
; SSE41: # %bb.0:
; SSE41-NEXT: pxor %xmm3, %xmm3
; SSE41-NEXT: pshufb %xmm3, %xmm1
; SSE41-NEXT: pand {{.*}}(%rip), %xmm1
; SSE41-NEXT: pmovzxbq {{.*#+}} xmm4 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
; SSE41-NEXT: movdqa %xmm0, %xmm2
; SSE41-NEXT: psllw %xmm4, %xmm2
; SSE41-NEXT: pcmpeqd %xmm5, %xmm5
; SSE41-NEXT: pcmpeqd %xmm6, %xmm6
; SSE41-NEXT: psllw %xmm4, %xmm6
; SSE41-NEXT: pshufb %xmm3, %xmm6
; SSE41-NEXT: pand %xmm6, %xmm2
; SSE41-NEXT: movdqa {{.*#+}} xmm3 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
; SSE41-NEXT: psubb %xmm1, %xmm3
; SSE41-NEXT: pmovzxbq {{.*#+}} xmm1 = xmm3[0],zero,zero,zero,zero,zero,zero,zero,xmm3[1],zero,zero,zero,zero,zero,zero,zero
; SSE41-NEXT: psrlw %xmm1, %xmm0
; SSE41-NEXT: psrlw %xmm1, %xmm5
; SSE41-NEXT: pshufb {{.*#+}} xmm5 = xmm5[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
; SSE41-NEXT: pand %xmm0, %xmm5
; SSE41-NEXT: por %xmm5, %xmm2
; SSE41-NEXT: movdqa %xmm2, %xmm0
; SSE41-NEXT: retq
;
; AVX1-LABEL: splatvar_rotate_v16i8:
; AVX1: # %bb.0:
; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX1-NEXT: vpshufb %xmm2, %xmm1, %xmm1
; AVX1-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
; AVX1-NEXT: vpmovzxbq {{.*#+}} xmm3 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
; AVX1-NEXT: vpsllw %xmm3, %xmm0, %xmm4
; AVX1-NEXT: vpcmpeqd %xmm5, %xmm5, %xmm5
; AVX1-NEXT: vpsllw %xmm3, %xmm5, %xmm3
; AVX1-NEXT: vpshufb %xmm2, %xmm3, %xmm2
; AVX1-NEXT: vpand %xmm2, %xmm4, %xmm2
; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
; AVX1-NEXT: vpsubb %xmm1, %xmm3, %xmm1
; AVX1-NEXT: vpmovzxbq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
; AVX1-NEXT: vpsrlw %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpsrlw %xmm1, %xmm5, %xmm1
; AVX1-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpor %xmm0, %xmm2, %xmm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: splatvar_rotate_v16i8:
; AVX2: # %bb.0:
; AVX2-NEXT: vpbroadcastb %xmm1, %xmm1
; AVX2-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
; AVX2-NEXT: vpmovzxbq {{.*#+}} xmm2 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
; AVX2-NEXT: vpsllw %xmm2, %xmm0, %xmm3
; AVX2-NEXT: vpcmpeqd %xmm4, %xmm4, %xmm4
; AVX2-NEXT: vpsllw %xmm2, %xmm4, %xmm2
; AVX2-NEXT: vpbroadcastb %xmm2, %xmm2
; AVX2-NEXT: vpand %xmm2, %xmm3, %xmm2
; AVX2-NEXT: vmovdqa {{.*#+}} xmm3 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
; AVX2-NEXT: vpsubb %xmm1, %xmm3, %xmm1
; AVX2-NEXT: vpmovzxbq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
; AVX2-NEXT: vpsrlw %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpsrlw %xmm1, %xmm4, %xmm1
; AVX2-NEXT: vpsrlw $8, %xmm1, %xmm1
; AVX2-NEXT: vpbroadcastb %xmm1, %xmm1
; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpor %xmm0, %xmm2, %xmm0
; AVX2-NEXT: retq
;
; AVX512F-LABEL: splatvar_rotate_v16i8:
; AVX512F: # %bb.0:
; AVX512F-NEXT: vpbroadcastb %xmm1, %xmm1
; AVX512F-NEXT: vmovdqa {{.*#+}} xmm2 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
; AVX512F-NEXT: vpsubb %xmm1, %xmm2, %xmm2
; AVX512F-NEXT: vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
; AVX512F-NEXT: vpmovzxbd {{.*#+}} zmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero,xmm1[4],zero,zero,zero,xmm1[5],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[7],zero,zero,zero,xmm1[8],zero,zero,zero,xmm1[9],zero,zero,zero,xmm1[10],zero,zero,zero,xmm1[11],zero,zero,zero,xmm1[12],zero,zero,zero,xmm1[13],zero,zero,zero,xmm1[14],zero,zero,zero,xmm1[15],zero,zero,zero
; AVX512F-NEXT: vpsllvd %zmm1, %zmm0, %zmm1
; AVX512F-NEXT: vpmovzxbd {{.*#+}} zmm2 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero,xmm2[2],zero,zero,zero,xmm2[3],zero,zero,zero,xmm2[4],zero,zero,zero,xmm2[5],zero,zero,zero,xmm2[6],zero,zero,zero,xmm2[7],zero,zero,zero,xmm2[8],zero,zero,zero,xmm2[9],zero,zero,zero,xmm2[10],zero,zero,zero,xmm2[11],zero,zero,zero,xmm2[12],zero,zero,zero,xmm2[13],zero,zero,zero,xmm2[14],zero,zero,zero,xmm2[15],zero,zero,zero
; AVX512F-NEXT: vpsrlvd %zmm2, %zmm0, %zmm0
; AVX512F-NEXT: vpord %zmm0, %zmm1, %zmm0
; AVX512F-NEXT: vpmovdb %zmm0, %xmm0
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: splatvar_rotate_v16i8:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpbroadcastb %xmm1, %xmm1
; AVX512VL-NEXT: vmovdqa {{.*#+}} xmm2 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
; AVX512VL-NEXT: vpsubb %xmm1, %xmm2, %xmm2
; AVX512VL-NEXT: vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
; AVX512VL-NEXT: vpmovzxbd {{.*#+}} zmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero,xmm1[4],zero,zero,zero,xmm1[5],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[7],zero,zero,zero,xmm1[8],zero,zero,zero,xmm1[9],zero,zero,zero,xmm1[10],zero,zero,zero,xmm1[11],zero,zero,zero,xmm1[12],zero,zero,zero,xmm1[13],zero,zero,zero,xmm1[14],zero,zero,zero,xmm1[15],zero,zero,zero
; AVX512VL-NEXT: vpsllvd %zmm1, %zmm0, %zmm1
; AVX512VL-NEXT: vpmovzxbd {{.*#+}} zmm2 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero,xmm2[2],zero,zero,zero,xmm2[3],zero,zero,zero,xmm2[4],zero,zero,zero,xmm2[5],zero,zero,zero,xmm2[6],zero,zero,zero,xmm2[7],zero,zero,zero,xmm2[8],zero,zero,zero,xmm2[9],zero,zero,zero,xmm2[10],zero,zero,zero,xmm2[11],zero,zero,zero,xmm2[12],zero,zero,zero,xmm2[13],zero,zero,zero,xmm2[14],zero,zero,zero,xmm2[15],zero,zero,zero
; AVX512VL-NEXT: vpsrlvd %zmm2, %zmm0, %zmm0
; AVX512VL-NEXT: vpord %zmm0, %zmm1, %zmm0
; AVX512VL-NEXT: vpmovdb %zmm0, %xmm0
; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: splatvar_rotate_v16i8:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpbroadcastb %xmm1, %xmm1
; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm2 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
; AVX512BW-NEXT: vpsubb %xmm1, %xmm2, %xmm2
; AVX512BW-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
; AVX512BW-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
; AVX512BW-NEXT: vpsllvw %zmm1, %zmm0, %zmm1
; AVX512BW-NEXT: vpmovzxbw {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero,xmm2[8],zero,xmm2[9],zero,xmm2[10],zero,xmm2[11],zero,xmm2[12],zero,xmm2[13],zero,xmm2[14],zero,xmm2[15],zero
; AVX512BW-NEXT: vpsrlvw %zmm2, %zmm0, %zmm0
; AVX512BW-NEXT: vpor %ymm0, %ymm1, %ymm0
; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
; AVX512VLBW-LABEL: splatvar_rotate_v16i8:
; AVX512VLBW: # %bb.0:
; AVX512VLBW-NEXT: vpbroadcastb %xmm1, %xmm1
; AVX512VLBW-NEXT: vmovdqa {{.*#+}} xmm2 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
; AVX512VLBW-NEXT: vpsubb %xmm1, %xmm2, %xmm2
; AVX512VLBW-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
; AVX512VLBW-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
; AVX512VLBW-NEXT: vpsllvw %ymm1, %ymm0, %ymm1
; AVX512VLBW-NEXT: vpmovzxbw {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero,xmm2[8],zero,xmm2[9],zero,xmm2[10],zero,xmm2[11],zero,xmm2[12],zero,xmm2[13],zero,xmm2[14],zero,xmm2[15],zero
; AVX512VLBW-NEXT: vpsrlvw %ymm2, %ymm0, %ymm0
; AVX512VLBW-NEXT: vpor %ymm0, %ymm1, %ymm0
; AVX512VLBW-NEXT: vpmovwb %ymm0, %xmm0
; AVX512VLBW-NEXT: vzeroupper
; AVX512VLBW-NEXT: retq
;
; XOPAVX1-LABEL: splatvar_rotate_v16i8:
; XOPAVX1: # %bb.0:
; XOPAVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
; XOPAVX1-NEXT: vpshufb %xmm2, %xmm1, %xmm1
; XOPAVX1-NEXT: vprotb %xmm1, %xmm0, %xmm0
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: splatvar_rotate_v16i8:
; XOPAVX2: # %bb.0:
; XOPAVX2-NEXT: vpbroadcastb %xmm1, %xmm1
; XOPAVX2-NEXT: vprotb %xmm1, %xmm0, %xmm0
; XOPAVX2-NEXT: retq
;
; X32-SSE-LABEL: splatvar_rotate_v16i8:
; X32-SSE: # %bb.0:
; X32-SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; X32-SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,0,2,3,4,5,6,7]
; X32-SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,0,0]
; X32-SSE-NEXT: pand {{\.LCPI.*}}, %xmm1
; X32-SSE-NEXT: movdqa {{.*#+}} xmm2 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
; X32-SSE-NEXT: psubb %xmm1, %xmm2
; X32-SSE-NEXT: movdqa %xmm1, %xmm3
; X32-SSE-NEXT: pslldq {{.*#+}} xmm3 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm3[0]
; X32-SSE-NEXT: psrldq {{.*#+}} xmm3 = xmm3[15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; X32-SSE-NEXT: movdqa %xmm0, %xmm1
; X32-SSE-NEXT: psllw %xmm3, %xmm1
; X32-SSE-NEXT: pcmpeqd %xmm4, %xmm4
; X32-SSE-NEXT: pcmpeqd %xmm5, %xmm5
; X32-SSE-NEXT: psllw %xmm3, %xmm5
; X32-SSE-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; X32-SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm5[0,0,2,3,4,5,6,7]
; X32-SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,0,0,0]
; X32-SSE-NEXT: pand %xmm3, %xmm1
; X32-SSE-NEXT: pslldq {{.*#+}} xmm2 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm2[0]
; X32-SSE-NEXT: psrldq {{.*#+}} xmm2 = xmm2[15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; X32-SSE-NEXT: psrlw %xmm2, %xmm0
; X32-SSE-NEXT: psrlw %xmm2, %xmm4
; X32-SSE-NEXT: psrlw $8, %xmm4
; X32-SSE-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; X32-SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm4[0,0,2,3,4,5,6,7]
; X32-SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,0,0,0]
; X32-SSE-NEXT: pand %xmm0, %xmm2
; X32-SSE-NEXT: por %xmm2, %xmm1
; X32-SSE-NEXT: movdqa %xmm1, %xmm0
; X32-SSE-NEXT: retl
  %splat = shufflevector <16 x i8> %b, <16 x i8> undef, <16 x i32> zeroinitializer
  %splat8 = sub <16 x i8> <i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8>, %splat
  %shl = shl <16 x i8> %a, %splat
  %lshr = lshr <16 x i8> %a, %splat8
  %or = or <16 x i8> %shl, %lshr
  ret <16 x i8> %or
}

;
; Constant Rotates
;

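; The constant tests use immediate per-lane rotate amounts, so the shift pair
; can fold to immediate-count shifts (or a constant-pool operand for
; vprolv*/vprot*, as the checks below show).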
define <2 x i64> @constant_rotate_v2i64(<2 x i64> %a) nounwind {
; SSE2-LABEL: constant_rotate_v2i64:
; SSE2: # %bb.0:
; SSE2-NEXT: movdqa %xmm0, %xmm1
; SSE2-NEXT: psllq $4, %xmm1
; SSE2-NEXT: movdqa %xmm0, %xmm2
; SSE2-NEXT: psllq $14, %xmm2
; SSE2-NEXT: movsd {{.*#+}} xmm2 = xmm1[0],xmm2[1]
; SSE2-NEXT: movdqa %xmm0, %xmm1
; SSE2-NEXT: psrlq $60, %xmm1
; SSE2-NEXT: psrlq $50, %xmm0
; SSE2-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; SSE2-NEXT: orpd %xmm2, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: constant_rotate_v2i64:
; SSE41: # %bb.0:
; SSE41-NEXT: movdqa %xmm0, %xmm1
; SSE41-NEXT: psllq $14, %xmm1
; SSE41-NEXT: movdqa %xmm0, %xmm2
; SSE41-NEXT: psllq $4, %xmm2
; SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm1[4,5,6,7]
; SSE41-NEXT: movdqa %xmm0, %xmm1
; SSE41-NEXT: psrlq $50, %xmm1
; SSE41-NEXT: psrlq $60, %xmm0
; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
; SSE41-NEXT: por %xmm2, %xmm0
; SSE41-NEXT: retq
;
; AVX1-LABEL: constant_rotate_v2i64:
; AVX1: # %bb.0:
; AVX1-NEXT: vpsllq $14, %xmm0, %xmm1
; AVX1-NEXT: vpsllq $4, %xmm0, %xmm2
; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm2[0,1,2,3],xmm1[4,5,6,7]
; AVX1-NEXT: vpsrlq $50, %xmm0, %xmm2
; AVX1-NEXT: vpsrlq $60, %xmm0, %xmm0
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm2[4,5,6,7]
; AVX1-NEXT: vpor %xmm0, %xmm1, %xmm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: constant_rotate_v2i64:
; AVX2: # %bb.0:
; AVX2-NEXT: vpsllvq {{.*}}(%rip), %xmm0, %xmm1
; AVX2-NEXT: vpsrlvq {{.*}}(%rip), %xmm0, %xmm0
; AVX2-NEXT: vpor %xmm0, %xmm1, %xmm0
; AVX2-NEXT: retq
;
; AVX512F-LABEL: constant_rotate_v2i64:
; AVX512F: # %bb.0:
; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512F-NEXT: vmovdqa {{.*#+}} xmm1 = [4,14]
; AVX512F-NEXT: vprolvq %zmm1, %zmm0, %zmm0
; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: constant_rotate_v2i64:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vprolvq {{.*}}(%rip), %xmm0, %xmm0
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: constant_rotate_v2i64:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm1 = [4,14]
; AVX512BW-NEXT: vprolvq %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
; AVX512VLBW-LABEL: constant_rotate_v2i64:
; AVX512VLBW: # %bb.0:
; AVX512VLBW-NEXT: vprolvq {{.*}}(%rip), %xmm0, %xmm0
; AVX512VLBW-NEXT: retq
;
; XOP-LABEL: constant_rotate_v2i64:
; XOP: # %bb.0:
; XOP-NEXT: vprotq {{.*}}(%rip), %xmm0, %xmm0
; XOP-NEXT: retq
;
; X32-SSE-LABEL: constant_rotate_v2i64:
; X32-SSE: # %bb.0:
; X32-SSE-NEXT: movdqa %xmm0, %xmm1
; X32-SSE-NEXT: psllq $4, %xmm1
; X32-SSE-NEXT: movdqa %xmm0, %xmm2
; X32-SSE-NEXT: psllq $14, %xmm2
; X32-SSE-NEXT: movsd {{.*#+}} xmm2 = xmm1[0],xmm2[1]
; X32-SSE-NEXT: movdqa %xmm0, %xmm1
; X32-SSE-NEXT: psrlq $60, %xmm1
; X32-SSE-NEXT: psrlq $50, %xmm0
; X32-SSE-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; X32-SSE-NEXT: orpd %xmm2, %xmm0
; X32-SSE-NEXT: retl
  %shl = shl <2 x i64> %a, <i64 4, i64 14>
  %lshr = lshr <2 x i64> %a, <i64 60, i64 50>
  %or = or <2 x i64> %shl, %lshr
  ret <2 x i64> %or
}

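; v4i32 rotate by <4,5,6,7>. The SSE lowering multiplies by [16,32,64,128]
; (1<<4 .. 1<<7) with pmuludq: each 64-bit product holds the shl result in
; its low dword and the matching lshr result in its high dword, so shuffling
; the two halves apart and or-ing them yields the rotate. AVX2 uses
; vpsllvd/vpsrlvd and AVX512 uses vprolvd.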
define <4 x i32> @constant_rotate_v4i32(<4 x i32> %a) nounwind {
; SSE2-LABEL: constant_rotate_v4i32:
; SSE2: # %bb.0:
; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [16,32,64,128]
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,3,2,3]
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm2, %xmm1
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,3,2,3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE2-NEXT: por %xmm3, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: constant_rotate_v4i32:
; SSE41: # %bb.0:
; SSE41-NEXT: movdqa {{.*#+}} xmm1 = [16,32,64,128]
; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
; SSE41-NEXT: pmuludq %xmm2, %xmm3
; SSE41-NEXT: pmuludq %xmm1, %xmm0
; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1],xmm3[2,3],xmm1[4,5],xmm3[6,7]
; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm3[0,0,2,2]
; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
; SSE41-NEXT: por %xmm1, %xmm0
; SSE41-NEXT: retq
;
; AVX1-LABEL: constant_rotate_v4i32:
; AVX1: # %bb.0:
; AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = [16,32,64,128]
; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
; AVX1-NEXT: vpmuludq %xmm2, %xmm3, %xmm2
; AVX1-NEXT: vpmuludq %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7]
; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[0,0,2,2]
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
; AVX1-NEXT: vpor %xmm1, %xmm0, %xmm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: constant_rotate_v4i32:
; AVX2: # %bb.0:
; AVX2-NEXT: vpsrlvd {{.*}}(%rip), %xmm0, %xmm1
; AVX2-NEXT: vpsllvd {{.*}}(%rip), %xmm0, %xmm0
; AVX2-NEXT: vpor %xmm1, %xmm0, %xmm0
; AVX2-NEXT: retq
;
; AVX512F-LABEL: constant_rotate_v4i32:
; AVX512F: # %bb.0:
; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512F-NEXT: vmovdqa {{.*#+}} xmm1 = [4,5,6,7]
; AVX512F-NEXT: vprolvd %zmm1, %zmm0, %zmm0
; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: constant_rotate_v4i32:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vprolvd {{.*}}(%rip), %xmm0, %xmm0
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: constant_rotate_v4i32:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm1 = [4,5,6,7]
; AVX512BW-NEXT: vprolvd %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
; AVX512VLBW-LABEL: constant_rotate_v4i32:
; AVX512VLBW: # %bb.0:
; AVX512VLBW-NEXT: vprolvd {{.*}}(%rip), %xmm0, %xmm0
; AVX512VLBW-NEXT: retq
;
; XOP-LABEL: constant_rotate_v4i32:
; XOP: # %bb.0:
; XOP-NEXT: vprotd {{.*}}(%rip), %xmm0, %xmm0
; XOP-NEXT: retq
;
; X32-SSE-LABEL: constant_rotate_v4i32:
; X32-SSE: # %bb.0:
; X32-SSE-NEXT: movdqa {{.*#+}} xmm1 = [16,32,64,128]
; X32-SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; X32-SSE-NEXT: pmuludq %xmm1, %xmm0
; X32-SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,3,2,3]
; X32-SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
; X32-SSE-NEXT: pmuludq %xmm2, %xmm1
; X32-SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,3,2,3]
; X32-SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
; X32-SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; X32-SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; X32-SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; X32-SSE-NEXT: por %xmm3, %xmm0
; X32-SSE-NEXT: retl
  %shl = shl <4 x i32> %a, <i32 4, i32 5, i32 6, i32 7>
  %lshr = lshr <4 x i32> %a, <i32 28, i32 27, i32 26, i32 25>
  %or = or <4 x i32> %shl, %lshr
  ret <4 x i32> %or
}

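; v8i16 rotate by <0..7>. pmullw by [1,2,4,8,16,32,64,128] computes the shl
; half while pmulhuw by the same constants returns the high half of each
; product, which is exactly the matching lshr; or-ing the two gives the
; rotate. Only AVX512BW targets have the variable word shifts
; vpsllvw/vpsrlvw.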
define <8 x i16> @constant_rotate_v8i16(<8 x i16> %a) nounwind {
; SSE-LABEL: constant_rotate_v8i16:
; SSE: # %bb.0:
; SSE-NEXT: movdqa {{.*#+}} xmm1 = [1,2,4,8,16,32,64,128]
; SSE-NEXT: movdqa %xmm0, %xmm2
; SSE-NEXT: pmulhuw %xmm1, %xmm2
; SSE-NEXT: pmullw %xmm1, %xmm0
; SSE-NEXT: por %xmm2, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: constant_rotate_v8i16:
; AVX: # %bb.0:
; AVX-NEXT: vmovdqa {{.*#+}} xmm1 = [1,2,4,8,16,32,64,128]
; AVX-NEXT: vpmulhuw %xmm1, %xmm0, %xmm2
; AVX-NEXT: vpmullw %xmm1, %xmm0, %xmm0
; AVX-NEXT: vpor %xmm2, %xmm0, %xmm0
; AVX-NEXT: retq
;
; AVX512F-LABEL: constant_rotate_v8i16:
; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovdqa {{.*#+}} xmm1 = [1,2,4,8,16,32,64,128]
; AVX512F-NEXT: vpmulhuw %xmm1, %xmm0, %xmm2
; AVX512F-NEXT: vpmullw %xmm1, %xmm0, %xmm0
; AVX512F-NEXT: vpor %xmm2, %xmm0, %xmm0
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: constant_rotate_v8i16:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa {{.*#+}} xmm1 = [1,2,4,8,16,32,64,128]
; AVX512VL-NEXT: vpmulhuw %xmm1, %xmm0, %xmm2
; AVX512VL-NEXT: vpmullw %xmm1, %xmm0, %xmm0
; AVX512VL-NEXT: vpor %xmm2, %xmm0, %xmm0
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: constant_rotate_v8i16:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm1 = [0,1,2,3,4,5,6,7]
; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm2 = [16,15,14,13,12,11,10,9]
; AVX512BW-NEXT: vpsrlvw %zmm2, %zmm0, %zmm2
; AVX512BW-NEXT: vpsllvw %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: vpor %xmm2, %xmm0, %xmm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
; AVX512VLBW-LABEL: constant_rotate_v8i16:
; AVX512VLBW: # %bb.0:
; AVX512VLBW-NEXT: vpsrlvw {{.*}}(%rip), %xmm0, %xmm1
; AVX512VLBW-NEXT: vpsllvw {{.*}}(%rip), %xmm0, %xmm0
; AVX512VLBW-NEXT: vpor %xmm1, %xmm0, %xmm0
; AVX512VLBW-NEXT: retq
;
; XOP-LABEL: constant_rotate_v8i16:
; XOP: # %bb.0:
; XOP-NEXT: vprotw {{.*}}(%rip), %xmm0, %xmm0
; XOP-NEXT: retq
;
; X32-SSE-LABEL: constant_rotate_v8i16:
; X32-SSE: # %bb.0:
; X32-SSE-NEXT: movdqa {{.*#+}} xmm1 = [1,2,4,8,16,32,64,128]
; X32-SSE-NEXT: movdqa %xmm0, %xmm2
; X32-SSE-NEXT: pmulhuw %xmm1, %xmm2
; X32-SSE-NEXT: pmullw %xmm1, %xmm0
; X32-SSE-NEXT: por %xmm2, %xmm0
; X32-SSE-NEXT: retl
  %shl = shl <8 x i16> %a, <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>
  %lshr = lshr <8 x i16> %a, <i16 16, i16 15, i16 14, i16 13, i16 12, i16 11, i16 10, i16 9>
  %or = or <8 x i16> %shl, %lshr
  ret <8 x i16> %or
}

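; v16i8 rotate by mixed constants. x86 has no byte shifts, so the bytes are
; unpacked to words and multiplied by per-lane powers of two: psrlw $8
; extracts the lshr half and a 255 mask truncates the shl half before
; packuswb re-narrows. AVX512F widens to dwords for vpsllvd/vpsrlvd, while
; AVX512BW widens to words for vpsllvw/vpsrlvw.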
define <16 x i8> @constant_rotate_v16i8(<16 x i8> %a) nounwind {
; SSE2-LABEL: constant_rotate_v16i8:
; SSE2: # %bb.0:
; SSE2-NEXT: pxor %xmm1, %xmm1
; SSE2-NEXT: movdqa %xmm0, %xmm2
; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm1[8],xmm2[9],xmm1[9],xmm2[10],xmm1[10],xmm2[11],xmm1[11],xmm2[12],xmm1[12],xmm2[13],xmm1[13],xmm2[14],xmm1[14],xmm2[15],xmm1[15]
; SSE2-NEXT: pmullw {{.*}}(%rip), %xmm2
; SSE2-NEXT: psrlw $8, %xmm2
; SSE2-NEXT: movdqa %xmm0, %xmm3
; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3],xmm3[4],xmm1[4],xmm3[5],xmm1[5],xmm3[6],xmm1[6],xmm3[7],xmm1[7]
; SSE2-NEXT: pmullw {{.*}}(%rip), %xmm3
; SSE2-NEXT: psrlw $8, %xmm3
; SSE2-NEXT: packuswb %xmm2, %xmm3
; SSE2-NEXT: movdqa %xmm0, %xmm1
; SSE2-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
; SSE2-NEXT: pmullw {{.*}}(%rip), %xmm1
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
; SSE2-NEXT: pand %xmm2, %xmm1
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT: pmullw {{.*}}(%rip), %xmm0
; SSE2-NEXT: pand %xmm2, %xmm0
; SSE2-NEXT: packuswb %xmm1, %xmm0
; SSE2-NEXT: por %xmm3, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: constant_rotate_v16i8:
; SSE41: # %bb.0:
; SSE41-NEXT: movdqa %xmm0, %xmm2
; SSE41-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15]
; SSE41-NEXT: pmullw {{.*}}(%rip), %xmm2
; SSE41-NEXT: movdqa {{.*#+}} xmm3 = [255,255,255,255,255,255,255,255]
; SSE41-NEXT: pand %xmm3, %xmm2
; SSE41-NEXT: pmovzxbw {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; SSE41-NEXT: movdqa {{.*#+}} xmm4 = [1,2,4,8,16,32,64,128]
; SSE41-NEXT: pmullw %xmm1, %xmm4
; SSE41-NEXT: pand %xmm3, %xmm4
; SSE41-NEXT: packuswb %xmm2, %xmm4
; SSE41-NEXT: pxor %xmm2, %xmm2
; SSE41-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm2[8],xmm0[9],xmm2[9],xmm0[10],xmm2[10],xmm0[11],xmm2[11],xmm0[12],xmm2[12],xmm0[13],xmm2[13],xmm0[14],xmm2[14],xmm0[15],xmm2[15]
; SSE41-NEXT: pmullw {{.*}}(%rip), %xmm0
; SSE41-NEXT: psrlw $8, %xmm0
; SSE41-NEXT: pmullw {{.*}}(%rip), %xmm1
; SSE41-NEXT: psrlw $8, %xmm1
; SSE41-NEXT: packuswb %xmm0, %xmm1
; SSE41-NEXT: por %xmm4, %xmm1
; SSE41-NEXT: movdqa %xmm1, %xmm0
; SSE41-NEXT: retq
;
; AVX1-LABEL: constant_rotate_v16i8:
; AVX1: # %bb.0:
; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; AVX1-NEXT: vpmullw {{.*}}(%rip), %xmm1, %xmm1
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
; AVX1-NEXT: vpand %xmm2, %xmm1, %xmm1
; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm3 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX1-NEXT: vpmullw {{.*}}(%rip), %xmm3, %xmm4
; AVX1-NEXT: vpand %xmm2, %xmm4, %xmm2
; AVX1-NEXT: vpackuswb %xmm1, %xmm2, %xmm1
; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm0 = xmm0[8],xmm2[8],xmm0[9],xmm2[9],xmm0[10],xmm2[10],xmm0[11],xmm2[11],xmm0[12],xmm2[12],xmm0[13],xmm2[13],xmm0[14],xmm2[14],xmm0[15],xmm2[15]
; AVX1-NEXT: vpmullw {{.*}}(%rip), %xmm0, %xmm0
; AVX1-NEXT: vpsrlw $8, %xmm0, %xmm0
; AVX1-NEXT: vpmullw {{.*}}(%rip), %xmm3, %xmm2
; AVX1-NEXT: vpsrlw $8, %xmm2, %xmm2
; AVX1-NEXT: vpackuswb %xmm0, %xmm2, %xmm0
; AVX1-NEXT: vpor %xmm0, %xmm1, %xmm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: constant_rotate_v16i8:
; AVX2: # %bb.0:
; AVX2-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
; AVX2-NEXT: vpmullw {{.*}}(%rip), %ymm0, %ymm1
; AVX2-NEXT: vpsrlw $8, %ymm1, %ymm1
; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm2
; AVX2-NEXT: vpackuswb %xmm2, %xmm1, %xmm1
; AVX2-NEXT: vpmullw {{.*}}(%rip), %ymm0, %ymm0
; AVX2-NEXT: vpand {{.*}}(%rip), %ymm0, %ymm0
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm2
; AVX2-NEXT: vpackuswb %xmm2, %xmm0, %xmm0
; AVX2-NEXT: vpor %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512F-LABEL: constant_rotate_v16i8:
; AVX512F: # %bb.0:
; AVX512F-NEXT: vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
; AVX512F-NEXT: vpsllvd {{.*}}(%rip), %zmm0, %zmm1
; AVX512F-NEXT: vpsrlvd {{.*}}(%rip), %zmm0, %zmm0
; AVX512F-NEXT: vpord %zmm0, %zmm1, %zmm0
; AVX512F-NEXT: vpmovdb %zmm0, %xmm0
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: constant_rotate_v16i8:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
; AVX512VL-NEXT: vpsllvd {{.*}}(%rip), %zmm0, %zmm1
; AVX512VL-NEXT: vpsrlvd {{.*}}(%rip), %zmm0, %zmm0
; AVX512VL-NEXT: vpord %zmm0, %zmm1, %zmm0
; AVX512VL-NEXT: vpmovdb %zmm0, %xmm0
; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: constant_rotate_v16i8:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm1 = [0,1,2,3,4,5,6,7,8,7,6,5,4,3,2,1]
; AVX512BW-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
; AVX512BW-NEXT: vpsllvw %zmm1, %zmm0, %zmm1
; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm2 = [8,7,6,5,4,3,2,1,0,1,2,3,4,5,6,7]
; AVX512BW-NEXT: vpsrlvw %zmm2, %zmm0, %zmm0
; AVX512BW-NEXT: vpor %ymm0, %ymm1, %ymm0
; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
; AVX512VLBW-LABEL: constant_rotate_v16i8:
; AVX512VLBW: # %bb.0:
; AVX512VLBW-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
; AVX512VLBW-NEXT: vpsllvw {{.*}}(%rip), %ymm0, %ymm1
; AVX512VLBW-NEXT: vpsrlvw {{.*}}(%rip), %ymm0, %ymm0
; AVX512VLBW-NEXT: vpor %ymm0, %ymm1, %ymm0
; AVX512VLBW-NEXT: vpmovwb %ymm0, %xmm0
; AVX512VLBW-NEXT: vzeroupper
; AVX512VLBW-NEXT: retq
;
; XOP-LABEL: constant_rotate_v16i8:
; XOP: # %bb.0:
; XOP-NEXT: vprotb {{.*}}(%rip), %xmm0, %xmm0
; XOP-NEXT: retq
;
; X32-SSE-LABEL: constant_rotate_v16i8:
; X32-SSE: # %bb.0:
; X32-SSE-NEXT: pxor %xmm1, %xmm1
; X32-SSE-NEXT: movdqa %xmm0, %xmm2
; X32-SSE-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm1[8],xmm2[9],xmm1[9],xmm2[10],xmm1[10],xmm2[11],xmm1[11],xmm2[12],xmm1[12],xmm2[13],xmm1[13],xmm2[14],xmm1[14],xmm2[15],xmm1[15]
; X32-SSE-NEXT: pmullw {{\.LCPI.*}}, %xmm2
; X32-SSE-NEXT: psrlw $8, %xmm2
; X32-SSE-NEXT: movdqa %xmm0, %xmm3
; X32-SSE-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3],xmm3[4],xmm1[4],xmm3[5],xmm1[5],xmm3[6],xmm1[6],xmm3[7],xmm1[7]
; X32-SSE-NEXT: pmullw {{\.LCPI.*}}, %xmm3
; X32-SSE-NEXT: psrlw $8, %xmm3
; X32-SSE-NEXT: packuswb %xmm2, %xmm3
; X32-SSE-NEXT: movdqa %xmm0, %xmm1
; X32-SSE-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
; X32-SSE-NEXT: pmullw {{\.LCPI.*}}, %xmm1
; X32-SSE-NEXT: movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
; X32-SSE-NEXT: pand %xmm2, %xmm1
; X32-SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; X32-SSE-NEXT: pmullw {{\.LCPI.*}}, %xmm0
; X32-SSE-NEXT: pand %xmm2, %xmm0
; X32-SSE-NEXT: packuswb %xmm1, %xmm0
; X32-SSE-NEXT: por %xmm3, %xmm0
; X32-SSE-NEXT: retl
  %shl = shl <16 x i8> %a, <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1>
  %lshr = lshr <16 x i8> %a, <i8 8, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7>
  %or = or <16 x i8> %shl, %lshr
  ret <16 x i8> %or
}

;
; Uniform Constant Rotates
;

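; Splat-constant v2i64 rotate by 14. SSE/AVX emit the generic shift/shift/or
; sequence, while AVX512 matches the immediate rotate vprolq $14 and XOP
; matches vprotq $14.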
define <2 x i64> @splatconstant_rotate_v2i64(<2 x i64> %a) nounwind {
; SSE-LABEL: splatconstant_rotate_v2i64:
; SSE: # %bb.0:
; SSE-NEXT: movdqa %xmm0, %xmm1
; SSE-NEXT: psllq $14, %xmm1
; SSE-NEXT: psrlq $50, %xmm0
; SSE-NEXT: por %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: splatconstant_rotate_v2i64:
; AVX: # %bb.0:
; AVX-NEXT: vpsllq $14, %xmm0, %xmm1
; AVX-NEXT: vpsrlq $50, %xmm0, %xmm0
; AVX-NEXT: vpor %xmm0, %xmm1, %xmm0
; AVX-NEXT: retq
;
; AVX512F-LABEL: splatconstant_rotate_v2i64:
; AVX512F: # %bb.0:
; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512F-NEXT: vprolq $14, %zmm0, %zmm0
; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: splatconstant_rotate_v2i64:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vprolq $14, %xmm0, %xmm0
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: splatconstant_rotate_v2i64:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512BW-NEXT: vprolq $14, %zmm0, %zmm0
; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
; AVX512VLBW-LABEL: splatconstant_rotate_v2i64:
; AVX512VLBW: # %bb.0:
; AVX512VLBW-NEXT: vprolq $14, %xmm0, %xmm0
; AVX512VLBW-NEXT: retq
;
; XOP-LABEL: splatconstant_rotate_v2i64:
; XOP: # %bb.0:
; XOP-NEXT: vprotq $14, %xmm0, %xmm0
; XOP-NEXT: retq
;
; X32-SSE-LABEL: splatconstant_rotate_v2i64:
; X32-SSE: # %bb.0:
; X32-SSE-NEXT: movdqa %xmm0, %xmm1
; X32-SSE-NEXT: psllq $14, %xmm1
; X32-SSE-NEXT: psrlq $50, %xmm0
; X32-SSE-NEXT: por %xmm1, %xmm0
; X32-SSE-NEXT: retl
  %shl = shl <2 x i64> %a, <i64 14, i64 14>
  %lshr = lshr <2 x i64> %a, <i64 50, i64 50>
  %or = or <2 x i64> %shl, %lshr
  ret <2 x i64> %or
}

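; Splat-constant v4i32 rotate by 4: pslld $4 / psrld $28 / por on SSE/AVX,
; vprold $4 on AVX512, vprotd $4 on XOP.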
define <4 x i32> @splatconstant_rotate_v4i32(<4 x i32> %a) nounwind {
; SSE-LABEL: splatconstant_rotate_v4i32:
; SSE: # %bb.0:
; SSE-NEXT: movdqa %xmm0, %xmm1
; SSE-NEXT: psrld $28, %xmm1
; SSE-NEXT: pslld $4, %xmm0
; SSE-NEXT: por %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: splatconstant_rotate_v4i32:
; AVX: # %bb.0:
; AVX-NEXT: vpsrld $28, %xmm0, %xmm1
; AVX-NEXT: vpslld $4, %xmm0, %xmm0
; AVX-NEXT: vpor %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
;
; AVX512F-LABEL: splatconstant_rotate_v4i32:
; AVX512F: # %bb.0:
; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512F-NEXT: vprold $4, %zmm0, %zmm0
; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: splatconstant_rotate_v4i32:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vprold $4, %xmm0, %xmm0
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: splatconstant_rotate_v4i32:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512BW-NEXT: vprold $4, %zmm0, %zmm0
; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
; AVX512VLBW-LABEL: splatconstant_rotate_v4i32:
; AVX512VLBW: # %bb.0:
; AVX512VLBW-NEXT: vprold $4, %xmm0, %xmm0
; AVX512VLBW-NEXT: retq
;
; XOP-LABEL: splatconstant_rotate_v4i32:
; XOP: # %bb.0:
; XOP-NEXT: vprotd $4, %xmm0, %xmm0
; XOP-NEXT: retq
;
; X32-SSE-LABEL: splatconstant_rotate_v4i32:
; X32-SSE: # %bb.0:
; X32-SSE-NEXT: movdqa %xmm0, %xmm1
; X32-SSE-NEXT: psrld $28, %xmm1
; X32-SSE-NEXT: pslld $4, %xmm0
; X32-SSE-NEXT: por %xmm1, %xmm0
; X32-SSE-NEXT: retl
  %shl = shl <4 x i32> %a, <i32 4, i32 4, i32 4, i32 4>
  %lshr = lshr <4 x i32> %a, <i32 28, i32 28, i32 28, i32 28>
  %or = or <4 x i32> %shl, %lshr
  ret <4 x i32> %or
}

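; Splat-constant v8i16 rotate by 7. AVX512 has no 16-bit rotate instruction,
; so every AVX512 variant keeps the psllw $7 / psrlw $9 / por expansion;
; only XOP provides a native vprotw.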
define <8 x i16> @splatconstant_rotate_v8i16(<8 x i16> %a) nounwind {
; SSE-LABEL: splatconstant_rotate_v8i16:
; SSE: # %bb.0:
; SSE-NEXT: movdqa %xmm0, %xmm1
; SSE-NEXT: psrlw $9, %xmm1
; SSE-NEXT: psllw $7, %xmm0
; SSE-NEXT: por %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: splatconstant_rotate_v8i16:
; AVX: # %bb.0:
; AVX-NEXT: vpsrlw $9, %xmm0, %xmm1
; AVX-NEXT: vpsllw $7, %xmm0, %xmm0
; AVX-NEXT: vpor %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: splatconstant_rotate_v8i16:
; AVX512: # %bb.0:
; AVX512-NEXT: vpsrlw $9, %xmm0, %xmm1
; AVX512-NEXT: vpsllw $7, %xmm0, %xmm0
; AVX512-NEXT: vpor %xmm1, %xmm0, %xmm0
; AVX512-NEXT: retq
;
; XOP-LABEL: splatconstant_rotate_v8i16:
; XOP: # %bb.0:
; XOP-NEXT: vprotw $7, %xmm0, %xmm0
; XOP-NEXT: retq
;
; X32-SSE-LABEL: splatconstant_rotate_v8i16:
; X32-SSE: # %bb.0:
; X32-SSE-NEXT: movdqa %xmm0, %xmm1
; X32-SSE-NEXT: psrlw $9, %xmm1
; X32-SSE-NEXT: psllw $7, %xmm0
; X32-SSE-NEXT: por %xmm1, %xmm0
; X32-SSE-NEXT: retl
  %shl = shl <8 x i16> %a, <i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7>
  %lshr = lshr <8 x i16> %a, <i16 9, i16 9, i16 9, i16 9, i16 9, i16 9, i16 9, i16 9>
  %or = or <8 x i16> %shl, %lshr
  ret <8 x i16> %or
}

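; Splat-constant v16i8 rotate by 4. psllw/psrlw operate on words, so byte
; masks clear the bits that would leak across byte boundaries. AVX512VL
; folds the two masks and the or into one vpternlogq (a bitwise select of
; the two shift results), and XOP has a true byte rotate, vprotb $4.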
define <16 x i8> @splatconstant_rotate_v16i8(<16 x i8> %a) nounwind {
; SSE-LABEL: splatconstant_rotate_v16i8:
; SSE: # %bb.0:
; SSE-NEXT: movdqa %xmm0, %xmm1
; SSE-NEXT: psrlw $4, %xmm1
; SSE-NEXT: pand {{.*}}(%rip), %xmm1
; SSE-NEXT: psllw $4, %xmm0
; SSE-NEXT: pand {{.*}}(%rip), %xmm0
; SSE-NEXT: por %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: splatconstant_rotate_v16i8:
; AVX: # %bb.0:
; AVX-NEXT: vpsrlw $4, %xmm0, %xmm1
; AVX-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
; AVX-NEXT: vpsllw $4, %xmm0, %xmm0
; AVX-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: vpor %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
;
; AVX512F-LABEL: splatconstant_rotate_v16i8:
; AVX512F: # %bb.0:
; AVX512F-NEXT: vpsllw $4, %xmm0, %xmm1
; AVX512F-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
; AVX512F-NEXT: vpsrlw $4, %xmm0, %xmm0
; AVX512F-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; AVX512F-NEXT: vpor %xmm0, %xmm1, %xmm0
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: splatconstant_rotate_v16i8:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpsllw $4, %xmm0, %xmm1
; AVX512VL-NEXT: vpsrlw $4, %xmm0, %xmm0
; AVX512VL-NEXT: vpternlogq $216, {{.*}}(%rip), %xmm1, %xmm0
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: splatconstant_rotate_v16i8:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpsllw $4, %xmm0, %xmm1
; AVX512BW-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
; AVX512BW-NEXT: vpsrlw $4, %xmm0, %xmm0
; AVX512BW-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; AVX512BW-NEXT: vpor %xmm0, %xmm1, %xmm0
; AVX512BW-NEXT: retq
;
; AVX512VLBW-LABEL: splatconstant_rotate_v16i8:
; AVX512VLBW: # %bb.0:
; AVX512VLBW-NEXT: vpsllw $4, %xmm0, %xmm1
; AVX512VLBW-NEXT: vpsrlw $4, %xmm0, %xmm0
; AVX512VLBW-NEXT: vpternlogq $216, {{.*}}(%rip), %xmm1, %xmm0
; AVX512VLBW-NEXT: retq
;
; XOP-LABEL: splatconstant_rotate_v16i8:
; XOP: # %bb.0:
; XOP-NEXT: vprotb $4, %xmm0, %xmm0
; XOP-NEXT: retq
;
; X32-SSE-LABEL: splatconstant_rotate_v16i8:
; X32-SSE: # %bb.0:
; X32-SSE-NEXT: movdqa %xmm0, %xmm1
; X32-SSE-NEXT: psrlw $4, %xmm1
; X32-SSE-NEXT: pand {{\.LCPI.*}}, %xmm1
; X32-SSE-NEXT: psllw $4, %xmm0
; X32-SSE-NEXT: pand {{\.LCPI.*}}, %xmm0
; X32-SSE-NEXT: por %xmm1, %xmm0
; X32-SSE-NEXT: retl
  %shl = shl <16 x i8> %a, <i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4>
  %lshr = lshr <16 x i8> %a, <i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4>
  %or = or <16 x i8> %shl, %lshr
  ret <16 x i8> %or
}

;
; Masked Uniform Constant Rotates
;

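; Masked v2i64 rotate by 15. The lmask <65,33> only demands bits that the
; shl by 15 leaves clear, so the left half folds away and plain SSE emits
; just psrlq $49 + pand; AVX512 and XOP keep the rotate and mask afterwards.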
define <2 x i64> @splatconstant_rotate_mask_v2i64(<2 x i64> %a) nounwind {
; SSE-LABEL: splatconstant_rotate_mask_v2i64:
; SSE: # %bb.0:
; SSE-NEXT: psrlq $49, %xmm0
; SSE-NEXT: pand {{.*}}(%rip), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: splatconstant_rotate_mask_v2i64:
; AVX: # %bb.0:
; AVX-NEXT: vpsrlq $49, %xmm0, %xmm0
; AVX-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: retq
;
; AVX512F-LABEL: splatconstant_rotate_mask_v2i64:
; AVX512F: # %bb.0:
; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512F-NEXT: vprolq $15, %zmm0, %zmm0
; AVX512F-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: splatconstant_rotate_mask_v2i64:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vprolq $15, %xmm0, %xmm0
; AVX512VL-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: splatconstant_rotate_mask_v2i64:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512BW-NEXT: vprolq $15, %zmm0, %zmm0
; AVX512BW-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
; AVX512VLBW-LABEL: splatconstant_rotate_mask_v2i64:
; AVX512VLBW: # %bb.0:
; AVX512VLBW-NEXT: vprolq $15, %xmm0, %xmm0
; AVX512VLBW-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; AVX512VLBW-NEXT: retq
;
; XOP-LABEL: splatconstant_rotate_mask_v2i64:
; XOP: # %bb.0:
; XOP-NEXT: vprotq $15, %xmm0, %xmm0
; XOP-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; XOP-NEXT: retq
;
; X32-SSE-LABEL: splatconstant_rotate_mask_v2i64:
; X32-SSE: # %bb.0:
; X32-SSE-NEXT: psrlq $49, %xmm0
; X32-SSE-NEXT: pand {{\.LCPI.*}}, %xmm0
; X32-SSE-NEXT: retl
  %shl = shl <2 x i64> %a, <i64 15, i64 15>
  %lshr = lshr <2 x i64> %a, <i64 49, i64 49>
  %rmask = and <2 x i64> %lshr, <i64 255, i64 127>
  %lmask = and <2 x i64> %shl, <i64 65, i64 33>
  %or = or <2 x i64> %lmask, %rmask
  ret <2 x i64> %or
}

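; Masked v4i32 rotate by 4. The shl and lshr halves occupy disjoint bits in
; each lane, so the two lane masks merge into a single pand after the por;
; AVX512 rotates with vprold $4 and applies one mask, as does XOP with
; vprotd $4.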
define <4 x i32> @splatconstant_rotate_mask_v4i32(<4 x i32> %a) nounwind {
; SSE-LABEL: splatconstant_rotate_mask_v4i32:
; SSE: # %bb.0:
; SSE-NEXT: movdqa %xmm0, %xmm1
; SSE-NEXT: psrld $28, %xmm1
; SSE-NEXT: pslld $4, %xmm0
; SSE-NEXT: por %xmm1, %xmm0
; SSE-NEXT: pand {{.*}}(%rip), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: splatconstant_rotate_mask_v4i32:
; AVX: # %bb.0:
; AVX-NEXT: vpsrld $28, %xmm0, %xmm1
; AVX-NEXT: vpslld $4, %xmm0, %xmm0
; AVX-NEXT: vpor %xmm1, %xmm0, %xmm0
; AVX-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: retq
;
; AVX512F-LABEL: splatconstant_rotate_mask_v4i32:
; AVX512F: # %bb.0:
; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512F-NEXT: vprold $4, %zmm0, %zmm0
; AVX512F-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: splatconstant_rotate_mask_v4i32:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vprold $4, %xmm0, %xmm0
; AVX512VL-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: splatconstant_rotate_mask_v4i32:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512BW-NEXT: vprold $4, %zmm0, %zmm0
; AVX512BW-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
; AVX512VLBW-LABEL: splatconstant_rotate_mask_v4i32:
; AVX512VLBW: # %bb.0:
; AVX512VLBW-NEXT: vprold $4, %xmm0, %xmm0
; AVX512VLBW-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; AVX512VLBW-NEXT: retq
;
; XOP-LABEL: splatconstant_rotate_mask_v4i32:
; XOP: # %bb.0:
; XOP-NEXT: vprotd $4, %xmm0, %xmm0
; XOP-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; XOP-NEXT: retq
;
; X32-SSE-LABEL: splatconstant_rotate_mask_v4i32:
; X32-SSE: # %bb.0:
; X32-SSE-NEXT: movdqa %xmm0, %xmm1
; X32-SSE-NEXT: psrld $28, %xmm1
; X32-SSE-NEXT: pslld $4, %xmm0
; X32-SSE-NEXT: por %xmm1, %xmm0
; X32-SSE-NEXT: pand {{\.LCPI.*}}, %xmm0
; X32-SSE-NEXT: retl
  %shl = shl <4 x i32> %a, <i32 4, i32 4, i32 4, i32 4>
  %lshr = lshr <4 x i32> %a, <i32 28, i32 28, i32 28, i32 28>
  %rmask = and <4 x i32> %lshr, <i32 127, i32 255, i32 511, i32 1023>
  %lmask = and <4 x i32> %shl, <i32 1023, i32 511, i32 255, i32 127>
  %or = or <4 x i32> %lmask, %rmask
  ret <4 x i32> %or
}

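; Masked v8i16 rotate by 5: the same single-combined-mask shape with
; psllw $5 / psrlw $11, and vprotw $5 + vpand on XOP.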
define <8 x i16> @splatconstant_rotate_mask_v8i16(<8 x i16> %a) nounwind {
; SSE-LABEL: splatconstant_rotate_mask_v8i16:
; SSE: # %bb.0:
; SSE-NEXT: movdqa %xmm0, %xmm1
; SSE-NEXT: psrlw $11, %xmm1
; SSE-NEXT: psllw $5, %xmm0
; SSE-NEXT: por %xmm1, %xmm0
; SSE-NEXT: pand {{.*}}(%rip), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: splatconstant_rotate_mask_v8i16:
; AVX: # %bb.0:
; AVX-NEXT: vpsrlw $11, %xmm0, %xmm1
; AVX-NEXT: vpsllw $5, %xmm0, %xmm0
; AVX-NEXT: vpor %xmm1, %xmm0, %xmm0
; AVX-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: splatconstant_rotate_mask_v8i16:
; AVX512: # %bb.0:
; AVX512-NEXT: vpsrlw $11, %xmm0, %xmm1
; AVX512-NEXT: vpsllw $5, %xmm0, %xmm0
; AVX512-NEXT: vpor %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; AVX512-NEXT: retq
;
; XOP-LABEL: splatconstant_rotate_mask_v8i16:
; XOP: # %bb.0:
; XOP-NEXT: vprotw $5, %xmm0, %xmm0
; XOP-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; XOP-NEXT: retq
;
; X32-SSE-LABEL: splatconstant_rotate_mask_v8i16:
; X32-SSE: # %bb.0:
; X32-SSE-NEXT: movdqa %xmm0, %xmm1
; X32-SSE-NEXT: psrlw $11, %xmm1
; X32-SSE-NEXT: psllw $5, %xmm0
; X32-SSE-NEXT: por %xmm1, %xmm0
; X32-SSE-NEXT: pand {{\.LCPI.*}}, %xmm0
; X32-SSE-NEXT: retl
  %shl = shl <8 x i16> %a, <i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5>
  %lshr = lshr <8 x i16> %a, <i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11>
  %rmask = and <8 x i16> %lshr, <i16 55, i16 55, i16 55, i16 55, i16 55, i16 55, i16 55, i16 55>
  %lmask = and <8 x i16> %shl, <i16 33, i16 33, i16 33, i16 33, i16 33, i16 33, i16 33, i16 33>
  %or = or <8 x i16> %lmask, %rmask
  ret <8 x i16> %or
}

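; Masked v16i8 rotate by 4: the word-shift-plus-byte-mask expansion from
; above, followed by the extra rmask/lmask and; XOP again reduces to
; vprotb $4 + vpand.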
define <16 x i8> @splatconstant_rotate_mask_v16i8(<16 x i8> %a) nounwind {
; SSE-LABEL: splatconstant_rotate_mask_v16i8:
; SSE: # %bb.0:
; SSE-NEXT: movdqa %xmm0, %xmm1
; SSE-NEXT: psrlw $4, %xmm1
; SSE-NEXT: pand {{.*}}(%rip), %xmm1
; SSE-NEXT: psllw $4, %xmm0
; SSE-NEXT: pand {{.*}}(%rip), %xmm0
; SSE-NEXT: por %xmm1, %xmm0
; SSE-NEXT: pand {{.*}}(%rip), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: splatconstant_rotate_mask_v16i8:
; AVX: # %bb.0:
; AVX-NEXT: vpsrlw $4, %xmm0, %xmm1
; AVX-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
; AVX-NEXT: vpsllw $4, %xmm0, %xmm0
; AVX-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: vpor %xmm1, %xmm0, %xmm0
; AVX-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: splatconstant_rotate_mask_v16i8:
; AVX512: # %bb.0:
; AVX512-NEXT: vpsllw $4, %xmm0, %xmm1
; AVX512-NEXT: vpsrlw $4, %xmm0, %xmm0
; AVX512-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; AVX512-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
; AVX512-NEXT: vpor %xmm0, %xmm1, %xmm0
; AVX512-NEXT: retq
;
; XOP-LABEL: splatconstant_rotate_mask_v16i8:
; XOP: # %bb.0:
; XOP-NEXT: vprotb $4, %xmm0, %xmm0
; XOP-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; XOP-NEXT: retq
;
; X32-SSE-LABEL: splatconstant_rotate_mask_v16i8:
; X32-SSE: # %bb.0:
; X32-SSE-NEXT: movdqa %xmm0, %xmm1
; X32-SSE-NEXT: psrlw $4, %xmm1
; X32-SSE-NEXT: pand {{\.LCPI.*}}, %xmm1
; X32-SSE-NEXT: psllw $4, %xmm0
; X32-SSE-NEXT: pand {{\.LCPI.*}}, %xmm0
; X32-SSE-NEXT: por %xmm1, %xmm0
; X32-SSE-NEXT: pand {{\.LCPI.*}}, %xmm0
; X32-SSE-NEXT: retl
  %shl = shl <16 x i8> %a, <i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4>
  %lshr = lshr <16 x i8> %a, <i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4>
  %rmask = and <16 x i8> %lshr, <i8 55, i8 55, i8 55, i8 55, i8 55, i8 55, i8 55, i8 55, i8 55, i8 55, i8 55, i8 55, i8 55, i8 55, i8 55, i8 55>
  %lmask = and <16 x i8> %shl, <i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33, i8 33>
  %or = or <16 x i8> %lmask, %rmask
  ret <16 x i8> %or
}

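; rot16_demandedbits shifts %x left and right by 11, ors the results, and
; keeps only the low 16 bits of each lane; the vector lowerings keep the
; plain shift/or expansion and apply the mask at the end, either with pand
; or (on SSE41 and later) a pblendw against zero that clears the high word
; of every dword.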
define <4 x i32> @rot16_demandedbits(<4 x i32> %x, <4 x i32> %y) nounwind {
; X32-LABEL: rot16_demandedbits:
; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl %eax, %ecx
; X32-NEXT: shrl $11, %ecx
; X32-NEXT: shll $5, %eax
; X32-NEXT: orl %ecx, %eax
; X32-NEXT: andl $65536, %eax # imm = 0x10000
; X32-NEXT: retl
;
; X64-LABEL: rot16_demandedbits:
; X64: # %bb.0:
; X64-NEXT: movl %edi, %eax
; X64-NEXT: movl %edi, %ecx
; X64-NEXT: shrl $11, %ecx
; X64-NEXT: shll $5, %eax
; X64-NEXT: orl %ecx, %eax
; X64-NEXT: andl $65536, %eax # imm = 0x10000
; X64-NEXT: retq
;
; SSE2-LABEL: rot16_demandedbits:
; SSE2: # %bb.0:
; SSE2-NEXT: movdqa %xmm0, %xmm1
; SSE2-NEXT: psrld $11, %xmm1
; SSE2-NEXT: pslld $11, %xmm0
; SSE2-NEXT: por %xmm1, %xmm0
; SSE2-NEXT: pand {{.*}}(%rip), %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: rot16_demandedbits:
; SSE41: # %bb.0:
; SSE41-NEXT: movdqa %xmm0, %xmm1
; SSE41-NEXT: psrld $11, %xmm1
; SSE41-NEXT: pslld $11, %xmm0
; SSE41-NEXT: por %xmm1, %xmm0
; SSE41-NEXT: pxor %xmm1, %xmm1
; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3],xmm0[4],xmm1[5],xmm0[6],xmm1[7]
; SSE41-NEXT: retq
;
; AVX-LABEL: rot16_demandedbits:
; AVX: # %bb.0:
; AVX-NEXT: vpsrld $11, %xmm0, %xmm1
; AVX-NEXT: vpslld $11, %xmm0, %xmm0
; AVX-NEXT: vpor %xmm0, %xmm1, %xmm0
; AVX-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3],xmm0[4],xmm1[5],xmm0[6],xmm1[7]
; AVX-NEXT: retq
;
; AVX512-LABEL: rot16_demandedbits:
; AVX512: # %bb.0:
; AVX512-NEXT: vpsrld $11, %xmm0, %xmm1
; AVX512-NEXT: vpslld $11, %xmm0, %xmm0
; AVX512-NEXT: vpor %xmm0, %xmm1, %xmm0
; AVX512-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX512-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3],xmm0[4],xmm1[5],xmm0[6],xmm1[7]
; AVX512-NEXT: retq
;
; XOP-LABEL: rot16_demandedbits:
; XOP: # %bb.0:
; XOP-NEXT: vpsrld $11, %xmm0, %xmm1
; XOP-NEXT: vpslld $11, %xmm0, %xmm0
; XOP-NEXT: vpor %xmm0, %xmm1, %xmm0
; XOP-NEXT: vpxor %xmm1, %xmm1, %xmm1
; XOP-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3],xmm0[4],xmm1[5],xmm0[6],xmm1[7]
; XOP-NEXT: retq
;
; X32-SSE-LABEL: rot16_demandedbits:
; X32-SSE: # %bb.0:
; X32-SSE-NEXT: movdqa %xmm0, %xmm1
; X32-SSE-NEXT: psrld $11, %xmm1
; X32-SSE-NEXT: pslld $11, %xmm0
; X32-SSE-NEXT: por %xmm1, %xmm0
; X32-SSE-NEXT: pand {{\.LCPI.*}}, %xmm0
; X32-SSE-NEXT: retl
  %t0 = lshr <4 x i32> %x, <i32 11, i32 11, i32 11, i32 11>
  %t1 = shl <4 x i32> %x, <i32 11, i32 11, i32 11, i32 11>
  %t2 = or <4 x i32> %t0, %t1
  %t3 = and <4 x i32> %t2, <i32 65535, i32 65535, i32 65535, i32 65535>
  ret <4 x i32> %t3
}

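; rot16_trunc: an i16 rotate performed in i32 lanes (lshr by 11 plus shl by
; 5, and 11 + 5 == 16) whose result is then truncated to <4 x i16>. The
; truncation becomes a word-packing shuffle: pshuflw/pshufhw/pshufd on SSE2,
; a single pshufb on SSE41 and later.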
define <4 x i16> @rot16_trunc(<4 x i32> %x, <4 x i32> %y) nounwind {
; SSE2-LABEL: rot16_trunc:
; SSE2: # %bb.0:
; SSE2-NEXT: movdqa %xmm0, %xmm1
; SSE2-NEXT: psrld $11, %xmm1
; SSE2-NEXT: pslld $5, %xmm0
; SSE2-NEXT: por %xmm1, %xmm0
; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,6,7]
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SSE2-NEXT: retq
;
; SSE41-LABEL: rot16_trunc:
; SSE41: # %bb.0:
; SSE41-NEXT: movdqa %xmm0, %xmm1
; SSE41-NEXT: psrld $11, %xmm1
; SSE41-NEXT: pslld $5, %xmm0
; SSE41-NEXT: por %xmm1, %xmm0
; SSE41-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
; SSE41-NEXT: retq
;
; AVX-LABEL: rot16_trunc:
; AVX: # %bb.0:
; AVX-NEXT: vpsrld $11, %xmm0, %xmm1
; AVX-NEXT: vpslld $5, %xmm0, %xmm0
; AVX-NEXT: vpor %xmm0, %xmm1, %xmm0
; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
; AVX-NEXT: retq
;
; AVX512-LABEL: rot16_trunc:
; AVX512: # %bb.0:
; AVX512-NEXT: vpsrld $11, %xmm0, %xmm1
; AVX512-NEXT: vpslld $5, %xmm0, %xmm0
; AVX512-NEXT: vpor %xmm0, %xmm1, %xmm0
; AVX512-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
; AVX512-NEXT: retq
;
; XOP-LABEL: rot16_trunc:
; XOP: # %bb.0:
; XOP-NEXT: vpsrld $11, %xmm0, %xmm1
; XOP-NEXT: vpslld $5, %xmm0, %xmm0
; XOP-NEXT: vpor %xmm0, %xmm1, %xmm0
; XOP-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
; XOP-NEXT: retq
;
; X32-SSE-LABEL: rot16_trunc:
; X32-SSE: # %bb.0:
; X32-SSE-NEXT: movdqa %xmm0, %xmm1
; X32-SSE-NEXT: psrld $11, %xmm1
; X32-SSE-NEXT: pslld $5, %xmm0
; X32-SSE-NEXT: por %xmm1, %xmm0
; X32-SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
; X32-SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,6,7]
; X32-SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; X32-SSE-NEXT: retl
  %t0 = lshr <4 x i32> %x, <i32 11, i32 11, i32 11, i32 11>
  %t1 = shl <4 x i32> %x, <i32 5, i32 5, i32 5, i32 5>
  %t2 = or <4 x i32> %t0, %t1
  %t3 = trunc <4 x i32> %t2 to <4 x i16>
  ret <4 x i16> %t3
}