1 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSE2
3 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSE41
4 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX1
5 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX2
6 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+xop,+avx | FileCheck %s --check-prefix=ALL --check-prefix=XOP --check-prefix=XOPAVX1
7 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+xop,+avx2 | FileCheck %s --check-prefix=ALL --check-prefix=XOP --check-prefix=XOPAVX2
8 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512dq | FileCheck %s --check-prefix=ALL --check-prefix=AVX512 --check-prefix=AVX512DQ
9 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bw | FileCheck %s --check-prefix=ALL --check-prefix=AVX512 --check-prefix=AVX512BW
10 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512dq,+avx512vl | FileCheck %s --check-prefix=ALL --check-prefix=AVX512VL --check-prefix=AVX512DQVL
11 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bw,+avx512vl | FileCheck %s --check-prefix=ALL --check-prefix=AVX512VL --check-prefix=AVX512BWVL
13 ; Just one 32-bit run to make sure we do reasonable things for i64 shifts.
; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefix=ALL --check-prefix=X32-SSE --check-prefix=X32-SSE2

;
; Variable Shifts
;

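; x86 has no variable-count arithmetic shift for 64-bit elements before
; AVX-512 (XOP's vpshaq aside), so the v2i64 lowerings below use the identity
; ashr(x, s) == (lshr(x, s) ^ m) - m with m = lshr(0x8000000000000000, s).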
20 define <2 x i64> @var_shift_v2i64(<2 x i64> %a, <2 x i64> %b) nounwind {
21 ; SSE2-LABEL: var_shift_v2i64:
23 ; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
24 ; SSE2-NEXT: movdqa %xmm2, %xmm3
25 ; SSE2-NEXT: psrlq %xmm1, %xmm3
26 ; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm1[2,3,0,1]
27 ; SSE2-NEXT: psrlq %xmm4, %xmm2
28 ; SSE2-NEXT: movsd {{.*#+}} xmm2 = xmm3[0],xmm2[1]
29 ; SSE2-NEXT: movdqa %xmm0, %xmm3
30 ; SSE2-NEXT: psrlq %xmm1, %xmm3
31 ; SSE2-NEXT: psrlq %xmm4, %xmm0
32 ; SSE2-NEXT: movsd {{.*#+}} xmm0 = xmm3[0],xmm0[1]
33 ; SSE2-NEXT: xorpd %xmm2, %xmm0
34 ; SSE2-NEXT: psubq %xmm2, %xmm0
37 ; SSE41-LABEL: var_shift_v2i64:
39 ; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
40 ; SSE41-NEXT: movdqa %xmm2, %xmm3
41 ; SSE41-NEXT: psrlq %xmm1, %xmm3
42 ; SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm1[2,3,0,1]
43 ; SSE41-NEXT: psrlq %xmm4, %xmm2
44 ; SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm3[0,1,2,3],xmm2[4,5,6,7]
45 ; SSE41-NEXT: movdqa %xmm0, %xmm3
46 ; SSE41-NEXT: psrlq %xmm1, %xmm3
47 ; SSE41-NEXT: psrlq %xmm4, %xmm0
48 ; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm3[0,1,2,3],xmm0[4,5,6,7]
49 ; SSE41-NEXT: pxor %xmm2, %xmm0
50 ; SSE41-NEXT: psubq %xmm2, %xmm0
53 ; AVX1-LABEL: var_shift_v2i64:
55 ; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
56 ; AVX1-NEXT: vpsrlq %xmm1, %xmm2, %xmm3
57 ; AVX1-NEXT: vpshufd {{.*#+}} xmm4 = xmm1[2,3,0,1]
58 ; AVX1-NEXT: vpsrlq %xmm4, %xmm2, %xmm2
59 ; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm3[0,1,2,3],xmm2[4,5,6,7]
60 ; AVX1-NEXT: vpsrlq %xmm1, %xmm0, %xmm1
61 ; AVX1-NEXT: vpsrlq %xmm4, %xmm0, %xmm0
62 ; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7]
63 ; AVX1-NEXT: vpxor %xmm2, %xmm0, %xmm0
64 ; AVX1-NEXT: vpsubq %xmm2, %xmm0, %xmm0
67 ; AVX2-LABEL: var_shift_v2i64:
69 ; AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
70 ; AVX2-NEXT: vpsrlvq %xmm1, %xmm2, %xmm2
71 ; AVX2-NEXT: vpsrlvq %xmm1, %xmm0, %xmm0
72 ; AVX2-NEXT: vpxor %xmm2, %xmm0, %xmm0
73 ; AVX2-NEXT: vpsubq %xmm2, %xmm0, %xmm0
76 ; XOP-LABEL: var_shift_v2i64:
78 ; XOP-NEXT: vpxor %xmm2, %xmm2, %xmm2
79 ; XOP-NEXT: vpsubq %xmm1, %xmm2, %xmm1
80 ; XOP-NEXT: vpshaq %xmm1, %xmm0, %xmm0
83 ; AVX512-LABEL: var_shift_v2i64:
85 ; AVX512-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
86 ; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
87 ; AVX512-NEXT: vpsravq %zmm1, %zmm0, %zmm0
88 ; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
89 ; AVX512-NEXT: vzeroupper
92 ; AVX512VL-LABEL: var_shift_v2i64:
94 ; AVX512VL-NEXT: vpsravq %xmm1, %xmm0, %xmm0
97 ; X32-SSE-LABEL: var_shift_v2i64:
99 ; X32-SSE-NEXT: movdqa {{.*#+}} xmm2 = [0,2147483648,0,2147483648]
100 ; X32-SSE-NEXT: movdqa %xmm2, %xmm3
101 ; X32-SSE-NEXT: psrlq %xmm1, %xmm3
102 ; X32-SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm1[2,3,0,1]
103 ; X32-SSE-NEXT: psrlq %xmm4, %xmm2
104 ; X32-SSE-NEXT: movsd {{.*#+}} xmm2 = xmm3[0],xmm2[1]
105 ; X32-SSE-NEXT: movdqa %xmm0, %xmm3
106 ; X32-SSE-NEXT: psrlq %xmm1, %xmm3
107 ; X32-SSE-NEXT: psrlq %xmm4, %xmm0
108 ; X32-SSE-NEXT: movsd {{.*#+}} xmm0 = xmm3[0],xmm0[1]
109 ; X32-SSE-NEXT: xorpd %xmm2, %xmm0
110 ; X32-SSE-NEXT: psubq %xmm2, %xmm0
  %shift = ashr <2 x i64> %a, %b
  ret <2 x i64> %shift
}

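; Without AVX2's vpsravd, each 32-bit lane is shifted separately: a lane's
; count is moved to the bottom of a register to feed psrad (which takes its
; count from the low 64 bits), and the four results are recombined with
; shuffles and blends.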
116 define <4 x i32> @var_shift_v4i32(<4 x i32> %a, <4 x i32> %b) nounwind {
117 ; SSE2-LABEL: var_shift_v4i32:
119 ; SSE2-NEXT: pshuflw {{.*#+}} xmm2 = xmm1[2,3,3,3,4,5,6,7]
120 ; SSE2-NEXT: movdqa %xmm0, %xmm3
121 ; SSE2-NEXT: psrad %xmm2, %xmm3
122 ; SSE2-NEXT: pshuflw {{.*#+}} xmm4 = xmm1[0,1,1,1,4,5,6,7]
123 ; SSE2-NEXT: movdqa %xmm0, %xmm2
124 ; SSE2-NEXT: psrad %xmm4, %xmm2
125 ; SSE2-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm3[0]
126 ; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
127 ; SSE2-NEXT: pshuflw {{.*#+}} xmm3 = xmm1[2,3,3,3,4,5,6,7]
128 ; SSE2-NEXT: movdqa %xmm0, %xmm4
129 ; SSE2-NEXT: psrad %xmm3, %xmm4
130 ; SSE2-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,1,1,1,4,5,6,7]
131 ; SSE2-NEXT: psrad %xmm1, %xmm0
132 ; SSE2-NEXT: punpckhqdq {{.*#+}} xmm0 = xmm0[1],xmm4[1]
133 ; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,3],xmm0[0,3]
134 ; SSE2-NEXT: movaps %xmm2, %xmm0
137 ; SSE41-LABEL: var_shift_v4i32:
139 ; SSE41-NEXT: pshuflw {{.*#+}} xmm2 = xmm1[2,3,3,3,4,5,6,7]
140 ; SSE41-NEXT: movdqa %xmm0, %xmm3
141 ; SSE41-NEXT: psrad %xmm2, %xmm3
142 ; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm1[2,3,0,1]
143 ; SSE41-NEXT: pshuflw {{.*#+}} xmm4 = xmm2[2,3,3,3,4,5,6,7]
144 ; SSE41-NEXT: movdqa %xmm0, %xmm5
145 ; SSE41-NEXT: psrad %xmm4, %xmm5
146 ; SSE41-NEXT: pblendw {{.*#+}} xmm5 = xmm3[0,1,2,3],xmm5[4,5,6,7]
147 ; SSE41-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,1,1,1,4,5,6,7]
148 ; SSE41-NEXT: movdqa %xmm0, %xmm3
149 ; SSE41-NEXT: psrad %xmm1, %xmm3
150 ; SSE41-NEXT: pshuflw {{.*#+}} xmm1 = xmm2[0,1,1,1,4,5,6,7]
151 ; SSE41-NEXT: psrad %xmm1, %xmm0
152 ; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm3[0,1,2,3],xmm0[4,5,6,7]
153 ; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm5[2,3],xmm0[4,5],xmm5[6,7]
156 ; AVX1-LABEL: var_shift_v4i32:
158 ; AVX1-NEXT: vpsrldq {{.*#+}} xmm2 = xmm1[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
159 ; AVX1-NEXT: vpsrad %xmm2, %xmm0, %xmm2
160 ; AVX1-NEXT: vpsrlq $32, %xmm1, %xmm3
161 ; AVX1-NEXT: vpsrad %xmm3, %xmm0, %xmm3
162 ; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm3[0,1,2,3],xmm2[4,5,6,7]
163 ; AVX1-NEXT: vpxor %xmm3, %xmm3, %xmm3
164 ; AVX1-NEXT: vpunpckhdq {{.*#+}} xmm3 = xmm1[2],xmm3[2],xmm1[3],xmm3[3]
165 ; AVX1-NEXT: vpsrad %xmm3, %xmm0, %xmm3
166 ; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
167 ; AVX1-NEXT: vpsrad %xmm1, %xmm0, %xmm0
168 ; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm3[4,5,6,7]
169 ; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
172 ; AVX2-LABEL: var_shift_v4i32:
174 ; AVX2-NEXT: vpsravd %xmm1, %xmm0, %xmm0
177 ; XOPAVX1-LABEL: var_shift_v4i32:
179 ; XOPAVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
180 ; XOPAVX1-NEXT: vpsubd %xmm1, %xmm2, %xmm1
181 ; XOPAVX1-NEXT: vpshad %xmm1, %xmm0, %xmm0
184 ; XOPAVX2-LABEL: var_shift_v4i32:
186 ; XOPAVX2-NEXT: vpsravd %xmm1, %xmm0, %xmm0
189 ; AVX512-LABEL: var_shift_v4i32:
191 ; AVX512-NEXT: vpsravd %xmm1, %xmm0, %xmm0
194 ; AVX512VL-LABEL: var_shift_v4i32:
196 ; AVX512VL-NEXT: vpsravd %xmm1, %xmm0, %xmm0
197 ; AVX512VL-NEXT: retq
199 ; X32-SSE-LABEL: var_shift_v4i32:
201 ; X32-SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm1[2,3,3,3,4,5,6,7]
202 ; X32-SSE-NEXT: movdqa %xmm0, %xmm3
203 ; X32-SSE-NEXT: psrad %xmm2, %xmm3
204 ; X32-SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm1[0,1,1,1,4,5,6,7]
205 ; X32-SSE-NEXT: movdqa %xmm0, %xmm2
206 ; X32-SSE-NEXT: psrad %xmm4, %xmm2
207 ; X32-SSE-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm3[0]
208 ; X32-SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
209 ; X32-SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm1[2,3,3,3,4,5,6,7]
210 ; X32-SSE-NEXT: movdqa %xmm0, %xmm4
211 ; X32-SSE-NEXT: psrad %xmm3, %xmm4
212 ; X32-SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,1,1,1,4,5,6,7]
213 ; X32-SSE-NEXT: psrad %xmm1, %xmm0
214 ; X32-SSE-NEXT: punpckhqdq {{.*#+}} xmm0 = xmm0[1],xmm4[1]
215 ; X32-SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,3],xmm0[0,3]
216 ; X32-SSE-NEXT: movaps %xmm2, %xmm0
  %shift = ashr <4 x i32> %a, %b
  ret <4 x i32> %shift
}

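; There is no variable psraw either; the word lowering moves the 4-bit count
; into the sign-bit position (psllw $12, then repeated paddw doubling) and
; uses the resulting masks (psraw $15, or pblendvb's top bit) to
; conditionally apply shifts of 8, 4, 2 and 1.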
222 define <8 x i16> @var_shift_v8i16(<8 x i16> %a, <8 x i16> %b) nounwind {
223 ; SSE2-LABEL: var_shift_v8i16:
225 ; SSE2-NEXT: psllw $12, %xmm1
226 ; SSE2-NEXT: movdqa %xmm1, %xmm2
227 ; SSE2-NEXT: psraw $15, %xmm2
228 ; SSE2-NEXT: movdqa %xmm2, %xmm3
229 ; SSE2-NEXT: pandn %xmm0, %xmm3
230 ; SSE2-NEXT: psraw $8, %xmm0
231 ; SSE2-NEXT: pand %xmm2, %xmm0
232 ; SSE2-NEXT: por %xmm3, %xmm0
233 ; SSE2-NEXT: paddw %xmm1, %xmm1
234 ; SSE2-NEXT: movdqa %xmm1, %xmm2
235 ; SSE2-NEXT: psraw $15, %xmm2
236 ; SSE2-NEXT: movdqa %xmm2, %xmm3
237 ; SSE2-NEXT: pandn %xmm0, %xmm3
238 ; SSE2-NEXT: psraw $4, %xmm0
239 ; SSE2-NEXT: pand %xmm2, %xmm0
240 ; SSE2-NEXT: por %xmm3, %xmm0
241 ; SSE2-NEXT: paddw %xmm1, %xmm1
242 ; SSE2-NEXT: movdqa %xmm1, %xmm2
243 ; SSE2-NEXT: psraw $15, %xmm2
244 ; SSE2-NEXT: movdqa %xmm2, %xmm3
245 ; SSE2-NEXT: pandn %xmm0, %xmm3
246 ; SSE2-NEXT: psraw $2, %xmm0
247 ; SSE2-NEXT: pand %xmm2, %xmm0
248 ; SSE2-NEXT: por %xmm3, %xmm0
249 ; SSE2-NEXT: paddw %xmm1, %xmm1
250 ; SSE2-NEXT: psraw $15, %xmm1
251 ; SSE2-NEXT: movdqa %xmm1, %xmm2
252 ; SSE2-NEXT: pandn %xmm0, %xmm2
253 ; SSE2-NEXT: psraw $1, %xmm0
254 ; SSE2-NEXT: pand %xmm1, %xmm0
255 ; SSE2-NEXT: por %xmm2, %xmm0
258 ; SSE41-LABEL: var_shift_v8i16:
260 ; SSE41-NEXT: movdqa %xmm1, %xmm2
261 ; SSE41-NEXT: movdqa %xmm0, %xmm1
262 ; SSE41-NEXT: movdqa %xmm2, %xmm0
263 ; SSE41-NEXT: psllw $12, %xmm0
264 ; SSE41-NEXT: psllw $4, %xmm2
265 ; SSE41-NEXT: por %xmm0, %xmm2
266 ; SSE41-NEXT: movdqa %xmm2, %xmm3
267 ; SSE41-NEXT: paddw %xmm2, %xmm3
268 ; SSE41-NEXT: movdqa %xmm1, %xmm4
269 ; SSE41-NEXT: psraw $8, %xmm4
270 ; SSE41-NEXT: movdqa %xmm2, %xmm0
271 ; SSE41-NEXT: pblendvb %xmm0, %xmm4, %xmm1
272 ; SSE41-NEXT: movdqa %xmm1, %xmm2
273 ; SSE41-NEXT: psraw $4, %xmm2
274 ; SSE41-NEXT: movdqa %xmm3, %xmm0
275 ; SSE41-NEXT: pblendvb %xmm0, %xmm2, %xmm1
276 ; SSE41-NEXT: movdqa %xmm1, %xmm2
277 ; SSE41-NEXT: psraw $2, %xmm2
278 ; SSE41-NEXT: paddw %xmm3, %xmm3
279 ; SSE41-NEXT: movdqa %xmm3, %xmm0
280 ; SSE41-NEXT: pblendvb %xmm0, %xmm2, %xmm1
281 ; SSE41-NEXT: movdqa %xmm1, %xmm2
282 ; SSE41-NEXT: psraw $1, %xmm2
283 ; SSE41-NEXT: paddw %xmm3, %xmm3
284 ; SSE41-NEXT: movdqa %xmm3, %xmm0
285 ; SSE41-NEXT: pblendvb %xmm0, %xmm2, %xmm1
286 ; SSE41-NEXT: movdqa %xmm1, %xmm0
289 ; AVX1-LABEL: var_shift_v8i16:
291 ; AVX1-NEXT: vpsllw $12, %xmm1, %xmm2
292 ; AVX1-NEXT: vpsllw $4, %xmm1, %xmm1
293 ; AVX1-NEXT: vpor %xmm2, %xmm1, %xmm1
294 ; AVX1-NEXT: vpaddw %xmm1, %xmm1, %xmm2
295 ; AVX1-NEXT: vpsraw $8, %xmm0, %xmm3
296 ; AVX1-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0
297 ; AVX1-NEXT: vpsraw $4, %xmm0, %xmm1
298 ; AVX1-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm0
299 ; AVX1-NEXT: vpsraw $2, %xmm0, %xmm1
300 ; AVX1-NEXT: vpaddw %xmm2, %xmm2, %xmm2
301 ; AVX1-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm0
302 ; AVX1-NEXT: vpsraw $1, %xmm0, %xmm1
303 ; AVX1-NEXT: vpaddw %xmm2, %xmm2, %xmm2
304 ; AVX1-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm0
307 ; AVX2-LABEL: var_shift_v8i16:
309 ; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
310 ; AVX2-NEXT: vpmovsxwd %xmm0, %ymm0
311 ; AVX2-NEXT: vpsravd %ymm1, %ymm0, %ymm0
312 ; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
313 ; AVX2-NEXT: vpackssdw %xmm1, %xmm0, %xmm0
314 ; AVX2-NEXT: vzeroupper
317 ; XOP-LABEL: var_shift_v8i16:
319 ; XOP-NEXT: vpxor %xmm2, %xmm2, %xmm2
320 ; XOP-NEXT: vpsubw %xmm1, %xmm2, %xmm1
321 ; XOP-NEXT: vpshaw %xmm1, %xmm0, %xmm0
324 ; AVX512DQ-LABEL: var_shift_v8i16:
326 ; AVX512DQ-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
327 ; AVX512DQ-NEXT: vpmovsxwd %xmm0, %ymm0
328 ; AVX512DQ-NEXT: vpsravd %ymm1, %ymm0, %ymm0
329 ; AVX512DQ-NEXT: vpmovdw %zmm0, %ymm0
330 ; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
331 ; AVX512DQ-NEXT: vzeroupper
332 ; AVX512DQ-NEXT: retq
334 ; AVX512BW-LABEL: var_shift_v8i16:
336 ; AVX512BW-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
337 ; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
338 ; AVX512BW-NEXT: vpsravw %zmm1, %zmm0, %zmm0
339 ; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
340 ; AVX512BW-NEXT: vzeroupper
341 ; AVX512BW-NEXT: retq
343 ; AVX512DQVL-LABEL: var_shift_v8i16:
344 ; AVX512DQVL: # %bb.0:
345 ; AVX512DQVL-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
346 ; AVX512DQVL-NEXT: vpmovsxwd %xmm0, %ymm0
347 ; AVX512DQVL-NEXT: vpsravd %ymm1, %ymm0, %ymm0
348 ; AVX512DQVL-NEXT: vpmovdw %ymm0, %xmm0
349 ; AVX512DQVL-NEXT: vzeroupper
350 ; AVX512DQVL-NEXT: retq
352 ; AVX512BWVL-LABEL: var_shift_v8i16:
353 ; AVX512BWVL: # %bb.0:
354 ; AVX512BWVL-NEXT: vpsravw %xmm1, %xmm0, %xmm0
355 ; AVX512BWVL-NEXT: retq
357 ; X32-SSE-LABEL: var_shift_v8i16:
359 ; X32-SSE-NEXT: psllw $12, %xmm1
360 ; X32-SSE-NEXT: movdqa %xmm1, %xmm2
361 ; X32-SSE-NEXT: psraw $15, %xmm2
362 ; X32-SSE-NEXT: movdqa %xmm2, %xmm3
363 ; X32-SSE-NEXT: pandn %xmm0, %xmm3
364 ; X32-SSE-NEXT: psraw $8, %xmm0
365 ; X32-SSE-NEXT: pand %xmm2, %xmm0
366 ; X32-SSE-NEXT: por %xmm3, %xmm0
367 ; X32-SSE-NEXT: paddw %xmm1, %xmm1
368 ; X32-SSE-NEXT: movdqa %xmm1, %xmm2
369 ; X32-SSE-NEXT: psraw $15, %xmm2
370 ; X32-SSE-NEXT: movdqa %xmm2, %xmm3
371 ; X32-SSE-NEXT: pandn %xmm0, %xmm3
372 ; X32-SSE-NEXT: psraw $4, %xmm0
373 ; X32-SSE-NEXT: pand %xmm2, %xmm0
374 ; X32-SSE-NEXT: por %xmm3, %xmm0
375 ; X32-SSE-NEXT: paddw %xmm1, %xmm1
376 ; X32-SSE-NEXT: movdqa %xmm1, %xmm2
377 ; X32-SSE-NEXT: psraw $15, %xmm2
378 ; X32-SSE-NEXT: movdqa %xmm2, %xmm3
379 ; X32-SSE-NEXT: pandn %xmm0, %xmm3
380 ; X32-SSE-NEXT: psraw $2, %xmm0
381 ; X32-SSE-NEXT: pand %xmm2, %xmm0
382 ; X32-SSE-NEXT: por %xmm3, %xmm0
383 ; X32-SSE-NEXT: paddw %xmm1, %xmm1
384 ; X32-SSE-NEXT: psraw $15, %xmm1
385 ; X32-SSE-NEXT: movdqa %xmm1, %xmm2
386 ; X32-SSE-NEXT: pandn %xmm0, %xmm2
387 ; X32-SSE-NEXT: psraw $1, %xmm0
388 ; X32-SSE-NEXT: pand %xmm1, %xmm0
389 ; X32-SSE-NEXT: por %xmm2, %xmm0
  %shift = ashr <8 x i16> %a, %b
  ret <8 x i16> %shift
}

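; v16i8 is split into two v8i16 halves: the bytes are unpacked into the high
; bytes of words, a psraw 4/2/1 blend ladder keyed by the count bits (moved
; to the top by psllw $5) does the shifting, and psrlw $8 + packuswb repack
; the halves.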
395 define <16 x i8> @var_shift_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
396 ; SSE2-LABEL: var_shift_v16i8:
398 ; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15]
399 ; SSE2-NEXT: psllw $5, %xmm1
400 ; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm1[8],xmm4[9],xmm1[9],xmm4[10],xmm1[10],xmm4[11],xmm1[11],xmm4[12],xmm1[12],xmm4[13],xmm1[13],xmm4[14],xmm1[14],xmm4[15],xmm1[15]
401 ; SSE2-NEXT: pxor %xmm3, %xmm3
402 ; SSE2-NEXT: pxor %xmm5, %xmm5
403 ; SSE2-NEXT: pcmpgtw %xmm4, %xmm5
404 ; SSE2-NEXT: movdqa %xmm5, %xmm6
405 ; SSE2-NEXT: pandn %xmm2, %xmm6
406 ; SSE2-NEXT: psraw $4, %xmm2
407 ; SSE2-NEXT: pand %xmm5, %xmm2
408 ; SSE2-NEXT: por %xmm6, %xmm2
409 ; SSE2-NEXT: paddw %xmm4, %xmm4
410 ; SSE2-NEXT: pxor %xmm5, %xmm5
411 ; SSE2-NEXT: pcmpgtw %xmm4, %xmm5
412 ; SSE2-NEXT: movdqa %xmm5, %xmm6
413 ; SSE2-NEXT: pandn %xmm2, %xmm6
414 ; SSE2-NEXT: psraw $2, %xmm2
415 ; SSE2-NEXT: pand %xmm5, %xmm2
416 ; SSE2-NEXT: por %xmm6, %xmm2
417 ; SSE2-NEXT: paddw %xmm4, %xmm4
418 ; SSE2-NEXT: pxor %xmm5, %xmm5
419 ; SSE2-NEXT: pcmpgtw %xmm4, %xmm5
420 ; SSE2-NEXT: movdqa %xmm5, %xmm4
421 ; SSE2-NEXT: pandn %xmm2, %xmm4
422 ; SSE2-NEXT: psraw $1, %xmm2
423 ; SSE2-NEXT: pand %xmm5, %xmm2
424 ; SSE2-NEXT: por %xmm4, %xmm2
425 ; SSE2-NEXT: psrlw $8, %xmm2
426 ; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
427 ; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
428 ; SSE2-NEXT: pxor %xmm4, %xmm4
429 ; SSE2-NEXT: pcmpgtw %xmm1, %xmm4
430 ; SSE2-NEXT: movdqa %xmm4, %xmm5
431 ; SSE2-NEXT: pandn %xmm0, %xmm5
432 ; SSE2-NEXT: psraw $4, %xmm0
433 ; SSE2-NEXT: pand %xmm4, %xmm0
434 ; SSE2-NEXT: por %xmm5, %xmm0
435 ; SSE2-NEXT: paddw %xmm1, %xmm1
436 ; SSE2-NEXT: pxor %xmm4, %xmm4
437 ; SSE2-NEXT: pcmpgtw %xmm1, %xmm4
438 ; SSE2-NEXT: movdqa %xmm4, %xmm5
439 ; SSE2-NEXT: pandn %xmm0, %xmm5
440 ; SSE2-NEXT: psraw $2, %xmm0
441 ; SSE2-NEXT: pand %xmm4, %xmm0
442 ; SSE2-NEXT: por %xmm5, %xmm0
443 ; SSE2-NEXT: paddw %xmm1, %xmm1
444 ; SSE2-NEXT: pcmpgtw %xmm1, %xmm3
445 ; SSE2-NEXT: movdqa %xmm3, %xmm1
446 ; SSE2-NEXT: pandn %xmm0, %xmm1
447 ; SSE2-NEXT: psraw $1, %xmm0
448 ; SSE2-NEXT: pand %xmm3, %xmm0
449 ; SSE2-NEXT: por %xmm1, %xmm0
450 ; SSE2-NEXT: psrlw $8, %xmm0
451 ; SSE2-NEXT: packuswb %xmm2, %xmm0
454 ; SSE41-LABEL: var_shift_v16i8:
456 ; SSE41-NEXT: movdqa %xmm0, %xmm2
457 ; SSE41-NEXT: psllw $5, %xmm1
458 ; SSE41-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
459 ; SSE41-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm2[8],xmm3[9],xmm2[9],xmm3[10],xmm2[10],xmm3[11],xmm2[11],xmm3[12],xmm2[12],xmm3[13],xmm2[13],xmm3[14],xmm2[14],xmm3[15],xmm2[15]
460 ; SSE41-NEXT: movdqa %xmm3, %xmm4
461 ; SSE41-NEXT: psraw $4, %xmm4
462 ; SSE41-NEXT: pblendvb %xmm0, %xmm4, %xmm3
463 ; SSE41-NEXT: movdqa %xmm3, %xmm4
464 ; SSE41-NEXT: psraw $2, %xmm4
465 ; SSE41-NEXT: paddw %xmm0, %xmm0
466 ; SSE41-NEXT: pblendvb %xmm0, %xmm4, %xmm3
467 ; SSE41-NEXT: movdqa %xmm3, %xmm4
468 ; SSE41-NEXT: psraw $1, %xmm4
469 ; SSE41-NEXT: paddw %xmm0, %xmm0
470 ; SSE41-NEXT: pblendvb %xmm0, %xmm4, %xmm3
471 ; SSE41-NEXT: psrlw $8, %xmm3
472 ; SSE41-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
473 ; SSE41-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
474 ; SSE41-NEXT: movdqa %xmm1, %xmm2
475 ; SSE41-NEXT: psraw $4, %xmm2
476 ; SSE41-NEXT: pblendvb %xmm0, %xmm2, %xmm1
477 ; SSE41-NEXT: movdqa %xmm1, %xmm2
478 ; SSE41-NEXT: psraw $2, %xmm2
479 ; SSE41-NEXT: paddw %xmm0, %xmm0
480 ; SSE41-NEXT: pblendvb %xmm0, %xmm2, %xmm1
481 ; SSE41-NEXT: movdqa %xmm1, %xmm2
482 ; SSE41-NEXT: psraw $1, %xmm2
483 ; SSE41-NEXT: paddw %xmm0, %xmm0
484 ; SSE41-NEXT: pblendvb %xmm0, %xmm2, %xmm1
485 ; SSE41-NEXT: psrlw $8, %xmm1
486 ; SSE41-NEXT: packuswb %xmm3, %xmm1
487 ; SSE41-NEXT: movdqa %xmm1, %xmm0
490 ; AVX-LABEL: var_shift_v16i8:
492 ; AVX-NEXT: vpsllw $5, %xmm1, %xmm1
493 ; AVX-NEXT: vpunpckhbw {{.*#+}} xmm2 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
494 ; AVX-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
495 ; AVX-NEXT: vpsraw $4, %xmm3, %xmm4
496 ; AVX-NEXT: vpblendvb %xmm2, %xmm4, %xmm3, %xmm3
497 ; AVX-NEXT: vpsraw $2, %xmm3, %xmm4
498 ; AVX-NEXT: vpaddw %xmm2, %xmm2, %xmm2
499 ; AVX-NEXT: vpblendvb %xmm2, %xmm4, %xmm3, %xmm3
500 ; AVX-NEXT: vpsraw $1, %xmm3, %xmm4
501 ; AVX-NEXT: vpaddw %xmm2, %xmm2, %xmm2
502 ; AVX-NEXT: vpblendvb %xmm2, %xmm4, %xmm3, %xmm2
503 ; AVX-NEXT: vpsrlw $8, %xmm2, %xmm2
504 ; AVX-NEXT: vpunpcklbw {{.*#+}} xmm1 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
505 ; AVX-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
506 ; AVX-NEXT: vpsraw $4, %xmm0, %xmm3
507 ; AVX-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0
508 ; AVX-NEXT: vpsraw $2, %xmm0, %xmm3
509 ; AVX-NEXT: vpaddw %xmm1, %xmm1, %xmm1
510 ; AVX-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0
511 ; AVX-NEXT: vpsraw $1, %xmm0, %xmm3
512 ; AVX-NEXT: vpaddw %xmm1, %xmm1, %xmm1
513 ; AVX-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0
514 ; AVX-NEXT: vpsrlw $8, %xmm0, %xmm0
515 ; AVX-NEXT: vpackuswb %xmm2, %xmm0, %xmm0
518 ; XOP-LABEL: var_shift_v16i8:
520 ; XOP-NEXT: vpxor %xmm2, %xmm2, %xmm2
521 ; XOP-NEXT: vpsubb %xmm1, %xmm2, %xmm1
522 ; XOP-NEXT: vpshab %xmm1, %xmm0, %xmm0
525 ; AVX512DQ-LABEL: var_shift_v16i8:
527 ; AVX512DQ-NEXT: vpmovzxbd {{.*#+}} zmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero,xmm1[4],zero,zero,zero,xmm1[5],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[7],zero,zero,zero,xmm1[8],zero,zero,zero,xmm1[9],zero,zero,zero,xmm1[10],zero,zero,zero,xmm1[11],zero,zero,zero,xmm1[12],zero,zero,zero,xmm1[13],zero,zero,zero,xmm1[14],zero,zero,zero,xmm1[15],zero,zero,zero
528 ; AVX512DQ-NEXT: vpmovsxbd %xmm0, %zmm0
529 ; AVX512DQ-NEXT: vpsravd %zmm1, %zmm0, %zmm0
530 ; AVX512DQ-NEXT: vpmovdb %zmm0, %xmm0
531 ; AVX512DQ-NEXT: vzeroupper
532 ; AVX512DQ-NEXT: retq
534 ; AVX512BW-LABEL: var_shift_v16i8:
536 ; AVX512BW-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
537 ; AVX512BW-NEXT: vpmovsxbw %xmm0, %ymm0
538 ; AVX512BW-NEXT: vpsravw %zmm1, %zmm0, %zmm0
539 ; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
540 ; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
541 ; AVX512BW-NEXT: vzeroupper
542 ; AVX512BW-NEXT: retq
544 ; AVX512DQVL-LABEL: var_shift_v16i8:
545 ; AVX512DQVL: # %bb.0:
546 ; AVX512DQVL-NEXT: vpmovzxbd {{.*#+}} zmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero,xmm1[4],zero,zero,zero,xmm1[5],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[7],zero,zero,zero,xmm1[8],zero,zero,zero,xmm1[9],zero,zero,zero,xmm1[10],zero,zero,zero,xmm1[11],zero,zero,zero,xmm1[12],zero,zero,zero,xmm1[13],zero,zero,zero,xmm1[14],zero,zero,zero,xmm1[15],zero,zero,zero
547 ; AVX512DQVL-NEXT: vpmovsxbd %xmm0, %zmm0
548 ; AVX512DQVL-NEXT: vpsravd %zmm1, %zmm0, %zmm0
549 ; AVX512DQVL-NEXT: vpmovdb %zmm0, %xmm0
550 ; AVX512DQVL-NEXT: vzeroupper
551 ; AVX512DQVL-NEXT: retq
553 ; AVX512BWVL-LABEL: var_shift_v16i8:
554 ; AVX512BWVL: # %bb.0:
555 ; AVX512BWVL-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
556 ; AVX512BWVL-NEXT: vpmovsxbw %xmm0, %ymm0
557 ; AVX512BWVL-NEXT: vpsravw %ymm1, %ymm0, %ymm0
558 ; AVX512BWVL-NEXT: vpmovwb %ymm0, %xmm0
559 ; AVX512BWVL-NEXT: vzeroupper
560 ; AVX512BWVL-NEXT: retq
562 ; X32-SSE-LABEL: var_shift_v16i8:
564 ; X32-SSE-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15]
565 ; X32-SSE-NEXT: psllw $5, %xmm1
566 ; X32-SSE-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm1[8],xmm4[9],xmm1[9],xmm4[10],xmm1[10],xmm4[11],xmm1[11],xmm4[12],xmm1[12],xmm4[13],xmm1[13],xmm4[14],xmm1[14],xmm4[15],xmm1[15]
567 ; X32-SSE-NEXT: pxor %xmm3, %xmm3
568 ; X32-SSE-NEXT: pxor %xmm5, %xmm5
569 ; X32-SSE-NEXT: pcmpgtw %xmm4, %xmm5
570 ; X32-SSE-NEXT: movdqa %xmm5, %xmm6
571 ; X32-SSE-NEXT: pandn %xmm2, %xmm6
572 ; X32-SSE-NEXT: psraw $4, %xmm2
573 ; X32-SSE-NEXT: pand %xmm5, %xmm2
574 ; X32-SSE-NEXT: por %xmm6, %xmm2
575 ; X32-SSE-NEXT: paddw %xmm4, %xmm4
576 ; X32-SSE-NEXT: pxor %xmm5, %xmm5
577 ; X32-SSE-NEXT: pcmpgtw %xmm4, %xmm5
578 ; X32-SSE-NEXT: movdqa %xmm5, %xmm6
579 ; X32-SSE-NEXT: pandn %xmm2, %xmm6
580 ; X32-SSE-NEXT: psraw $2, %xmm2
581 ; X32-SSE-NEXT: pand %xmm5, %xmm2
582 ; X32-SSE-NEXT: por %xmm6, %xmm2
583 ; X32-SSE-NEXT: paddw %xmm4, %xmm4
584 ; X32-SSE-NEXT: pxor %xmm5, %xmm5
585 ; X32-SSE-NEXT: pcmpgtw %xmm4, %xmm5
586 ; X32-SSE-NEXT: movdqa %xmm5, %xmm4
587 ; X32-SSE-NEXT: pandn %xmm2, %xmm4
588 ; X32-SSE-NEXT: psraw $1, %xmm2
589 ; X32-SSE-NEXT: pand %xmm5, %xmm2
590 ; X32-SSE-NEXT: por %xmm4, %xmm2
591 ; X32-SSE-NEXT: psrlw $8, %xmm2
592 ; X32-SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
593 ; X32-SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
594 ; X32-SSE-NEXT: pxor %xmm4, %xmm4
595 ; X32-SSE-NEXT: pcmpgtw %xmm1, %xmm4
596 ; X32-SSE-NEXT: movdqa %xmm4, %xmm5
597 ; X32-SSE-NEXT: pandn %xmm0, %xmm5
598 ; X32-SSE-NEXT: psraw $4, %xmm0
599 ; X32-SSE-NEXT: pand %xmm4, %xmm0
600 ; X32-SSE-NEXT: por %xmm5, %xmm0
601 ; X32-SSE-NEXT: paddw %xmm1, %xmm1
602 ; X32-SSE-NEXT: pxor %xmm4, %xmm4
603 ; X32-SSE-NEXT: pcmpgtw %xmm1, %xmm4
604 ; X32-SSE-NEXT: movdqa %xmm4, %xmm5
605 ; X32-SSE-NEXT: pandn %xmm0, %xmm5
606 ; X32-SSE-NEXT: psraw $2, %xmm0
607 ; X32-SSE-NEXT: pand %xmm4, %xmm0
608 ; X32-SSE-NEXT: por %xmm5, %xmm0
609 ; X32-SSE-NEXT: paddw %xmm1, %xmm1
610 ; X32-SSE-NEXT: pcmpgtw %xmm1, %xmm3
611 ; X32-SSE-NEXT: movdqa %xmm3, %xmm1
612 ; X32-SSE-NEXT: pandn %xmm0, %xmm1
613 ; X32-SSE-NEXT: psraw $1, %xmm0
614 ; X32-SSE-NEXT: pand %xmm3, %xmm0
615 ; X32-SSE-NEXT: por %xmm1, %xmm0
616 ; X32-SSE-NEXT: psrlw $8, %xmm0
617 ; X32-SSE-NEXT: packuswb %xmm2, %xmm0
  %shift = ashr <16 x i8> %a, %b
  ret <16 x i8> %shift
}

;
; Uniform Variable Shifts
;

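; With a splatted count, the scalar-count shift forms can be used instead of
; per-element shifts; for v2i64 the count is already in the low qword, so
; psrlq feeds directly, with the same xor/sub sign-correction as above.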
627 define <2 x i64> @splatvar_shift_v2i64(<2 x i64> %a, <2 x i64> %b) nounwind {
628 ; SSE-LABEL: splatvar_shift_v2i64:
630 ; SSE-NEXT: movdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
631 ; SSE-NEXT: psrlq %xmm1, %xmm2
632 ; SSE-NEXT: psrlq %xmm1, %xmm0
633 ; SSE-NEXT: pxor %xmm2, %xmm0
634 ; SSE-NEXT: psubq %xmm2, %xmm0
637 ; AVX-LABEL: splatvar_shift_v2i64:
639 ; AVX-NEXT: vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
640 ; AVX-NEXT: vpsrlq %xmm1, %xmm2, %xmm2
641 ; AVX-NEXT: vpsrlq %xmm1, %xmm0, %xmm0
642 ; AVX-NEXT: vpxor %xmm2, %xmm0, %xmm0
643 ; AVX-NEXT: vpsubq %xmm2, %xmm0, %xmm0
646 ; XOPAVX1-LABEL: splatvar_shift_v2i64:
648 ; XOPAVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,1,0,1]
649 ; XOPAVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
650 ; XOPAVX1-NEXT: vpsubq %xmm1, %xmm2, %xmm1
651 ; XOPAVX1-NEXT: vpshaq %xmm1, %xmm0, %xmm0
654 ; XOPAVX2-LABEL: splatvar_shift_v2i64:
656 ; XOPAVX2-NEXT: vpbroadcastq %xmm1, %xmm1
657 ; XOPAVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2
658 ; XOPAVX2-NEXT: vpsubq %xmm1, %xmm2, %xmm1
659 ; XOPAVX2-NEXT: vpshaq %xmm1, %xmm0, %xmm0
662 ; AVX512-LABEL: splatvar_shift_v2i64:
664 ; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
665 ; AVX512-NEXT: vpsraq %xmm1, %zmm0, %zmm0
666 ; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
667 ; AVX512-NEXT: vzeroupper
670 ; AVX512VL-LABEL: splatvar_shift_v2i64:
672 ; AVX512VL-NEXT: vpsraq %xmm1, %xmm0, %xmm0
673 ; AVX512VL-NEXT: retq
675 ; X32-SSE-LABEL: splatvar_shift_v2i64:
677 ; X32-SSE-NEXT: movdqa {{.*#+}} xmm2 = [0,2147483648,0,2147483648]
678 ; X32-SSE-NEXT: psrlq %xmm1, %xmm2
679 ; X32-SSE-NEXT: psrlq %xmm1, %xmm0
680 ; X32-SSE-NEXT: pxor %xmm2, %xmm0
681 ; X32-SSE-NEXT: psubq %xmm2, %xmm0
  %splat = shufflevector <2 x i64> %b, <2 x i64> undef, <2 x i32> zeroinitializer
  %shift = ashr <2 x i64> %a, %splat
  ret <2 x i64> %shift
}

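; psrad reads its count as a 64-bit quantity, so the splatted dword count is
; zero-extended first (movss against a zeroed register on SSE2, pmovzxdq on
; SSE4.1 and later); v8i16 below isolates its word count the same way.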
688 define <4 x i32> @splatvar_shift_v4i32(<4 x i32> %a, <4 x i32> %b) nounwind {
689 ; SSE2-LABEL: splatvar_shift_v4i32:
691 ; SSE2-NEXT: xorps %xmm2, %xmm2
692 ; SSE2-NEXT: movss {{.*#+}} xmm2 = xmm1[0],xmm2[1,2,3]
693 ; SSE2-NEXT: psrad %xmm2, %xmm0
696 ; SSE41-LABEL: splatvar_shift_v4i32:
698 ; SSE41-NEXT: pmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
699 ; SSE41-NEXT: psrad %xmm1, %xmm0
702 ; AVX-LABEL: splatvar_shift_v4i32:
704 ; AVX-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
705 ; AVX-NEXT: vpsrad %xmm1, %xmm0, %xmm0
708 ; XOP-LABEL: splatvar_shift_v4i32:
710 ; XOP-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
711 ; XOP-NEXT: vpsrad %xmm1, %xmm0, %xmm0
714 ; AVX512-LABEL: splatvar_shift_v4i32:
716 ; AVX512-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
717 ; AVX512-NEXT: vpsrad %xmm1, %xmm0, %xmm0
720 ; AVX512VL-LABEL: splatvar_shift_v4i32:
722 ; AVX512VL-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
723 ; AVX512VL-NEXT: vpsrad %xmm1, %xmm0, %xmm0
724 ; AVX512VL-NEXT: retq
726 ; X32-SSE-LABEL: splatvar_shift_v4i32:
728 ; X32-SSE-NEXT: xorps %xmm2, %xmm2
729 ; X32-SSE-NEXT: movss {{.*#+}} xmm2 = xmm1[0],xmm2[1,2,3]
730 ; X32-SSE-NEXT: psrad %xmm2, %xmm0
  %splat = shufflevector <4 x i32> %b, <4 x i32> undef, <4 x i32> zeroinitializer
  %shift = ashr <4 x i32> %a, %splat
  ret <4 x i32> %shift
}

737 define <8 x i16> @splatvar_shift_v8i16(<8 x i16> %a, <8 x i16> %b) nounwind {
738 ; SSE2-LABEL: splatvar_shift_v8i16:
740 ; SSE2-NEXT: pslldq {{.*#+}} xmm1 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm1[0,1]
741 ; SSE2-NEXT: psrldq {{.*#+}} xmm1 = xmm1[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
742 ; SSE2-NEXT: psraw %xmm1, %xmm0
745 ; SSE41-LABEL: splatvar_shift_v8i16:
747 ; SSE41-NEXT: pmovzxwq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
748 ; SSE41-NEXT: psraw %xmm1, %xmm0
751 ; AVX-LABEL: splatvar_shift_v8i16:
753 ; AVX-NEXT: vpmovzxwq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
754 ; AVX-NEXT: vpsraw %xmm1, %xmm0, %xmm0
757 ; XOP-LABEL: splatvar_shift_v8i16:
759 ; XOP-NEXT: vpmovzxwq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
760 ; XOP-NEXT: vpsraw %xmm1, %xmm0, %xmm0
763 ; AVX512-LABEL: splatvar_shift_v8i16:
765 ; AVX512-NEXT: vpmovzxwq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
766 ; AVX512-NEXT: vpsraw %xmm1, %xmm0, %xmm0
769 ; AVX512VL-LABEL: splatvar_shift_v8i16:
771 ; AVX512VL-NEXT: vpmovzxwq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
772 ; AVX512VL-NEXT: vpsraw %xmm1, %xmm0, %xmm0
773 ; AVX512VL-NEXT: retq
775 ; X32-SSE-LABEL: splatvar_shift_v8i16:
777 ; X32-SSE-NEXT: pslldq {{.*#+}} xmm1 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm1[0,1]
778 ; X32-SSE-NEXT: psrldq {{.*#+}} xmm1 = xmm1[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
779 ; X32-SSE-NEXT: psraw %xmm1, %xmm0
  %splat = shufflevector <8 x i16> %b, <8 x i16> undef, <8 x i32> zeroinitializer
  %shift = ashr <8 x i16> %a, %splat
  ret <8 x i16> %shift
}

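; No byte-granularity shift exists at all, so the splatted byte count drives
; a word psrlw; bits pulled in from the neighbouring byte are masked off
; (via an all-ones vector shifted by the same count) and the sign is
; restored by xor/sub with a 0x8080 word mask shifted by the same count.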
786 define <16 x i8> @splatvar_shift_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
787 ; SSE2-LABEL: splatvar_shift_v16i8:
789 ; SSE2-NEXT: pslldq {{.*#+}} xmm1 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm1[0]
790 ; SSE2-NEXT: psrldq {{.*#+}} xmm1 = xmm1[15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
791 ; SSE2-NEXT: psrlw %xmm1, %xmm0
792 ; SSE2-NEXT: pcmpeqd %xmm2, %xmm2
793 ; SSE2-NEXT: psrlw %xmm1, %xmm2
794 ; SSE2-NEXT: psrlw $8, %xmm2
795 ; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
796 ; SSE2-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[0,0,2,3,4,5,6,7]
797 ; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,0,0,0]
798 ; SSE2-NEXT: pand %xmm2, %xmm0
799 ; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [32896,32896,32896,32896,32896,32896,32896,32896]
800 ; SSE2-NEXT: psrlw %xmm1, %xmm2
801 ; SSE2-NEXT: pxor %xmm2, %xmm0
802 ; SSE2-NEXT: psubb %xmm2, %xmm0
805 ; SSE41-LABEL: splatvar_shift_v16i8:
807 ; SSE41-NEXT: pmovzxbq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
808 ; SSE41-NEXT: psrlw %xmm1, %xmm0
809 ; SSE41-NEXT: pcmpeqd %xmm2, %xmm2
810 ; SSE41-NEXT: psrlw %xmm1, %xmm2
811 ; SSE41-NEXT: pshufb {{.*#+}} xmm2 = xmm2[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
812 ; SSE41-NEXT: pand %xmm2, %xmm0
813 ; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [32896,32896,32896,32896,32896,32896,32896,32896]
814 ; SSE41-NEXT: psrlw %xmm1, %xmm2
815 ; SSE41-NEXT: pxor %xmm2, %xmm0
816 ; SSE41-NEXT: psubb %xmm2, %xmm0
819 ; AVX1-LABEL: splatvar_shift_v16i8:
821 ; AVX1-NEXT: vpmovzxbq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
822 ; AVX1-NEXT: vpsrlw %xmm1, %xmm0, %xmm0
823 ; AVX1-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
824 ; AVX1-NEXT: vpsrlw %xmm1, %xmm2, %xmm2
825 ; AVX1-NEXT: vpshufb {{.*#+}} xmm2 = xmm2[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
826 ; AVX1-NEXT: vpand %xmm2, %xmm0, %xmm0
827 ; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [32896,32896,32896,32896,32896,32896,32896,32896]
828 ; AVX1-NEXT: vpsrlw %xmm1, %xmm2, %xmm1
829 ; AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
830 ; AVX1-NEXT: vpsubb %xmm1, %xmm0, %xmm0
833 ; AVX2-LABEL: splatvar_shift_v16i8:
835 ; AVX2-NEXT: vpmovzxbq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
836 ; AVX2-NEXT: vpsrlw %xmm1, %xmm0, %xmm0
837 ; AVX2-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
838 ; AVX2-NEXT: vpsrlw %xmm1, %xmm2, %xmm2
839 ; AVX2-NEXT: vpsrlw $8, %xmm2, %xmm2
840 ; AVX2-NEXT: vpbroadcastb %xmm2, %xmm2
841 ; AVX2-NEXT: vpand %xmm2, %xmm0, %xmm0
842 ; AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = [32896,32896,32896,32896,32896,32896,32896,32896]
843 ; AVX2-NEXT: vpsrlw %xmm1, %xmm2, %xmm1
844 ; AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
845 ; AVX2-NEXT: vpsubb %xmm1, %xmm0, %xmm0
848 ; XOPAVX1-LABEL: splatvar_shift_v16i8:
850 ; XOPAVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
851 ; XOPAVX1-NEXT: vpshufb %xmm2, %xmm1, %xmm1
852 ; XOPAVX1-NEXT: vpsubb %xmm1, %xmm2, %xmm1
853 ; XOPAVX1-NEXT: vpshab %xmm1, %xmm0, %xmm0
856 ; XOPAVX2-LABEL: splatvar_shift_v16i8:
858 ; XOPAVX2-NEXT: vpbroadcastb %xmm1, %xmm1
859 ; XOPAVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2
860 ; XOPAVX2-NEXT: vpsubb %xmm1, %xmm2, %xmm1
861 ; XOPAVX2-NEXT: vpshab %xmm1, %xmm0, %xmm0
864 ; AVX512DQ-LABEL: splatvar_shift_v16i8:
866 ; AVX512DQ-NEXT: vpbroadcastb %xmm1, %xmm1
867 ; AVX512DQ-NEXT: vpmovsxbd %xmm0, %zmm0
868 ; AVX512DQ-NEXT: vpmovzxbd {{.*#+}} zmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero,xmm1[4],zero,zero,zero,xmm1[5],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[7],zero,zero,zero,xmm1[8],zero,zero,zero,xmm1[9],zero,zero,zero,xmm1[10],zero,zero,zero,xmm1[11],zero,zero,zero,xmm1[12],zero,zero,zero,xmm1[13],zero,zero,zero,xmm1[14],zero,zero,zero,xmm1[15],zero,zero,zero
869 ; AVX512DQ-NEXT: vpsravd %zmm1, %zmm0, %zmm0
870 ; AVX512DQ-NEXT: vpmovdb %zmm0, %xmm0
871 ; AVX512DQ-NEXT: vzeroupper
872 ; AVX512DQ-NEXT: retq
874 ; AVX512BW-LABEL: splatvar_shift_v16i8:
876 ; AVX512BW-NEXT: vpbroadcastb %xmm1, %xmm1
877 ; AVX512BW-NEXT: vpmovsxbw %xmm0, %ymm0
878 ; AVX512BW-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
879 ; AVX512BW-NEXT: vpsravw %zmm1, %zmm0, %zmm0
880 ; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
881 ; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
882 ; AVX512BW-NEXT: vzeroupper
883 ; AVX512BW-NEXT: retq
885 ; AVX512DQVL-LABEL: splatvar_shift_v16i8:
886 ; AVX512DQVL: # %bb.0:
887 ; AVX512DQVL-NEXT: vpbroadcastb %xmm1, %xmm1
888 ; AVX512DQVL-NEXT: vpmovsxbd %xmm0, %zmm0
889 ; AVX512DQVL-NEXT: vpmovzxbd {{.*#+}} zmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero,xmm1[4],zero,zero,zero,xmm1[5],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[7],zero,zero,zero,xmm1[8],zero,zero,zero,xmm1[9],zero,zero,zero,xmm1[10],zero,zero,zero,xmm1[11],zero,zero,zero,xmm1[12],zero,zero,zero,xmm1[13],zero,zero,zero,xmm1[14],zero,zero,zero,xmm1[15],zero,zero,zero
890 ; AVX512DQVL-NEXT: vpsravd %zmm1, %zmm0, %zmm0
891 ; AVX512DQVL-NEXT: vpmovdb %zmm0, %xmm0
892 ; AVX512DQVL-NEXT: vzeroupper
893 ; AVX512DQVL-NEXT: retq
895 ; AVX512BWVL-LABEL: splatvar_shift_v16i8:
896 ; AVX512BWVL: # %bb.0:
897 ; AVX512BWVL-NEXT: vpbroadcastb %xmm1, %xmm1
898 ; AVX512BWVL-NEXT: vpmovsxbw %xmm0, %ymm0
899 ; AVX512BWVL-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
900 ; AVX512BWVL-NEXT: vpsravw %ymm1, %ymm0, %ymm0
901 ; AVX512BWVL-NEXT: vpmovwb %ymm0, %xmm0
902 ; AVX512BWVL-NEXT: vzeroupper
903 ; AVX512BWVL-NEXT: retq
905 ; X32-SSE-LABEL: splatvar_shift_v16i8:
907 ; X32-SSE-NEXT: pslldq {{.*#+}} xmm1 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm1[0]
908 ; X32-SSE-NEXT: psrldq {{.*#+}} xmm1 = xmm1[15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
909 ; X32-SSE-NEXT: psrlw %xmm1, %xmm0
910 ; X32-SSE-NEXT: pcmpeqd %xmm2, %xmm2
911 ; X32-SSE-NEXT: psrlw %xmm1, %xmm2
912 ; X32-SSE-NEXT: psrlw $8, %xmm2
913 ; X32-SSE-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
914 ; X32-SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[0,0,2,3,4,5,6,7]
915 ; X32-SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,0,0,0]
916 ; X32-SSE-NEXT: pand %xmm2, %xmm0
917 ; X32-SSE-NEXT: movdqa {{.*#+}} xmm2 = [32896,32896,32896,32896,32896,32896,32896,32896]
918 ; X32-SSE-NEXT: psrlw %xmm1, %xmm2
919 ; X32-SSE-NEXT: pxor %xmm2, %xmm0
920 ; X32-SSE-NEXT: psubb %xmm2, %xmm0
  %splat = shufflevector <16 x i8> %b, <16 x i8> undef, <16 x i32> zeroinitializer
  %shift = ashr <16 x i8> %a, %splat
  ret <16 x i8> %shift
}

;
; Constant Shifts
;

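; Constant counts fold to the immediate shift forms: v2i64 blends two
; psrlq-by-immediate results and applies the usual sign fixup, with
; m = <2^62, 2^56> for the counts <1, 7> below.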
931 define <2 x i64> @constant_shift_v2i64(<2 x i64> %a) nounwind {
932 ; SSE2-LABEL: constant_shift_v2i64:
934 ; SSE2-NEXT: movdqa %xmm0, %xmm1
935 ; SSE2-NEXT: psrlq $1, %xmm1
936 ; SSE2-NEXT: psrlq $7, %xmm0
937 ; SSE2-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
938 ; SSE2-NEXT: movapd {{.*#+}} xmm1 = [4611686018427387904,72057594037927936]
939 ; SSE2-NEXT: xorpd %xmm1, %xmm0
940 ; SSE2-NEXT: psubq %xmm1, %xmm0
943 ; SSE41-LABEL: constant_shift_v2i64:
945 ; SSE41-NEXT: movdqa %xmm0, %xmm1
946 ; SSE41-NEXT: psrlq $7, %xmm1
947 ; SSE41-NEXT: psrlq $1, %xmm0
948 ; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
949 ; SSE41-NEXT: movdqa {{.*#+}} xmm1 = [4611686018427387904,72057594037927936]
950 ; SSE41-NEXT: pxor %xmm1, %xmm0
951 ; SSE41-NEXT: psubq %xmm1, %xmm0
954 ; AVX1-LABEL: constant_shift_v2i64:
956 ; AVX1-NEXT: vpsrlq $7, %xmm0, %xmm1
957 ; AVX1-NEXT: vpsrlq $1, %xmm0, %xmm0
958 ; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
959 ; AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = [4611686018427387904,72057594037927936]
960 ; AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
961 ; AVX1-NEXT: vpsubq %xmm1, %xmm0, %xmm0
964 ; AVX2-LABEL: constant_shift_v2i64:
966 ; AVX2-NEXT: vpsrlvq {{.*}}(%rip), %xmm0, %xmm0
967 ; AVX2-NEXT: vmovdqa {{.*#+}} xmm1 = [4611686018427387904,72057594037927936]
968 ; AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
969 ; AVX2-NEXT: vpsubq %xmm1, %xmm0, %xmm0
972 ; XOP-LABEL: constant_shift_v2i64:
974 ; XOP-NEXT: vpshaq {{.*}}(%rip), %xmm0, %xmm0
977 ; AVX512-LABEL: constant_shift_v2i64:
979 ; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
980 ; AVX512-NEXT: vmovdqa {{.*#+}} xmm1 = [1,7]
981 ; AVX512-NEXT: vpsravq %zmm1, %zmm0, %zmm0
982 ; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
983 ; AVX512-NEXT: vzeroupper
986 ; AVX512VL-LABEL: constant_shift_v2i64:
988 ; AVX512VL-NEXT: vpsravq {{.*}}(%rip), %xmm0, %xmm0
989 ; AVX512VL-NEXT: retq
991 ; X32-SSE-LABEL: constant_shift_v2i64:
993 ; X32-SSE-NEXT: movdqa %xmm0, %xmm1
994 ; X32-SSE-NEXT: psrlq $1, %xmm1
995 ; X32-SSE-NEXT: psrlq $7, %xmm0
996 ; X32-SSE-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
997 ; X32-SSE-NEXT: movapd {{.*#+}} xmm1 = [2.0E+0,7.2911220195563975E-304]
998 ; X32-SSE-NEXT: xorpd %xmm1, %xmm0
999 ; X32-SSE-NEXT: psubq %xmm1, %xmm0
1000 ; X32-SSE-NEXT: retl
  %shift = ashr <2 x i64> %a, <i64 1, i64 7>
  ret <2 x i64> %shift
}

1005 define <4 x i32> @constant_shift_v4i32(<4 x i32> %a) nounwind {
1006 ; SSE2-LABEL: constant_shift_v4i32:
1008 ; SSE2-NEXT: movdqa %xmm0, %xmm1
1009 ; SSE2-NEXT: psrad $7, %xmm1
1010 ; SSE2-NEXT: movdqa %xmm0, %xmm2
1011 ; SSE2-NEXT: psrad $6, %xmm2
1012 ; SSE2-NEXT: punpckhqdq {{.*#+}} xmm2 = xmm2[1],xmm1[1]
1013 ; SSE2-NEXT: movdqa %xmm0, %xmm1
1014 ; SSE2-NEXT: psrad $5, %xmm1
1015 ; SSE2-NEXT: psrad $4, %xmm0
1016 ; SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
1017 ; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,3],xmm2[0,3]
1020 ; SSE41-LABEL: constant_shift_v4i32:
1022 ; SSE41-NEXT: movdqa %xmm0, %xmm1
1023 ; SSE41-NEXT: psrad $7, %xmm1
1024 ; SSE41-NEXT: movdqa %xmm0, %xmm2
1025 ; SSE41-NEXT: psrad $5, %xmm2
1026 ; SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm1[4,5,6,7]
1027 ; SSE41-NEXT: movdqa %xmm0, %xmm1
1028 ; SSE41-NEXT: psrad $6, %xmm1
1029 ; SSE41-NEXT: psrad $4, %xmm0
1030 ; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
1031 ; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
1034 ; AVX1-LABEL: constant_shift_v4i32:
1036 ; AVX1-NEXT: vpsrad $7, %xmm0, %xmm1
1037 ; AVX1-NEXT: vpsrad $5, %xmm0, %xmm2
1038 ; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm2[0,1,2,3],xmm1[4,5,6,7]
1039 ; AVX1-NEXT: vpsrad $6, %xmm0, %xmm2
1040 ; AVX1-NEXT: vpsrad $4, %xmm0, %xmm0
1041 ; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm2[4,5,6,7]
1042 ; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7]
1045 ; AVX2-LABEL: constant_shift_v4i32:
1047 ; AVX2-NEXT: vpsravd {{.*}}(%rip), %xmm0, %xmm0
1050 ; XOPAVX1-LABEL: constant_shift_v4i32:
1052 ; XOPAVX1-NEXT: vpshad {{.*}}(%rip), %xmm0, %xmm0
1053 ; XOPAVX1-NEXT: retq
1055 ; XOPAVX2-LABEL: constant_shift_v4i32:
1057 ; XOPAVX2-NEXT: vpsravd {{.*}}(%rip), %xmm0, %xmm0
1058 ; XOPAVX2-NEXT: retq
1060 ; AVX512-LABEL: constant_shift_v4i32:
1062 ; AVX512-NEXT: vpsravd {{.*}}(%rip), %xmm0, %xmm0
1065 ; AVX512VL-LABEL: constant_shift_v4i32:
1066 ; AVX512VL: # %bb.0:
1067 ; AVX512VL-NEXT: vpsravd {{.*}}(%rip), %xmm0, %xmm0
1068 ; AVX512VL-NEXT: retq
1070 ; X32-SSE-LABEL: constant_shift_v4i32:
1072 ; X32-SSE-NEXT: movdqa %xmm0, %xmm1
1073 ; X32-SSE-NEXT: psrad $7, %xmm1
1074 ; X32-SSE-NEXT: movdqa %xmm0, %xmm2
1075 ; X32-SSE-NEXT: psrad $6, %xmm2
1076 ; X32-SSE-NEXT: punpckhqdq {{.*#+}} xmm2 = xmm2[1],xmm1[1]
1077 ; X32-SSE-NEXT: movdqa %xmm0, %xmm1
1078 ; X32-SSE-NEXT: psrad $5, %xmm1
1079 ; X32-SSE-NEXT: psrad $4, %xmm0
1080 ; X32-SSE-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
1081 ; X32-SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,3],xmm2[0,3]
1082 ; X32-SSE-NEXT: retl
  %shift = ashr <4 x i32> %a, <i32 4, i32 5, i32 6, i32 7>
  ret <4 x i32> %shift
}

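; The SSE4.1/AVX form replaces eight different word shifts with one multiply:
; pmulhw by 2^(16-n) computes the n-bit arithmetic shift per lane. Lane 0
; (count 0) is blended back in from the unshifted input, and lane 1 from
; psraw $1, since its 32768 multiplier reads as -32768 in a signed word.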
1087 define <8 x i16> @constant_shift_v8i16(<8 x i16> %a) nounwind {
1088 ; SSE2-LABEL: constant_shift_v8i16:
1090 ; SSE2-NEXT: movdqa %xmm0, %xmm1
1091 ; SSE2-NEXT: psraw $4, %xmm1
1092 ; SSE2-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
1093 ; SSE2-NEXT: movapd %xmm1, %xmm2
1094 ; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2],xmm1[2,3]
1095 ; SSE2-NEXT: psraw $2, %xmm1
1096 ; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,3,2,3]
1097 ; SSE2-NEXT: unpcklps {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
1098 ; SSE2-NEXT: movaps {{.*#+}} xmm1 = [65535,0,65535,0,65535,0,65535,0]
1099 ; SSE2-NEXT: movaps %xmm2, %xmm0
1100 ; SSE2-NEXT: andps %xmm1, %xmm0
1101 ; SSE2-NEXT: psraw $1, %xmm2
1102 ; SSE2-NEXT: andnps %xmm2, %xmm1
1103 ; SSE2-NEXT: orps %xmm1, %xmm0
1106 ; SSE41-LABEL: constant_shift_v8i16:
1108 ; SSE41-NEXT: movdqa {{.*#+}} xmm1 = <u,32768,16384,8192,4096,2048,1024,512>
1109 ; SSE41-NEXT: pmulhw %xmm0, %xmm1
1110 ; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3,4,5,6,7]
1111 ; SSE41-NEXT: psraw $1, %xmm0
1112 ; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2,3,4,5,6,7]
1115 ; AVX-LABEL: constant_shift_v8i16:
1117 ; AVX-NEXT: vpmulhw {{.*}}(%rip), %xmm0, %xmm1
1118 ; AVX-NEXT: vpblendw {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3,4,5,6,7]
1119 ; AVX-NEXT: vpsraw $1, %xmm0, %xmm0
1120 ; AVX-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2,3,4,5,6,7]
1123 ; XOP-LABEL: constant_shift_v8i16:
1125 ; XOP-NEXT: vpshaw {{.*}}(%rip), %xmm0, %xmm0
1128 ; AVX512DQ-LABEL: constant_shift_v8i16:
1129 ; AVX512DQ: # %bb.0:
1130 ; AVX512DQ-NEXT: vpmovsxwd %xmm0, %ymm0
1131 ; AVX512DQ-NEXT: vpsravd {{.*}}(%rip), %ymm0, %ymm0
1132 ; AVX512DQ-NEXT: vpmovdw %zmm0, %ymm0
1133 ; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
1134 ; AVX512DQ-NEXT: vzeroupper
1135 ; AVX512DQ-NEXT: retq
1137 ; AVX512BW-LABEL: constant_shift_v8i16:
1138 ; AVX512BW: # %bb.0:
1139 ; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
1140 ; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm1 = [0,1,2,3,4,5,6,7]
1141 ; AVX512BW-NEXT: vpsravw %zmm1, %zmm0, %zmm0
1142 ; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
1143 ; AVX512BW-NEXT: vzeroupper
1144 ; AVX512BW-NEXT: retq
1146 ; AVX512DQVL-LABEL: constant_shift_v8i16:
1147 ; AVX512DQVL: # %bb.0:
1148 ; AVX512DQVL-NEXT: vpmovsxwd %xmm0, %ymm0
1149 ; AVX512DQVL-NEXT: vpsravd {{.*}}(%rip), %ymm0, %ymm0
1150 ; AVX512DQVL-NEXT: vpmovdw %ymm0, %xmm0
1151 ; AVX512DQVL-NEXT: vzeroupper
1152 ; AVX512DQVL-NEXT: retq
1154 ; AVX512BWVL-LABEL: constant_shift_v8i16:
1155 ; AVX512BWVL: # %bb.0:
1156 ; AVX512BWVL-NEXT: vpsravw {{.*}}(%rip), %xmm0, %xmm0
1157 ; AVX512BWVL-NEXT: retq
1159 ; X32-SSE-LABEL: constant_shift_v8i16:
1161 ; X32-SSE-NEXT: movdqa %xmm0, %xmm1
1162 ; X32-SSE-NEXT: psraw $4, %xmm1
1163 ; X32-SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
1164 ; X32-SSE-NEXT: movapd %xmm1, %xmm2
1165 ; X32-SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2],xmm1[2,3]
1166 ; X32-SSE-NEXT: psraw $2, %xmm1
1167 ; X32-SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,3,2,3]
1168 ; X32-SSE-NEXT: unpcklps {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
1169 ; X32-SSE-NEXT: movaps {{.*#+}} xmm1 = [65535,0,65535,0,65535,0,65535,0]
1170 ; X32-SSE-NEXT: movaps %xmm2, %xmm0
1171 ; X32-SSE-NEXT: andps %xmm1, %xmm0
1172 ; X32-SSE-NEXT: psraw $1, %xmm2
1173 ; X32-SSE-NEXT: andnps %xmm2, %xmm1
1174 ; X32-SSE-NEXT: orps %xmm1, %xmm0
1175 ; X32-SSE-NEXT: retl
  %shift = ashr <8 x i16> %a, <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>
  ret <8 x i16> %shift
}

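; Byte counts are handled by widening: sign-extend each half to words and
; pmullw by 2^(8-n), leaving ashr(x, n) in the high byte of each word, then
; extract with psrlw $8 and repack with packuswb.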
1180 define <16 x i8> @constant_shift_v16i8(<16 x i8> %a) nounwind {
1181 ; SSE-LABEL: constant_shift_v16i8:
1183 ; SSE-NEXT: movdqa %xmm0, %xmm1
1184 ; SSE-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
1185 ; SSE-NEXT: psraw $8, %xmm1
1186 ; SSE-NEXT: pmullw {{.*}}(%rip), %xmm1
1187 ; SSE-NEXT: psrlw $8, %xmm1
1188 ; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
1189 ; SSE-NEXT: psraw $8, %xmm0
1190 ; SSE-NEXT: pmullw {{.*}}(%rip), %xmm0
1191 ; SSE-NEXT: psrlw $8, %xmm0
1192 ; SSE-NEXT: packuswb %xmm1, %xmm0
1195 ; AVX1-LABEL: constant_shift_v16i8:
1197 ; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
1198 ; AVX1-NEXT: vpsraw $8, %xmm1, %xmm1
1199 ; AVX1-NEXT: vpmullw {{.*}}(%rip), %xmm1, %xmm1
1200 ; AVX1-NEXT: vpsrlw $8, %xmm1, %xmm1
1201 ; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
1202 ; AVX1-NEXT: vpsraw $8, %xmm0, %xmm0
1203 ; AVX1-NEXT: vpmullw {{.*}}(%rip), %xmm0, %xmm0
1204 ; AVX1-NEXT: vpsrlw $8, %xmm0, %xmm0
1205 ; AVX1-NEXT: vpackuswb %xmm1, %xmm0, %xmm0
1208 ; AVX2-LABEL: constant_shift_v16i8:
1210 ; AVX2-NEXT: vpmovsxbw %xmm0, %ymm0
1211 ; AVX2-NEXT: vpmullw {{.*}}(%rip), %ymm0, %ymm0
1212 ; AVX2-NEXT: vpsrlw $8, %ymm0, %ymm0
1213 ; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
1214 ; AVX2-NEXT: vpackuswb %xmm1, %xmm0, %xmm0
1215 ; AVX2-NEXT: vzeroupper
1218 ; XOP-LABEL: constant_shift_v16i8:
1220 ; XOP-NEXT: vpshab {{.*}}(%rip), %xmm0, %xmm0
1223 ; AVX512DQ-LABEL: constant_shift_v16i8:
1224 ; AVX512DQ: # %bb.0:
1225 ; AVX512DQ-NEXT: vpmovsxbd %xmm0, %zmm0
1226 ; AVX512DQ-NEXT: vpsravd {{.*}}(%rip), %zmm0, %zmm0
1227 ; AVX512DQ-NEXT: vpmovdb %zmm0, %xmm0
1228 ; AVX512DQ-NEXT: vzeroupper
1229 ; AVX512DQ-NEXT: retq
1231 ; AVX512BW-LABEL: constant_shift_v16i8:
1232 ; AVX512BW: # %bb.0:
1233 ; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm1 = [0,1,2,3,4,5,6,7,7,6,5,4,3,2,1,0]
1234 ; AVX512BW-NEXT: vpmovsxbw %xmm0, %ymm0
1235 ; AVX512BW-NEXT: vpsravw %zmm1, %zmm0, %zmm0
1236 ; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
1237 ; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
1238 ; AVX512BW-NEXT: vzeroupper
1239 ; AVX512BW-NEXT: retq
1241 ; AVX512DQVL-LABEL: constant_shift_v16i8:
1242 ; AVX512DQVL: # %bb.0:
1243 ; AVX512DQVL-NEXT: vpmovsxbd %xmm0, %zmm0
1244 ; AVX512DQVL-NEXT: vpsravd {{.*}}(%rip), %zmm0, %zmm0
1245 ; AVX512DQVL-NEXT: vpmovdb %zmm0, %xmm0
1246 ; AVX512DQVL-NEXT: vzeroupper
1247 ; AVX512DQVL-NEXT: retq
1249 ; AVX512BWVL-LABEL: constant_shift_v16i8:
1250 ; AVX512BWVL: # %bb.0:
1251 ; AVX512BWVL-NEXT: vpmovsxbw %xmm0, %ymm0
1252 ; AVX512BWVL-NEXT: vpsravw {{.*}}(%rip), %ymm0, %ymm0
1253 ; AVX512BWVL-NEXT: vpmovwb %ymm0, %xmm0
1254 ; AVX512BWVL-NEXT: vzeroupper
1255 ; AVX512BWVL-NEXT: retq
1257 ; X32-SSE-LABEL: constant_shift_v16i8:
1259 ; X32-SSE-NEXT: movdqa %xmm0, %xmm1
1260 ; X32-SSE-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
1261 ; X32-SSE-NEXT: psraw $8, %xmm1
1262 ; X32-SSE-NEXT: pmullw {{\.LCPI.*}}, %xmm1
1263 ; X32-SSE-NEXT: psrlw $8, %xmm1
1264 ; X32-SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
1265 ; X32-SSE-NEXT: psraw $8, %xmm0
1266 ; X32-SSE-NEXT: pmullw {{\.LCPI.*}}, %xmm0
1267 ; X32-SSE-NEXT: psrlw $8, %xmm0
1268 ; X32-SSE-NEXT: packuswb %xmm1, %xmm0
1269 ; X32-SSE-NEXT: retl
  %shift = ashr <16 x i8> %a, <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0>
  ret <16 x i8> %shift
}

;
; Uniform Constant Shifts
;

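; Uniform immediate counts map to a single psrad/psraw for v4i32 and v8i16;
; v2i64 (pre AVX-512 vpsraq) is rebuilt from psrad $7 on the high dwords
; blended with psrlq $7 on the qwords.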
1278 define <2 x i64> @splatconstant_shift_v2i64(<2 x i64> %a) nounwind {
1279 ; SSE2-LABEL: splatconstant_shift_v2i64:
1281 ; SSE2-NEXT: movdqa %xmm0, %xmm1
1282 ; SSE2-NEXT: psrad $7, %xmm1
1283 ; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,3,2,3]
1284 ; SSE2-NEXT: psrlq $7, %xmm0
1285 ; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
1286 ; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
1289 ; SSE41-LABEL: splatconstant_shift_v2i64:
1291 ; SSE41-NEXT: movdqa %xmm0, %xmm1
1292 ; SSE41-NEXT: psrad $7, %xmm1
1293 ; SSE41-NEXT: psrlq $7, %xmm0
1294 ; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7]
1297 ; AVX1-LABEL: splatconstant_shift_v2i64:
1299 ; AVX1-NEXT: vpsrad $7, %xmm0, %xmm1
1300 ; AVX1-NEXT: vpsrlq $7, %xmm0, %xmm0
1301 ; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7]
1304 ; AVX2-LABEL: splatconstant_shift_v2i64:
1306 ; AVX2-NEXT: vpsrad $7, %xmm0, %xmm1
1307 ; AVX2-NEXT: vpsrlq $7, %xmm0, %xmm0
1308 ; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
1311 ; XOP-LABEL: splatconstant_shift_v2i64:
1313 ; XOP-NEXT: vpshaq {{.*}}(%rip), %xmm0, %xmm0
1316 ; AVX512-LABEL: splatconstant_shift_v2i64:
1318 ; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
1319 ; AVX512-NEXT: vpsraq $7, %zmm0, %zmm0
1320 ; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
1321 ; AVX512-NEXT: vzeroupper
1324 ; AVX512VL-LABEL: splatconstant_shift_v2i64:
1325 ; AVX512VL: # %bb.0:
1326 ; AVX512VL-NEXT: vpsraq $7, %xmm0, %xmm0
1327 ; AVX512VL-NEXT: retq
1329 ; X32-SSE-LABEL: splatconstant_shift_v2i64:
1331 ; X32-SSE-NEXT: movdqa %xmm0, %xmm1
1332 ; X32-SSE-NEXT: psrad $7, %xmm1
1333 ; X32-SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,3,2,3]
1334 ; X32-SSE-NEXT: psrlq $7, %xmm0
1335 ; X32-SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
1336 ; X32-SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
1337 ; X32-SSE-NEXT: retl
  %shift = ashr <2 x i64> %a, <i64 7, i64 7>
  ret <2 x i64> %shift
}

1342 define <4 x i32> @splatconstant_shift_v4i32(<4 x i32> %a) nounwind {
1343 ; SSE-LABEL: splatconstant_shift_v4i32:
1345 ; SSE-NEXT: psrad $5, %xmm0
1348 ; AVX-LABEL: splatconstant_shift_v4i32:
1350 ; AVX-NEXT: vpsrad $5, %xmm0, %xmm0
1353 ; XOP-LABEL: splatconstant_shift_v4i32:
1355 ; XOP-NEXT: vpsrad $5, %xmm0, %xmm0
1358 ; AVX512-LABEL: splatconstant_shift_v4i32:
1360 ; AVX512-NEXT: vpsrad $5, %xmm0, %xmm0
1363 ; AVX512VL-LABEL: splatconstant_shift_v4i32:
1364 ; AVX512VL: # %bb.0:
1365 ; AVX512VL-NEXT: vpsrad $5, %xmm0, %xmm0
1366 ; AVX512VL-NEXT: retq
1368 ; X32-SSE-LABEL: splatconstant_shift_v4i32:
1370 ; X32-SSE-NEXT: psrad $5, %xmm0
1371 ; X32-SSE-NEXT: retl
  %shift = ashr <4 x i32> %a, <i32 5, i32 5, i32 5, i32 5>
  ret <4 x i32> %shift
}

1376 define <8 x i16> @splatconstant_shift_v8i16(<8 x i16> %a) nounwind {
1377 ; SSE-LABEL: splatconstant_shift_v8i16:
1379 ; SSE-NEXT: psraw $3, %xmm0
1382 ; AVX-LABEL: splatconstant_shift_v8i16:
1384 ; AVX-NEXT: vpsraw $3, %xmm0, %xmm0
1387 ; XOP-LABEL: splatconstant_shift_v8i16:
1389 ; XOP-NEXT: vpsraw $3, %xmm0, %xmm0
1392 ; AVX512-LABEL: splatconstant_shift_v8i16:
1394 ; AVX512-NEXT: vpsraw $3, %xmm0, %xmm0
1397 ; AVX512VL-LABEL: splatconstant_shift_v8i16:
1398 ; AVX512VL: # %bb.0:
1399 ; AVX512VL-NEXT: vpsraw $3, %xmm0, %xmm0
1400 ; AVX512VL-NEXT: retq
1402 ; X32-SSE-LABEL: splatconstant_shift_v8i16:
1404 ; X32-SSE-NEXT: psraw $3, %xmm0
1405 ; X32-SSE-NEXT: retl
  %shift = ashr <8 x i16> %a, <i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3>
  ret <8 x i16> %shift
}

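; For splatted byte counts the word shifter is reused: psrlw $3 plus a mask
; clears the bits pulled in from the neighbouring byte, and xor/sub against
; 16 (0x80 >> 3) restores the sign bits.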
1410 define <16 x i8> @splatconstant_shift_v16i8(<16 x i8> %a) nounwind {
1411 ; SSE-LABEL: splatconstant_shift_v16i8:
1413 ; SSE-NEXT: psrlw $3, %xmm0
1414 ; SSE-NEXT: pand {{.*}}(%rip), %xmm0
1415 ; SSE-NEXT: movdqa {{.*#+}} xmm1 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
1416 ; SSE-NEXT: pxor %xmm1, %xmm0
1417 ; SSE-NEXT: psubb %xmm1, %xmm0
1420 ; AVX-LABEL: splatconstant_shift_v16i8:
1422 ; AVX-NEXT: vpsrlw $3, %xmm0, %xmm0
1423 ; AVX-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
1424 ; AVX-NEXT: vmovdqa {{.*#+}} xmm1 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
1425 ; AVX-NEXT: vpxor %xmm1, %xmm0, %xmm0
1426 ; AVX-NEXT: vpsubb %xmm1, %xmm0, %xmm0
1429 ; XOP-LABEL: splatconstant_shift_v16i8:
1431 ; XOP-NEXT: vpshab {{.*}}(%rip), %xmm0, %xmm0
1434 ; AVX512-LABEL: splatconstant_shift_v16i8:
1436 ; AVX512-NEXT: vpsrlw $3, %xmm0, %xmm0
1437 ; AVX512-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
1438 ; AVX512-NEXT: vmovdqa {{.*#+}} xmm1 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
1439 ; AVX512-NEXT: vpxor %xmm1, %xmm0, %xmm0
1440 ; AVX512-NEXT: vpsubb %xmm1, %xmm0, %xmm0
1443 ; AVX512VL-LABEL: splatconstant_shift_v16i8:
1444 ; AVX512VL: # %bb.0:
1445 ; AVX512VL-NEXT: vpsrlw $3, %xmm0, %xmm0
1446 ; AVX512VL-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
1447 ; AVX512VL-NEXT: vmovdqa {{.*#+}} xmm1 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
1448 ; AVX512VL-NEXT: vpxor %xmm1, %xmm0, %xmm0
1449 ; AVX512VL-NEXT: vpsubb %xmm1, %xmm0, %xmm0
1450 ; AVX512VL-NEXT: retq
1452 ; X32-SSE-LABEL: splatconstant_shift_v16i8:
1454 ; X32-SSE-NEXT: psrlw $3, %xmm0
1455 ; X32-SSE-NEXT: pand {{\.LCPI.*}}, %xmm0
1456 ; X32-SSE-NEXT: movdqa {{.*#+}} xmm1 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
1457 ; X32-SSE-NEXT: pxor %xmm1, %xmm0
1458 ; X32-SSE-NEXT: psubb %xmm1, %xmm0
1459 ; X32-SSE-NEXT: retl
  %shift = ashr <16 x i8> %a, <i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3>
  ret <16 x i8> %shift
}