1 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSE2
3 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSE41
4 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX1
5 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX2
6 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+xop,+avx | FileCheck %s --check-prefix=ALL --check-prefix=XOP --check-prefix=XOPAVX1
7 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+xop,+avx2 | FileCheck %s --check-prefix=ALL --check-prefix=XOP --check-prefix=XOPAVX2
8 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512dq | FileCheck %s --check-prefix=ALL --check-prefix=AVX512 --check-prefix=AVX512DQ
9 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bw | FileCheck %s --check-prefix=ALL --check-prefix=AVX512 --check-prefix=AVX512BW
10 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512dq,+avx512vl | FileCheck %s --check-prefix=ALL --check-prefix=AVX512VL --check-prefix=AVX512DQVL
11 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bw,+avx512vl | FileCheck %s --check-prefix=ALL --check-prefix=AVX512VL --check-prefix=AVX512BWVL
13 ; Just one 32-bit run to make sure we do reasonable things for i64 shifts.
14 ; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefix=ALL --check-prefix=X32-SSE --check-prefix=X32-SSE2
20 define <2 x i32> @var_shift_v2i32(<2 x i32> %a, <2 x i32> %b) nounwind {
21 ; SSE2-LABEL: var_shift_v2i32:
23 ; SSE2-NEXT: psllq $32, %xmm0
24 ; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,3,2,3]
25 ; SSE2-NEXT: psrad $31, %xmm0
26 ; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,3,2,3]
27 ; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
28 ; SSE2-NEXT: pand {{.*}}(%rip), %xmm1
29 ; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [9223372036854775808,9223372036854775808]
30 ; SSE2-NEXT: movdqa %xmm0, %xmm3
31 ; SSE2-NEXT: psrlq %xmm1, %xmm3
32 ; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm1[2,3,0,1]
33 ; SSE2-NEXT: psrlq %xmm4, %xmm0
34 ; SSE2-NEXT: movsd {{.*#+}} xmm0 = xmm3[0],xmm0[1]
35 ; SSE2-NEXT: movdqa %xmm2, %xmm3
36 ; SSE2-NEXT: psrlq %xmm1, %xmm3
37 ; SSE2-NEXT: psrlq %xmm4, %xmm2
38 ; SSE2-NEXT: movsd {{.*#+}} xmm2 = xmm3[0],xmm2[1]
39 ; SSE2-NEXT: xorpd %xmm0, %xmm2
40 ; SSE2-NEXT: psubq %xmm0, %xmm2
41 ; SSE2-NEXT: movdqa %xmm2, %xmm0
44 ; SSE41-LABEL: var_shift_v2i32:
46 ; SSE41-NEXT: movdqa %xmm0, %xmm2
47 ; SSE41-NEXT: psllq $32, %xmm2
48 ; SSE41-NEXT: psrad $31, %xmm2
49 ; SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
50 ; SSE41-NEXT: pxor %xmm0, %xmm0
51 ; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3],xmm1[4,5],xmm0[6,7]
52 ; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
53 ; SSE41-NEXT: movdqa %xmm2, %xmm3
54 ; SSE41-NEXT: psrlq %xmm1, %xmm3
55 ; SSE41-NEXT: psrlq %xmm0, %xmm2
56 ; SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm3[4,5,6,7]
57 ; SSE41-NEXT: movdqa {{.*#+}} xmm3 = [9223372036854775808,9223372036854775808]
58 ; SSE41-NEXT: movdqa %xmm3, %xmm4
59 ; SSE41-NEXT: psrlq %xmm0, %xmm4
60 ; SSE41-NEXT: psrlq %xmm1, %xmm3
61 ; SSE41-NEXT: pblendw {{.*#+}} xmm3 = xmm4[0,1,2,3],xmm3[4,5,6,7]
62 ; SSE41-NEXT: pxor %xmm3, %xmm2
63 ; SSE41-NEXT: psubq %xmm3, %xmm2
64 ; SSE41-NEXT: movdqa %xmm2, %xmm0
67 ; AVX1-LABEL: var_shift_v2i32:
69 ; AVX1-NEXT: vpsllq $32, %xmm0, %xmm2
70 ; AVX1-NEXT: vpsrad $31, %xmm2, %xmm2
71 ; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
72 ; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
73 ; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7]
74 ; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[2,3,0,1]
75 ; AVX1-NEXT: vpsrlq %xmm2, %xmm0, %xmm3
76 ; AVX1-NEXT: vpsrlq %xmm1, %xmm0, %xmm0
77 ; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm3[4,5,6,7]
78 ; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [9223372036854775808,9223372036854775808]
79 ; AVX1-NEXT: vpsrlq %xmm1, %xmm3, %xmm1
80 ; AVX1-NEXT: vpsrlq %xmm2, %xmm3, %xmm2
81 ; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm2[4,5,6,7]
82 ; AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
83 ; AVX1-NEXT: vpsubq %xmm1, %xmm0, %xmm0
86 ; AVX2-LABEL: var_shift_v2i32:
88 ; AVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2
89 ; AVX2-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3]
90 ; AVX2-NEXT: vpsllq $32, %xmm0, %xmm2
91 ; AVX2-NEXT: vpsrad $31, %xmm2, %xmm2
92 ; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3]
93 ; AVX2-NEXT: vpsrlvq %xmm1, %xmm0, %xmm0
94 ; AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
95 ; AVX2-NEXT: vpsrlvq %xmm1, %xmm2, %xmm1
96 ; AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
97 ; AVX2-NEXT: vpsubq %xmm1, %xmm0, %xmm0
100 ; XOPAVX1-LABEL: var_shift_v2i32:
102 ; XOPAVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
103 ; XOPAVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7]
104 ; XOPAVX1-NEXT: vpsllq $32, %xmm0, %xmm0
105 ; XOPAVX1-NEXT: vpshaq {{.*}}(%rip), %xmm0, %xmm0
106 ; XOPAVX1-NEXT: vpsubq %xmm1, %xmm2, %xmm1
107 ; XOPAVX1-NEXT: vpshaq %xmm1, %xmm0, %xmm0
110 ; XOPAVX2-LABEL: var_shift_v2i32:
112 ; XOPAVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2
113 ; XOPAVX2-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3]
114 ; XOPAVX2-NEXT: vpsllq $32, %xmm0, %xmm0
115 ; XOPAVX2-NEXT: vpshaq {{.*}}(%rip), %xmm0, %xmm0
116 ; XOPAVX2-NEXT: vpsubq %xmm1, %xmm2, %xmm1
117 ; XOPAVX2-NEXT: vpshaq %xmm1, %xmm0, %xmm0
120 ; AVX512-LABEL: var_shift_v2i32:
122 ; AVX512-NEXT: vpsllq $32, %xmm0, %xmm0
123 ; AVX512-NEXT: vpsraq $32, %zmm0, %zmm0
124 ; AVX512-NEXT: vpxor %xmm2, %xmm2, %xmm2
125 ; AVX512-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3]
126 ; AVX512-NEXT: vpsravq %zmm1, %zmm0, %zmm0
127 ; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
128 ; AVX512-NEXT: vzeroupper
131 ; AVX512VL-LABEL: var_shift_v2i32:
133 ; AVX512VL-NEXT: vpxor %xmm2, %xmm2, %xmm2
134 ; AVX512VL-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3]
135 ; AVX512VL-NEXT: vpsllq $32, %xmm0, %xmm0
136 ; AVX512VL-NEXT: vpsraq $32, %xmm0, %xmm0
137 ; AVX512VL-NEXT: vpsravq %xmm1, %xmm0, %xmm0
138 ; AVX512VL-NEXT: retq
140 ; X32-SSE-LABEL: var_shift_v2i32:
142 ; X32-SSE-NEXT: psllq $32, %xmm0
143 ; X32-SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,3,2,3]
144 ; X32-SSE-NEXT: psrad $31, %xmm0
145 ; X32-SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,3,2,3]
146 ; X32-SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
147 ; X32-SSE-NEXT: pand {{\.LCPI.*}}, %xmm1
148 ; X32-SSE-NEXT: movdqa {{.*#+}} xmm0 = [0,2147483648,0,2147483648]
149 ; X32-SSE-NEXT: movdqa %xmm0, %xmm3
150 ; X32-SSE-NEXT: psrlq %xmm1, %xmm3
151 ; X32-SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm1[2,3,0,1]
152 ; X32-SSE-NEXT: xorps %xmm5, %xmm5
153 ; X32-SSE-NEXT: movss {{.*#+}} xmm5 = xmm4[0],xmm5[1,2,3]
154 ; X32-SSE-NEXT: psrlq %xmm5, %xmm0
155 ; X32-SSE-NEXT: movsd {{.*#+}} xmm0 = xmm3[0],xmm0[1]
156 ; X32-SSE-NEXT: movdqa %xmm2, %xmm3
157 ; X32-SSE-NEXT: psrlq %xmm5, %xmm3
158 ; X32-SSE-NEXT: psrlq %xmm1, %xmm2
159 ; X32-SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[0],xmm3[1]
160 ; X32-SSE-NEXT: xorpd %xmm0, %xmm2
161 ; X32-SSE-NEXT: psubq %xmm0, %xmm2
162 ; X32-SSE-NEXT: movdqa %xmm2, %xmm0
164 %shift = ashr <2 x i32> %a, %b
168 define <4 x i16> @var_shift_v4i16(<4 x i16> %a, <4 x i16> %b) nounwind {
169 ; SSE2-LABEL: var_shift_v4i16:
171 ; SSE2-NEXT: pslld $16, %xmm0
172 ; SSE2-NEXT: psrad $16, %xmm0
173 ; SSE2-NEXT: pand {{.*}}(%rip), %xmm1
174 ; SSE2-NEXT: pshuflw {{.*#+}} xmm2 = xmm1[2,3,3,3,4,5,6,7]
175 ; SSE2-NEXT: movdqa %xmm0, %xmm3
176 ; SSE2-NEXT: psrad %xmm2, %xmm3
177 ; SSE2-NEXT: pshuflw {{.*#+}} xmm4 = xmm1[0,1,1,1,4,5,6,7]
178 ; SSE2-NEXT: movdqa %xmm0, %xmm2
179 ; SSE2-NEXT: psrad %xmm4, %xmm2
180 ; SSE2-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm3[0]
181 ; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
182 ; SSE2-NEXT: pshuflw {{.*#+}} xmm3 = xmm1[2,3,3,3,4,5,6,7]
183 ; SSE2-NEXT: movdqa %xmm0, %xmm4
184 ; SSE2-NEXT: psrad %xmm3, %xmm4
185 ; SSE2-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,1,1,1,4,5,6,7]
186 ; SSE2-NEXT: psrad %xmm1, %xmm0
187 ; SSE2-NEXT: punpckhqdq {{.*#+}} xmm0 = xmm0[1],xmm4[1]
188 ; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,3],xmm0[0,3]
189 ; SSE2-NEXT: movaps %xmm2, %xmm0
192 ; SSE41-LABEL: var_shift_v4i16:
194 ; SSE41-NEXT: pxor %xmm2, %xmm2
195 ; SSE41-NEXT: movdqa %xmm1, %xmm3
196 ; SSE41-NEXT: pblendw {{.*#+}} xmm3 = xmm3[0],xmm2[1],xmm3[2],xmm2[3],xmm3[4],xmm2[5],xmm3[6],xmm2[7]
197 ; SSE41-NEXT: pslld $16, %xmm0
198 ; SSE41-NEXT: psrad $16, %xmm0
199 ; SSE41-NEXT: pshuflw {{.*#+}} xmm4 = xmm3[2,3,3,3,4,5,6,7]
200 ; SSE41-NEXT: movdqa %xmm0, %xmm5
201 ; SSE41-NEXT: psrad %xmm4, %xmm5
202 ; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm3[2,3,0,1]
203 ; SSE41-NEXT: pshuflw {{.*#+}} xmm4 = xmm3[2,3,3,3,4,5,6,7]
204 ; SSE41-NEXT: movdqa %xmm0, %xmm6
205 ; SSE41-NEXT: psrad %xmm4, %xmm6
206 ; SSE41-NEXT: pblendw {{.*#+}} xmm6 = xmm5[0,1,2,3],xmm6[4,5,6,7]
207 ; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1,2,3],xmm1[4],xmm2[5],xmm1[6],xmm2[7]
208 ; SSE41-NEXT: movdqa %xmm0, %xmm2
209 ; SSE41-NEXT: psrad %xmm1, %xmm2
210 ; SSE41-NEXT: pshuflw {{.*#+}} xmm1 = xmm3[0,1,1,1,4,5,6,7]
211 ; SSE41-NEXT: psrad %xmm1, %xmm0
212 ; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm2[0,1,2,3],xmm0[4,5,6,7]
213 ; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm6[2,3],xmm0[4,5],xmm6[6,7]
216 ; AVX1-LABEL: var_shift_v4i16:
218 ; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
219 ; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3],xmm1[4],xmm2[5],xmm1[6],xmm2[7]
220 ; AVX1-NEXT: vpslld $16, %xmm0, %xmm0
221 ; AVX1-NEXT: vpsrad $16, %xmm0, %xmm0
222 ; AVX1-NEXT: vpsrldq {{.*#+}} xmm3 = xmm1[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
223 ; AVX1-NEXT: vpsrad %xmm3, %xmm0, %xmm3
224 ; AVX1-NEXT: vpsrlq $32, %xmm1, %xmm4
225 ; AVX1-NEXT: vpsrad %xmm4, %xmm0, %xmm4
226 ; AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm4[0,1,2,3],xmm3[4,5,6,7]
227 ; AVX1-NEXT: vpunpckhdq {{.*#+}} xmm2 = xmm1[2],xmm2[2],xmm1[3],xmm2[3]
228 ; AVX1-NEXT: vpsrad %xmm2, %xmm0, %xmm2
229 ; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
230 ; AVX1-NEXT: vpsrad %xmm1, %xmm0, %xmm0
231 ; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm2[4,5,6,7]
232 ; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm3[2,3],xmm0[4,5],xmm3[6,7]
235 ; AVX2-LABEL: var_shift_v4i16:
237 ; AVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2
238 ; AVX2-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3],xmm1[4],xmm2[5],xmm1[6],xmm2[7]
239 ; AVX2-NEXT: vpslld $16, %xmm0, %xmm0
240 ; AVX2-NEXT: vpsrad $16, %xmm0, %xmm0
241 ; AVX2-NEXT: vpsravd %xmm1, %xmm0, %xmm0
244 ; XOPAVX1-LABEL: var_shift_v4i16:
246 ; XOPAVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
247 ; XOPAVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3],xmm1[4],xmm2[5],xmm1[6],xmm2[7]
248 ; XOPAVX1-NEXT: vpslld $16, %xmm0, %xmm0
249 ; XOPAVX1-NEXT: vpsrad $16, %xmm0, %xmm0
250 ; XOPAVX1-NEXT: vpsubd %xmm1, %xmm2, %xmm1
251 ; XOPAVX1-NEXT: vpshad %xmm1, %xmm0, %xmm0
254 ; XOPAVX2-LABEL: var_shift_v4i16:
256 ; XOPAVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2
257 ; XOPAVX2-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3],xmm1[4],xmm2[5],xmm1[6],xmm2[7]
258 ; XOPAVX2-NEXT: vpslld $16, %xmm0, %xmm0
259 ; XOPAVX2-NEXT: vpsrad $16, %xmm0, %xmm0
260 ; XOPAVX2-NEXT: vpsravd %xmm1, %xmm0, %xmm0
263 ; AVX512-LABEL: var_shift_v4i16:
265 ; AVX512-NEXT: vpxor %xmm2, %xmm2, %xmm2
266 ; AVX512-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3],xmm1[4],xmm2[5],xmm1[6],xmm2[7]
267 ; AVX512-NEXT: vpslld $16, %xmm0, %xmm0
268 ; AVX512-NEXT: vpsrad $16, %xmm0, %xmm0
269 ; AVX512-NEXT: vpsravd %xmm1, %xmm0, %xmm0
272 ; AVX512VL-LABEL: var_shift_v4i16:
274 ; AVX512VL-NEXT: vpxor %xmm2, %xmm2, %xmm2
275 ; AVX512VL-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3],xmm1[4],xmm2[5],xmm1[6],xmm2[7]
276 ; AVX512VL-NEXT: vpslld $16, %xmm0, %xmm0
277 ; AVX512VL-NEXT: vpsrad $16, %xmm0, %xmm0
278 ; AVX512VL-NEXT: vpsravd %xmm1, %xmm0, %xmm0
279 ; AVX512VL-NEXT: retq
281 ; X32-SSE-LABEL: var_shift_v4i16:
283 ; X32-SSE-NEXT: pslld $16, %xmm0
284 ; X32-SSE-NEXT: psrad $16, %xmm0
285 ; X32-SSE-NEXT: pand {{\.LCPI.*}}, %xmm1
286 ; X32-SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm1[2,3,3,3,4,5,6,7]
287 ; X32-SSE-NEXT: movdqa %xmm0, %xmm3
288 ; X32-SSE-NEXT: psrad %xmm2, %xmm3
289 ; X32-SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm1[0,1,1,1,4,5,6,7]
290 ; X32-SSE-NEXT: movdqa %xmm0, %xmm2
291 ; X32-SSE-NEXT: psrad %xmm4, %xmm2
292 ; X32-SSE-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm3[0]
293 ; X32-SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
294 ; X32-SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm1[2,3,3,3,4,5,6,7]
295 ; X32-SSE-NEXT: movdqa %xmm0, %xmm4
296 ; X32-SSE-NEXT: psrad %xmm3, %xmm4
297 ; X32-SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,1,1,1,4,5,6,7]
298 ; X32-SSE-NEXT: psrad %xmm1, %xmm0
299 ; X32-SSE-NEXT: punpckhqdq {{.*#+}} xmm0 = xmm0[1],xmm4[1]
300 ; X32-SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,3],xmm0[0,3]
301 ; X32-SSE-NEXT: movaps %xmm2, %xmm0
303 %shift = ashr <4 x i16> %a, %b
307 define <2 x i16> @var_shift_v2i16(<2 x i16> %a, <2 x i16> %b) nounwind {
308 ; SSE2-LABEL: var_shift_v2i16:
310 ; SSE2-NEXT: psllq $48, %xmm0
311 ; SSE2-NEXT: movdqa %xmm0, %xmm2
312 ; SSE2-NEXT: psrad $31, %xmm2
313 ; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,3,2,3]
314 ; SSE2-NEXT: psrad $16, %xmm0
315 ; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,3,2,3]
316 ; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
317 ; SSE2-NEXT: pand {{.*}}(%rip), %xmm1
318 ; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
319 ; SSE2-NEXT: movdqa %xmm2, %xmm3
320 ; SSE2-NEXT: psrlq %xmm1, %xmm3
321 ; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm1[2,3,0,1]
322 ; SSE2-NEXT: psrlq %xmm4, %xmm2
323 ; SSE2-NEXT: movsd {{.*#+}} xmm2 = xmm3[0],xmm2[1]
324 ; SSE2-NEXT: movdqa %xmm0, %xmm3
325 ; SSE2-NEXT: psrlq %xmm1, %xmm3
326 ; SSE2-NEXT: psrlq %xmm4, %xmm0
327 ; SSE2-NEXT: movsd {{.*#+}} xmm0 = xmm3[0],xmm0[1]
328 ; SSE2-NEXT: xorpd %xmm2, %xmm0
329 ; SSE2-NEXT: psubq %xmm2, %xmm0
332 ; SSE41-LABEL: var_shift_v2i16:
334 ; SSE41-NEXT: psllq $48, %xmm0
335 ; SSE41-NEXT: movdqa %xmm0, %xmm2
336 ; SSE41-NEXT: psrad $31, %xmm2
337 ; SSE41-NEXT: psrad $16, %xmm0
338 ; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
339 ; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
340 ; SSE41-NEXT: pxor %xmm2, %xmm2
341 ; SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm1[0],xmm2[1,2,3],xmm1[4],xmm2[5,6,7]
342 ; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm2[2,3,0,1]
343 ; SSE41-NEXT: movdqa %xmm0, %xmm3
344 ; SSE41-NEXT: psrlq %xmm1, %xmm3
345 ; SSE41-NEXT: psrlq %xmm2, %xmm0
346 ; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm3[4,5,6,7]
347 ; SSE41-NEXT: movdqa {{.*#+}} xmm3 = [9223372036854775808,9223372036854775808]
348 ; SSE41-NEXT: movdqa %xmm3, %xmm4
349 ; SSE41-NEXT: psrlq %xmm2, %xmm4
350 ; SSE41-NEXT: psrlq %xmm1, %xmm3
351 ; SSE41-NEXT: pblendw {{.*#+}} xmm3 = xmm4[0,1,2,3],xmm3[4,5,6,7]
352 ; SSE41-NEXT: pxor %xmm3, %xmm0
353 ; SSE41-NEXT: psubq %xmm3, %xmm0
356 ; AVX1-LABEL: var_shift_v2i16:
358 ; AVX1-NEXT: vpsllq $48, %xmm0, %xmm0
359 ; AVX1-NEXT: vpsrad $31, %xmm0, %xmm2
360 ; AVX1-NEXT: vpsrad $16, %xmm0, %xmm0
361 ; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
362 ; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
363 ; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
364 ; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1,2,3],xmm1[4],xmm2[5,6,7]
365 ; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[2,3,0,1]
366 ; AVX1-NEXT: vpsrlq %xmm2, %xmm0, %xmm3
367 ; AVX1-NEXT: vpsrlq %xmm1, %xmm0, %xmm0
368 ; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm3[4,5,6,7]
369 ; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [9223372036854775808,9223372036854775808]
370 ; AVX1-NEXT: vpsrlq %xmm1, %xmm3, %xmm1
371 ; AVX1-NEXT: vpsrlq %xmm2, %xmm3, %xmm2
372 ; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm2[4,5,6,7]
373 ; AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
374 ; AVX1-NEXT: vpsubq %xmm1, %xmm0, %xmm0
377 ; AVX2-LABEL: var_shift_v2i16:
379 ; AVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2
380 ; AVX2-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1,2,3],xmm1[4],xmm2[5,6,7]
381 ; AVX2-NEXT: vpsllq $48, %xmm0, %xmm0
382 ; AVX2-NEXT: vpsrad $31, %xmm0, %xmm2
383 ; AVX2-NEXT: vpsrad $16, %xmm0, %xmm0
384 ; AVX2-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
385 ; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3]
386 ; AVX2-NEXT: vpsrlvq %xmm1, %xmm0, %xmm0
387 ; AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
388 ; AVX2-NEXT: vpsrlvq %xmm1, %xmm2, %xmm1
389 ; AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
390 ; AVX2-NEXT: vpsubq %xmm1, %xmm0, %xmm0
393 ; XOP-LABEL: var_shift_v2i16:
395 ; XOP-NEXT: vpxor %xmm2, %xmm2, %xmm2
396 ; XOP-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1,2,3],xmm1[4],xmm2[5,6,7]
397 ; XOP-NEXT: vpsllq $48, %xmm0, %xmm0
398 ; XOP-NEXT: vpshaq {{.*}}(%rip), %xmm0, %xmm0
399 ; XOP-NEXT: vpsubq %xmm1, %xmm2, %xmm1
400 ; XOP-NEXT: vpshaq %xmm1, %xmm0, %xmm0
403 ; AVX512-LABEL: var_shift_v2i16:
405 ; AVX512-NEXT: vpsllq $48, %xmm0, %xmm0
406 ; AVX512-NEXT: vpsraq $48, %zmm0, %zmm0
407 ; AVX512-NEXT: vpxor %xmm2, %xmm2, %xmm2
408 ; AVX512-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1,2,3],xmm1[4],xmm2[5,6,7]
409 ; AVX512-NEXT: vpsravq %zmm1, %zmm0, %zmm0
410 ; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
411 ; AVX512-NEXT: vzeroupper
414 ; AVX512VL-LABEL: var_shift_v2i16:
416 ; AVX512VL-NEXT: vpxor %xmm2, %xmm2, %xmm2
417 ; AVX512VL-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1,2,3],xmm1[4],xmm2[5,6,7]
418 ; AVX512VL-NEXT: vpsllq $48, %xmm0, %xmm0
419 ; AVX512VL-NEXT: vpsraq $48, %xmm0, %xmm0
420 ; AVX512VL-NEXT: vpsravq %xmm1, %xmm0, %xmm0
421 ; AVX512VL-NEXT: retq
423 ; X32-SSE-LABEL: var_shift_v2i16:
425 ; X32-SSE-NEXT: psllq $48, %xmm0
426 ; X32-SSE-NEXT: movdqa %xmm0, %xmm2
427 ; X32-SSE-NEXT: psrad $31, %xmm2
428 ; X32-SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,3,2,3]
429 ; X32-SSE-NEXT: psrad $16, %xmm0
430 ; X32-SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,3,2,3]
431 ; X32-SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
432 ; X32-SSE-NEXT: pand {{\.LCPI.*}}, %xmm1
433 ; X32-SSE-NEXT: movdqa {{.*#+}} xmm2 = [0,2147483648,0,2147483648]
434 ; X32-SSE-NEXT: movdqa %xmm2, %xmm3
435 ; X32-SSE-NEXT: psrlq %xmm1, %xmm3
436 ; X32-SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm1[2,3,0,1]
437 ; X32-SSE-NEXT: psrlq %xmm4, %xmm2
438 ; X32-SSE-NEXT: movsd {{.*#+}} xmm2 = xmm3[0],xmm2[1]
439 ; X32-SSE-NEXT: movdqa %xmm0, %xmm3
440 ; X32-SSE-NEXT: psrlq %xmm1, %xmm3
441 ; X32-SSE-NEXT: psrlq %xmm4, %xmm0
442 ; X32-SSE-NEXT: movsd {{.*#+}} xmm0 = xmm3[0],xmm0[1]
443 ; X32-SSE-NEXT: xorpd %xmm2, %xmm0
444 ; X32-SSE-NEXT: psubq %xmm2, %xmm0
446 %shift = ashr <2 x i16> %a, %b
450 define <8 x i8> @var_shift_v8i8(<8 x i8> %a, <8 x i8> %b) nounwind {
451 ; SSE2-LABEL: var_shift_v8i8:
453 ; SSE2-NEXT: movdqa %xmm0, %xmm2
454 ; SSE2-NEXT: psllw $8, %xmm2
455 ; SSE2-NEXT: movdqa %xmm2, %xmm3
456 ; SSE2-NEXT: psraw $8, %xmm3
457 ; SSE2-NEXT: psllw $12, %xmm1
458 ; SSE2-NEXT: movdqa %xmm1, %xmm0
459 ; SSE2-NEXT: psraw $15, %xmm0
460 ; SSE2-NEXT: psraw $15, %xmm2
461 ; SSE2-NEXT: pand %xmm0, %xmm2
462 ; SSE2-NEXT: pandn %xmm3, %xmm0
463 ; SSE2-NEXT: por %xmm2, %xmm0
464 ; SSE2-NEXT: paddw %xmm1, %xmm1
465 ; SSE2-NEXT: movdqa %xmm1, %xmm2
466 ; SSE2-NEXT: psraw $15, %xmm2
467 ; SSE2-NEXT: movdqa %xmm2, %xmm3
468 ; SSE2-NEXT: pandn %xmm0, %xmm3
469 ; SSE2-NEXT: psraw $4, %xmm0
470 ; SSE2-NEXT: pand %xmm2, %xmm0
471 ; SSE2-NEXT: por %xmm3, %xmm0
472 ; SSE2-NEXT: paddw %xmm1, %xmm1
473 ; SSE2-NEXT: movdqa %xmm1, %xmm2
474 ; SSE2-NEXT: psraw $15, %xmm2
475 ; SSE2-NEXT: movdqa %xmm2, %xmm3
476 ; SSE2-NEXT: pandn %xmm0, %xmm3
477 ; SSE2-NEXT: psraw $2, %xmm0
478 ; SSE2-NEXT: pand %xmm2, %xmm0
479 ; SSE2-NEXT: por %xmm3, %xmm0
480 ; SSE2-NEXT: paddw %xmm1, %xmm1
481 ; SSE2-NEXT: psraw $15, %xmm1
482 ; SSE2-NEXT: movdqa %xmm1, %xmm2
483 ; SSE2-NEXT: pandn %xmm0, %xmm2
484 ; SSE2-NEXT: psraw $1, %xmm0
485 ; SSE2-NEXT: pand %xmm1, %xmm0
486 ; SSE2-NEXT: por %xmm2, %xmm0
489 ; SSE41-LABEL: var_shift_v8i8:
491 ; SSE41-NEXT: movdqa %xmm1, %xmm2
492 ; SSE41-NEXT: movdqa %xmm0, %xmm3
493 ; SSE41-NEXT: psllw $8, %xmm3
494 ; SSE41-NEXT: movdqa %xmm3, %xmm1
495 ; SSE41-NEXT: psraw $8, %xmm1
496 ; SSE41-NEXT: pand {{.*}}(%rip), %xmm2
497 ; SSE41-NEXT: movdqa %xmm2, %xmm0
498 ; SSE41-NEXT: psllw $12, %xmm0
499 ; SSE41-NEXT: psllw $4, %xmm2
500 ; SSE41-NEXT: por %xmm0, %xmm2
501 ; SSE41-NEXT: movdqa %xmm2, %xmm4
502 ; SSE41-NEXT: paddw %xmm2, %xmm4
503 ; SSE41-NEXT: psraw $15, %xmm3
504 ; SSE41-NEXT: movdqa %xmm2, %xmm0
505 ; SSE41-NEXT: pblendvb %xmm0, %xmm3, %xmm1
506 ; SSE41-NEXT: movdqa %xmm1, %xmm2
507 ; SSE41-NEXT: psraw $4, %xmm2
508 ; SSE41-NEXT: movdqa %xmm4, %xmm0
509 ; SSE41-NEXT: pblendvb %xmm0, %xmm2, %xmm1
510 ; SSE41-NEXT: movdqa %xmm1, %xmm2
511 ; SSE41-NEXT: psraw $2, %xmm2
512 ; SSE41-NEXT: paddw %xmm4, %xmm4
513 ; SSE41-NEXT: movdqa %xmm4, %xmm0
514 ; SSE41-NEXT: pblendvb %xmm0, %xmm2, %xmm1
515 ; SSE41-NEXT: movdqa %xmm1, %xmm2
516 ; SSE41-NEXT: psraw $1, %xmm2
517 ; SSE41-NEXT: paddw %xmm4, %xmm4
518 ; SSE41-NEXT: movdqa %xmm4, %xmm0
519 ; SSE41-NEXT: pblendvb %xmm0, %xmm2, %xmm1
520 ; SSE41-NEXT: movdqa %xmm1, %xmm0
523 ; AVX1-LABEL: var_shift_v8i8:
525 ; AVX1-NEXT: vpsllw $8, %xmm0, %xmm0
526 ; AVX1-NEXT: vpsraw $8, %xmm0, %xmm2
527 ; AVX1-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
528 ; AVX1-NEXT: vpsllw $12, %xmm1, %xmm3
529 ; AVX1-NEXT: vpsllw $4, %xmm1, %xmm1
530 ; AVX1-NEXT: vpor %xmm3, %xmm1, %xmm1
531 ; AVX1-NEXT: vpaddw %xmm1, %xmm1, %xmm3
532 ; AVX1-NEXT: vpsraw $15, %xmm0, %xmm0
533 ; AVX1-NEXT: vpblendvb %xmm1, %xmm0, %xmm2, %xmm0
534 ; AVX1-NEXT: vpsraw $4, %xmm0, %xmm1
535 ; AVX1-NEXT: vpblendvb %xmm3, %xmm1, %xmm0, %xmm0
536 ; AVX1-NEXT: vpsraw $2, %xmm0, %xmm1
537 ; AVX1-NEXT: vpaddw %xmm3, %xmm3, %xmm2
538 ; AVX1-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm0
539 ; AVX1-NEXT: vpsraw $1, %xmm0, %xmm1
540 ; AVX1-NEXT: vpaddw %xmm2, %xmm2, %xmm2
541 ; AVX1-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm0
544 ; AVX2-LABEL: var_shift_v8i8:
546 ; AVX2-NEXT: vpsllw $8, %xmm0, %xmm0
547 ; AVX2-NEXT: vpsraw $8, %xmm0, %xmm0
548 ; AVX2-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
549 ; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
550 ; AVX2-NEXT: vpmovsxwd %xmm0, %ymm0
551 ; AVX2-NEXT: vpsravd %ymm1, %ymm0, %ymm0
552 ; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
553 ; AVX2-NEXT: vpackssdw %xmm1, %xmm0, %xmm0
554 ; AVX2-NEXT: vzeroupper
557 ; XOP-LABEL: var_shift_v8i8:
559 ; XOP-NEXT: vpsllw $8, %xmm0, %xmm0
560 ; XOP-NEXT: vpsraw $8, %xmm0, %xmm0
561 ; XOP-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
562 ; XOP-NEXT: vpxor %xmm2, %xmm2, %xmm2
563 ; XOP-NEXT: vpsubw %xmm1, %xmm2, %xmm1
564 ; XOP-NEXT: vpshaw %xmm1, %xmm0, %xmm0
567 ; AVX512DQ-LABEL: var_shift_v8i8:
569 ; AVX512DQ-NEXT: vpsllw $8, %xmm0, %xmm0
570 ; AVX512DQ-NEXT: vpsraw $8, %xmm0, %xmm0
571 ; AVX512DQ-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
572 ; AVX512DQ-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
573 ; AVX512DQ-NEXT: vpmovsxwd %xmm0, %ymm0
574 ; AVX512DQ-NEXT: vpsravd %ymm1, %ymm0, %ymm0
575 ; AVX512DQ-NEXT: vpmovdw %zmm0, %ymm0
576 ; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
577 ; AVX512DQ-NEXT: vzeroupper
578 ; AVX512DQ-NEXT: retq
580 ; AVX512BW-LABEL: var_shift_v8i8:
582 ; AVX512BW-NEXT: vpsllw $8, %xmm0, %xmm0
583 ; AVX512BW-NEXT: vpsraw $8, %xmm0, %xmm0
584 ; AVX512BW-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
585 ; AVX512BW-NEXT: vpsravw %zmm1, %zmm0, %zmm0
586 ; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
587 ; AVX512BW-NEXT: vzeroupper
588 ; AVX512BW-NEXT: retq
590 ; AVX512DQVL-LABEL: var_shift_v8i8:
591 ; AVX512DQVL: # %bb.0:
592 ; AVX512DQVL-NEXT: vpsllw $8, %xmm0, %xmm0
593 ; AVX512DQVL-NEXT: vpsraw $8, %xmm0, %xmm0
594 ; AVX512DQVL-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
595 ; AVX512DQVL-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
596 ; AVX512DQVL-NEXT: vpmovsxwd %xmm0, %ymm0
597 ; AVX512DQVL-NEXT: vpsravd %ymm1, %ymm0, %ymm0
598 ; AVX512DQVL-NEXT: vpmovdw %ymm0, %xmm0
599 ; AVX512DQVL-NEXT: vzeroupper
600 ; AVX512DQVL-NEXT: retq
602 ; AVX512BWVL-LABEL: var_shift_v8i8:
603 ; AVX512BWVL: # %bb.0:
604 ; AVX512BWVL-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
605 ; AVX512BWVL-NEXT: vpsllw $8, %xmm0, %xmm0
606 ; AVX512BWVL-NEXT: vpsraw $8, %xmm0, %xmm0
607 ; AVX512BWVL-NEXT: vpsravw %xmm1, %xmm0, %xmm0
608 ; AVX512BWVL-NEXT: retq
610 ; X32-SSE-LABEL: var_shift_v8i8:
612 ; X32-SSE-NEXT: movdqa %xmm0, %xmm2
613 ; X32-SSE-NEXT: psllw $8, %xmm2
614 ; X32-SSE-NEXT: movdqa %xmm2, %xmm3
615 ; X32-SSE-NEXT: psraw $8, %xmm3
616 ; X32-SSE-NEXT: psllw $12, %xmm1
617 ; X32-SSE-NEXT: movdqa %xmm1, %xmm0
618 ; X32-SSE-NEXT: psraw $15, %xmm0
619 ; X32-SSE-NEXT: psraw $15, %xmm2
620 ; X32-SSE-NEXT: pand %xmm0, %xmm2
621 ; X32-SSE-NEXT: pandn %xmm3, %xmm0
622 ; X32-SSE-NEXT: por %xmm2, %xmm0
623 ; X32-SSE-NEXT: paddw %xmm1, %xmm1
624 ; X32-SSE-NEXT: movdqa %xmm1, %xmm2
625 ; X32-SSE-NEXT: psraw $15, %xmm2
626 ; X32-SSE-NEXT: movdqa %xmm2, %xmm3
627 ; X32-SSE-NEXT: pandn %xmm0, %xmm3
628 ; X32-SSE-NEXT: psraw $4, %xmm0
629 ; X32-SSE-NEXT: pand %xmm2, %xmm0
630 ; X32-SSE-NEXT: por %xmm3, %xmm0
631 ; X32-SSE-NEXT: paddw %xmm1, %xmm1
632 ; X32-SSE-NEXT: movdqa %xmm1, %xmm2
633 ; X32-SSE-NEXT: psraw $15, %xmm2
634 ; X32-SSE-NEXT: movdqa %xmm2, %xmm3
635 ; X32-SSE-NEXT: pandn %xmm0, %xmm3
636 ; X32-SSE-NEXT: psraw $2, %xmm0
637 ; X32-SSE-NEXT: pand %xmm2, %xmm0
638 ; X32-SSE-NEXT: por %xmm3, %xmm0
639 ; X32-SSE-NEXT: paddw %xmm1, %xmm1
640 ; X32-SSE-NEXT: psraw $15, %xmm1
641 ; X32-SSE-NEXT: movdqa %xmm1, %xmm2
642 ; X32-SSE-NEXT: pandn %xmm0, %xmm2
643 ; X32-SSE-NEXT: psraw $1, %xmm0
644 ; X32-SSE-NEXT: pand %xmm1, %xmm0
645 ; X32-SSE-NEXT: por %xmm2, %xmm0
647 %shift = ashr <8 x i8> %a, %b
651 define <4 x i8> @var_shift_v4i8(<4 x i8> %a, <4 x i8> %b) nounwind {
652 ; SSE2-LABEL: var_shift_v4i8:
654 ; SSE2-NEXT: pslld $24, %xmm0
655 ; SSE2-NEXT: psrad $24, %xmm0
656 ; SSE2-NEXT: pand {{.*}}(%rip), %xmm1
657 ; SSE2-NEXT: pshuflw {{.*#+}} xmm2 = xmm1[2,3,3,3,4,5,6,7]
658 ; SSE2-NEXT: movdqa %xmm0, %xmm3
659 ; SSE2-NEXT: psrad %xmm2, %xmm3
660 ; SSE2-NEXT: pshuflw {{.*#+}} xmm4 = xmm1[0,1,1,1,4,5,6,7]
661 ; SSE2-NEXT: movdqa %xmm0, %xmm2
662 ; SSE2-NEXT: psrad %xmm4, %xmm2
663 ; SSE2-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm3[0]
664 ; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
665 ; SSE2-NEXT: pshuflw {{.*#+}} xmm3 = xmm1[2,3,3,3,4,5,6,7]
666 ; SSE2-NEXT: movdqa %xmm0, %xmm4
667 ; SSE2-NEXT: psrad %xmm3, %xmm4
668 ; SSE2-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,1,1,1,4,5,6,7]
669 ; SSE2-NEXT: psrad %xmm1, %xmm0
670 ; SSE2-NEXT: punpckhqdq {{.*#+}} xmm0 = xmm0[1],xmm4[1]
671 ; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,3],xmm0[0,3]
672 ; SSE2-NEXT: movaps %xmm2, %xmm0
675 ; SSE41-LABEL: var_shift_v4i8:
677 ; SSE41-NEXT: pslld $24, %xmm0
678 ; SSE41-NEXT: psrad $24, %xmm0
679 ; SSE41-NEXT: pand {{.*}}(%rip), %xmm1
680 ; SSE41-NEXT: pshuflw {{.*#+}} xmm2 = xmm1[2,3,3,3,4,5,6,7]
681 ; SSE41-NEXT: movdqa %xmm0, %xmm3
682 ; SSE41-NEXT: psrad %xmm2, %xmm3
683 ; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm1[2,3,0,1]
684 ; SSE41-NEXT: pshuflw {{.*#+}} xmm4 = xmm2[2,3,3,3,4,5,6,7]
685 ; SSE41-NEXT: movdqa %xmm0, %xmm5
686 ; SSE41-NEXT: psrad %xmm4, %xmm5
687 ; SSE41-NEXT: pblendw {{.*#+}} xmm5 = xmm3[0,1,2,3],xmm5[4,5,6,7]
688 ; SSE41-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,1,1,1,4,5,6,7]
689 ; SSE41-NEXT: movdqa %xmm0, %xmm3
690 ; SSE41-NEXT: psrad %xmm1, %xmm3
691 ; SSE41-NEXT: pshuflw {{.*#+}} xmm1 = xmm2[0,1,1,1,4,5,6,7]
692 ; SSE41-NEXT: psrad %xmm1, %xmm0
693 ; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm3[0,1,2,3],xmm0[4,5,6,7]
694 ; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm5[2,3],xmm0[4,5],xmm5[6,7]
697 ; AVX1-LABEL: var_shift_v4i8:
699 ; AVX1-NEXT: vpslld $24, %xmm0, %xmm0
700 ; AVX1-NEXT: vpsrad $24, %xmm0, %xmm0
701 ; AVX1-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
702 ; AVX1-NEXT: vpsrldq {{.*#+}} xmm2 = xmm1[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
703 ; AVX1-NEXT: vpsrad %xmm2, %xmm0, %xmm2
704 ; AVX1-NEXT: vpsrlq $32, %xmm1, %xmm3
705 ; AVX1-NEXT: vpsrad %xmm3, %xmm0, %xmm3
706 ; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm3[0,1,2,3],xmm2[4,5,6,7]
707 ; AVX1-NEXT: vpxor %xmm3, %xmm3, %xmm3
708 ; AVX1-NEXT: vpunpckhdq {{.*#+}} xmm3 = xmm1[2],xmm3[2],xmm1[3],xmm3[3]
709 ; AVX1-NEXT: vpsrad %xmm3, %xmm0, %xmm3
710 ; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
711 ; AVX1-NEXT: vpsrad %xmm1, %xmm0, %xmm0
712 ; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm3[4,5,6,7]
713 ; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
716 ; AVX2-LABEL: var_shift_v4i8:
718 ; AVX2-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
719 ; AVX2-NEXT: vpslld $24, %xmm0, %xmm0
720 ; AVX2-NEXT: vpsrad $24, %xmm0, %xmm0
721 ; AVX2-NEXT: vpsravd %xmm1, %xmm0, %xmm0
724 ; XOPAVX1-LABEL: var_shift_v4i8:
726 ; XOPAVX1-NEXT: vpslld $24, %xmm0, %xmm0
727 ; XOPAVX1-NEXT: vpsrad $24, %xmm0, %xmm0
728 ; XOPAVX1-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
729 ; XOPAVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
730 ; XOPAVX1-NEXT: vpsubd %xmm1, %xmm2, %xmm1
731 ; XOPAVX1-NEXT: vpshad %xmm1, %xmm0, %xmm0
734 ; XOPAVX2-LABEL: var_shift_v4i8:
736 ; XOPAVX2-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
737 ; XOPAVX2-NEXT: vpslld $24, %xmm0, %xmm0
738 ; XOPAVX2-NEXT: vpsrad $24, %xmm0, %xmm0
739 ; XOPAVX2-NEXT: vpsravd %xmm1, %xmm0, %xmm0
742 ; AVX512-LABEL: var_shift_v4i8:
744 ; AVX512-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
745 ; AVX512-NEXT: vpslld $24, %xmm0, %xmm0
746 ; AVX512-NEXT: vpsrad $24, %xmm0, %xmm0
747 ; AVX512-NEXT: vpsravd %xmm1, %xmm0, %xmm0
750 ; AVX512VL-LABEL: var_shift_v4i8:
752 ; AVX512VL-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
753 ; AVX512VL-NEXT: vpslld $24, %xmm0, %xmm0
754 ; AVX512VL-NEXT: vpsrad $24, %xmm0, %xmm0
755 ; AVX512VL-NEXT: vpsravd %xmm1, %xmm0, %xmm0
756 ; AVX512VL-NEXT: retq
758 ; X32-SSE-LABEL: var_shift_v4i8:
760 ; X32-SSE-NEXT: pslld $24, %xmm0
761 ; X32-SSE-NEXT: psrad $24, %xmm0
762 ; X32-SSE-NEXT: pand {{\.LCPI.*}}, %xmm1
763 ; X32-SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm1[2,3,3,3,4,5,6,7]
764 ; X32-SSE-NEXT: movdqa %xmm0, %xmm3
765 ; X32-SSE-NEXT: psrad %xmm2, %xmm3
766 ; X32-SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm1[0,1,1,1,4,5,6,7]
767 ; X32-SSE-NEXT: movdqa %xmm0, %xmm2
768 ; X32-SSE-NEXT: psrad %xmm4, %xmm2
769 ; X32-SSE-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm3[0]
770 ; X32-SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
771 ; X32-SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm1[2,3,3,3,4,5,6,7]
772 ; X32-SSE-NEXT: movdqa %xmm0, %xmm4
773 ; X32-SSE-NEXT: psrad %xmm3, %xmm4
774 ; X32-SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,1,1,1,4,5,6,7]
775 ; X32-SSE-NEXT: psrad %xmm1, %xmm0
776 ; X32-SSE-NEXT: punpckhqdq {{.*#+}} xmm0 = xmm0[1],xmm4[1]
777 ; X32-SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,3],xmm0[0,3]
778 ; X32-SSE-NEXT: movaps %xmm2, %xmm0
780 %shift = ashr <4 x i8> %a, %b
784 define <2 x i8> @var_shift_v2i8(<2 x i8> %a, <2 x i8> %b) nounwind {
785 ; SSE2-LABEL: var_shift_v2i8:
787 ; SSE2-NEXT: psllq $56, %xmm0
788 ; SSE2-NEXT: movdqa %xmm0, %xmm2
789 ; SSE2-NEXT: psrad $31, %xmm2
790 ; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,3,2,3]
791 ; SSE2-NEXT: psrad $24, %xmm0
792 ; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,3,2,3]
793 ; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
794 ; SSE2-NEXT: pand {{.*}}(%rip), %xmm1
795 ; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
796 ; SSE2-NEXT: movdqa %xmm2, %xmm3
797 ; SSE2-NEXT: psrlq %xmm1, %xmm3
798 ; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm1[2,3,0,1]
799 ; SSE2-NEXT: psrlq %xmm4, %xmm2
800 ; SSE2-NEXT: movsd {{.*#+}} xmm2 = xmm3[0],xmm2[1]
801 ; SSE2-NEXT: movdqa %xmm0, %xmm3
802 ; SSE2-NEXT: psrlq %xmm1, %xmm3
803 ; SSE2-NEXT: psrlq %xmm4, %xmm0
804 ; SSE2-NEXT: movsd {{.*#+}} xmm0 = xmm3[0],xmm0[1]
805 ; SSE2-NEXT: xorpd %xmm2, %xmm0
806 ; SSE2-NEXT: psubq %xmm2, %xmm0
809 ; SSE41-LABEL: var_shift_v2i8:
811 ; SSE41-NEXT: psllq $56, %xmm0
812 ; SSE41-NEXT: movdqa %xmm0, %xmm2
813 ; SSE41-NEXT: psrad $31, %xmm2
814 ; SSE41-NEXT: psrad $24, %xmm0
815 ; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
816 ; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
817 ; SSE41-NEXT: pand {{.*}}(%rip), %xmm1
818 ; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
819 ; SSE41-NEXT: movdqa %xmm2, %xmm3
820 ; SSE41-NEXT: psrlq %xmm1, %xmm3
821 ; SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm1[2,3,0,1]
822 ; SSE41-NEXT: psrlq %xmm4, %xmm2
823 ; SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm3[0,1,2,3],xmm2[4,5,6,7]
824 ; SSE41-NEXT: movdqa %xmm0, %xmm3
825 ; SSE41-NEXT: psrlq %xmm4, %xmm3
826 ; SSE41-NEXT: psrlq %xmm1, %xmm0
827 ; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm3[4,5,6,7]
828 ; SSE41-NEXT: pxor %xmm2, %xmm0
829 ; SSE41-NEXT: psubq %xmm2, %xmm0
832 ; AVX1-LABEL: var_shift_v2i8:
834 ; AVX1-NEXT: vpsllq $56, %xmm0, %xmm0
835 ; AVX1-NEXT: vpsrad $31, %xmm0, %xmm2
836 ; AVX1-NEXT: vpsrad $24, %xmm0, %xmm0
837 ; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
838 ; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
839 ; AVX1-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
840 ; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
841 ; AVX1-NEXT: vpsrlq %xmm1, %xmm2, %xmm3
842 ; AVX1-NEXT: vpshufd {{.*#+}} xmm4 = xmm1[2,3,0,1]
843 ; AVX1-NEXT: vpsrlq %xmm4, %xmm2, %xmm2
844 ; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm3[0,1,2,3],xmm2[4,5,6,7]
845 ; AVX1-NEXT: vpsrlq %xmm4, %xmm0, %xmm3
846 ; AVX1-NEXT: vpsrlq %xmm1, %xmm0, %xmm0
847 ; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm3[4,5,6,7]
848 ; AVX1-NEXT: vpxor %xmm2, %xmm0, %xmm0
849 ; AVX1-NEXT: vpsubq %xmm2, %xmm0, %xmm0
852 ; AVX2-LABEL: var_shift_v2i8:
854 ; AVX2-NEXT: vpsllq $56, %xmm0, %xmm0
855 ; AVX2-NEXT: vpsrad $31, %xmm0, %xmm2
856 ; AVX2-NEXT: vpsrad $24, %xmm0, %xmm0
857 ; AVX2-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
858 ; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3]
859 ; AVX2-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
860 ; AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
861 ; AVX2-NEXT: vpsrlvq %xmm1, %xmm2, %xmm2
862 ; AVX2-NEXT: vpsrlvq %xmm1, %xmm0, %xmm0
863 ; AVX2-NEXT: vpxor %xmm2, %xmm0, %xmm0
864 ; AVX2-NEXT: vpsubq %xmm2, %xmm0, %xmm0
867 ; XOP-LABEL: var_shift_v2i8:
869 ; XOP-NEXT: vpsllq $56, %xmm0, %xmm0
870 ; XOP-NEXT: vpshaq {{.*}}(%rip), %xmm0, %xmm0
871 ; XOP-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
872 ; XOP-NEXT: vpxor %xmm2, %xmm2, %xmm2
873 ; XOP-NEXT: vpsubq %xmm1, %xmm2, %xmm1
874 ; XOP-NEXT: vpshaq %xmm1, %xmm0, %xmm0
877 ; AVX512-LABEL: var_shift_v2i8:
879 ; AVX512-NEXT: vpsllq $56, %xmm0, %xmm0
880 ; AVX512-NEXT: vpsraq $56, %zmm0, %zmm0
881 ; AVX512-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
882 ; AVX512-NEXT: vpsravq %zmm1, %zmm0, %zmm0
883 ; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
884 ; AVX512-NEXT: vzeroupper
887 ; AVX512VL-LABEL: var_shift_v2i8:
889 ; AVX512VL-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
890 ; AVX512VL-NEXT: vpsllq $56, %xmm0, %xmm0
891 ; AVX512VL-NEXT: vpsraq $56, %xmm0, %xmm0
892 ; AVX512VL-NEXT: vpsravq %xmm1, %xmm0, %xmm0
893 ; AVX512VL-NEXT: retq
895 ; X32-SSE-LABEL: var_shift_v2i8:
897 ; X32-SSE-NEXT: psllq $56, %xmm0
898 ; X32-SSE-NEXT: movdqa %xmm0, %xmm2
899 ; X32-SSE-NEXT: psrad $31, %xmm2
900 ; X32-SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,3,2,3]
901 ; X32-SSE-NEXT: psrad $24, %xmm0
902 ; X32-SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,3,2,3]
903 ; X32-SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
904 ; X32-SSE-NEXT: pand {{\.LCPI.*}}, %xmm1
905 ; X32-SSE-NEXT: movdqa {{.*#+}} xmm2 = [0,2147483648,0,2147483648]
906 ; X32-SSE-NEXT: movdqa %xmm2, %xmm3
907 ; X32-SSE-NEXT: psrlq %xmm1, %xmm3
908 ; X32-SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm1[2,3,0,1]
909 ; X32-SSE-NEXT: psrlq %xmm4, %xmm2
910 ; X32-SSE-NEXT: movsd {{.*#+}} xmm2 = xmm3[0],xmm2[1]
911 ; X32-SSE-NEXT: movdqa %xmm0, %xmm3
912 ; X32-SSE-NEXT: psrlq %xmm1, %xmm3
913 ; X32-SSE-NEXT: psrlq %xmm4, %xmm0
914 ; X32-SSE-NEXT: movsd {{.*#+}} xmm0 = xmm3[0],xmm0[1]
915 ; X32-SSE-NEXT: xorpd %xmm2, %xmm0
916 ; X32-SSE-NEXT: psubq %xmm2, %xmm0
918 %shift = ashr <2 x i8> %a, %b
923 ; Uniform Variable Shifts
926 define <2 x i32> @splatvar_shift_v2i32(<2 x i32> %a, <2 x i32> %b) nounwind {
927 ; SSE2-LABEL: splatvar_shift_v2i32:
929 ; SSE2-NEXT: psllq $32, %xmm0
930 ; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,3,2,3]
931 ; SSE2-NEXT: psrad $31, %xmm0
932 ; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,3,2,3]
933 ; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
934 ; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[0,1,0,1]
935 ; SSE2-NEXT: pand {{.*}}(%rip), %xmm0
936 ; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [9223372036854775808,9223372036854775808]
937 ; SSE2-NEXT: movdqa %xmm1, %xmm3
938 ; SSE2-NEXT: psrlq %xmm0, %xmm3
939 ; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm0[2,3,0,1]
940 ; SSE2-NEXT: psrlq %xmm4, %xmm1
941 ; SSE2-NEXT: movsd {{.*#+}} xmm1 = xmm3[0],xmm1[1]
942 ; SSE2-NEXT: movdqa %xmm2, %xmm3
943 ; SSE2-NEXT: psrlq %xmm0, %xmm3
944 ; SSE2-NEXT: psrlq %xmm4, %xmm2
945 ; SSE2-NEXT: movsd {{.*#+}} xmm2 = xmm3[0],xmm2[1]
946 ; SSE2-NEXT: xorpd %xmm1, %xmm2
947 ; SSE2-NEXT: psubq %xmm1, %xmm2
948 ; SSE2-NEXT: movdqa %xmm2, %xmm0
951 ; SSE41-LABEL: splatvar_shift_v2i32:
953 ; SSE41-NEXT: movdqa %xmm0, %xmm2
954 ; SSE41-NEXT: psllq $32, %xmm2
955 ; SSE41-NEXT: psrad $31, %xmm2
956 ; SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
957 ; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[0,1,0,1]
958 ; SSE41-NEXT: pxor %xmm1, %xmm1
959 ; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7]
960 ; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
961 ; SSE41-NEXT: movdqa %xmm2, %xmm3
962 ; SSE41-NEXT: psrlq %xmm0, %xmm3
963 ; SSE41-NEXT: psrlq %xmm1, %xmm2
964 ; SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm3[4,5,6,7]
965 ; SSE41-NEXT: movdqa {{.*#+}} xmm3 = [9223372036854775808,9223372036854775808]
966 ; SSE41-NEXT: movdqa %xmm3, %xmm4
967 ; SSE41-NEXT: psrlq %xmm1, %xmm4
968 ; SSE41-NEXT: psrlq %xmm0, %xmm3
969 ; SSE41-NEXT: pblendw {{.*#+}} xmm3 = xmm4[0,1,2,3],xmm3[4,5,6,7]
970 ; SSE41-NEXT: pxor %xmm3, %xmm2
971 ; SSE41-NEXT: psubq %xmm3, %xmm2
972 ; SSE41-NEXT: movdqa %xmm2, %xmm0
975 ; AVX1-LABEL: splatvar_shift_v2i32:
977 ; AVX1-NEXT: vpsllq $32, %xmm0, %xmm2
978 ; AVX1-NEXT: vpsrad $31, %xmm2, %xmm2
979 ; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
980 ; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,1,0,1]
981 ; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
982 ; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7]
983 ; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[2,3,0,1]
984 ; AVX1-NEXT: vpsrlq %xmm2, %xmm0, %xmm3
985 ; AVX1-NEXT: vpsrlq %xmm1, %xmm0, %xmm0
986 ; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm3[4,5,6,7]
987 ; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [9223372036854775808,9223372036854775808]
988 ; AVX1-NEXT: vpsrlq %xmm1, %xmm3, %xmm1
989 ; AVX1-NEXT: vpsrlq %xmm2, %xmm3, %xmm2
990 ; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm2[4,5,6,7]
991 ; AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
992 ; AVX1-NEXT: vpsubq %xmm1, %xmm0, %xmm0
995 ; AVX2-LABEL: splatvar_shift_v2i32:
997 ; AVX2-NEXT: vpsllq $32, %xmm0, %xmm2
998 ; AVX2-NEXT: vpsrad $31, %xmm2, %xmm2
999 ; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3]
1000 ; AVX2-NEXT: vpbroadcastq %xmm1, %xmm1
1001 ; AVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2
1002 ; AVX2-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3]
1003 ; AVX2-NEXT: vpsrlvq %xmm1, %xmm0, %xmm0
1004 ; AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
1005 ; AVX2-NEXT: vpsrlvq %xmm1, %xmm2, %xmm1
1006 ; AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
1007 ; AVX2-NEXT: vpsubq %xmm1, %xmm0, %xmm0
1010 ; XOPAVX1-LABEL: splatvar_shift_v2i32:
1012 ; XOPAVX1-NEXT: vpsllq $32, %xmm0, %xmm0
1013 ; XOPAVX1-NEXT: vpshaq {{.*}}(%rip), %xmm0, %xmm0
1014 ; XOPAVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,1,0,1]
1015 ; XOPAVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
1016 ; XOPAVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7]
1017 ; XOPAVX1-NEXT: vpsubq %xmm1, %xmm2, %xmm1
1018 ; XOPAVX1-NEXT: vpshaq %xmm1, %xmm0, %xmm0
1019 ; XOPAVX1-NEXT: retq
1021 ; XOPAVX2-LABEL: splatvar_shift_v2i32:
1023 ; XOPAVX2-NEXT: vpsllq $32, %xmm0, %xmm0
1024 ; XOPAVX2-NEXT: vpshaq {{.*}}(%rip), %xmm0, %xmm0
1025 ; XOPAVX2-NEXT: vpbroadcastq %xmm1, %xmm1
1026 ; XOPAVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2
1027 ; XOPAVX2-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3]
1028 ; XOPAVX2-NEXT: vpsubq %xmm1, %xmm2, %xmm1
1029 ; XOPAVX2-NEXT: vpshaq %xmm1, %xmm0, %xmm0
1030 ; XOPAVX2-NEXT: retq
1032 ; AVX512-LABEL: splatvar_shift_v2i32:
1034 ; AVX512-NEXT: vpsllq $32, %xmm0, %xmm0
1035 ; AVX512-NEXT: vpsraq $32, %zmm0, %zmm0
1036 ; AVX512-NEXT: vpbroadcastq %xmm1, %xmm1
1037 ; AVX512-NEXT: vpxor %xmm2, %xmm2, %xmm2
1038 ; AVX512-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3]
1039 ; AVX512-NEXT: vpsravq %zmm1, %zmm0, %zmm0
1040 ; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
1041 ; AVX512-NEXT: vzeroupper
1044 ; AVX512VL-LABEL: splatvar_shift_v2i32:
1045 ; AVX512VL: # %bb.0:
1046 ; AVX512VL-NEXT: vpsllq $32, %xmm0, %xmm0
1047 ; AVX512VL-NEXT: vpsraq $32, %xmm0, %xmm0
1048 ; AVX512VL-NEXT: vpbroadcastq %xmm1, %xmm1
1049 ; AVX512VL-NEXT: vpxor %xmm2, %xmm2, %xmm2
1050 ; AVX512VL-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3]
1051 ; AVX512VL-NEXT: vpsravq %xmm1, %xmm0, %xmm0
1052 ; AVX512VL-NEXT: retq
1054 ; X32-SSE-LABEL: splatvar_shift_v2i32:
1056 ; X32-SSE-NEXT: psllq $32, %xmm0
1057 ; X32-SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,3,2,3]
1058 ; X32-SSE-NEXT: psrad $31, %xmm0
1059 ; X32-SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,3,2,3]
1060 ; X32-SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
1061 ; X32-SSE-NEXT: movdqa {{.*#+}} xmm0 = [4294967295,0,4294967295,0]
1062 ; X32-SSE-NEXT: pand %xmm1, %xmm0
1063 ; X32-SSE-NEXT: movdqa {{.*#+}} xmm3 = [0,2147483648,0,2147483648]
1064 ; X32-SSE-NEXT: movdqa %xmm3, %xmm4
1065 ; X32-SSE-NEXT: psrlq %xmm0, %xmm4
1066 ; X32-SSE-NEXT: xorps %xmm5, %xmm5
1067 ; X32-SSE-NEXT: movss {{.*#+}} xmm5 = xmm1[0],xmm5[1,2,3]
1068 ; X32-SSE-NEXT: psrlq %xmm5, %xmm3
1069 ; X32-SSE-NEXT: movsd {{.*#+}} xmm3 = xmm4[0],xmm3[1]
1070 ; X32-SSE-NEXT: movdqa %xmm2, %xmm1
1071 ; X32-SSE-NEXT: psrlq %xmm5, %xmm1
1072 ; X32-SSE-NEXT: psrlq %xmm0, %xmm2
1073 ; X32-SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[0],xmm1[1]
1074 ; X32-SSE-NEXT: xorpd %xmm3, %xmm2
1075 ; X32-SSE-NEXT: psubq %xmm3, %xmm2
1076 ; X32-SSE-NEXT: movdqa %xmm2, %xmm0
1077 ; X32-SSE-NEXT: retl
1078 %splat = shufflevector <2 x i32> %b, <2 x i32> undef, <2 x i32> zeroinitializer
1079 %shift = ashr <2 x i32> %a, %splat
1080 ret <2 x i32> %shift
1083 define <4 x i16> @splatvar_shift_v4i16(<4 x i16> %a, <4 x i16> %b) nounwind {
1084 ; SSE2-LABEL: splatvar_shift_v4i16:
1086 ; SSE2-NEXT: pslld $16, %xmm0
1087 ; SSE2-NEXT: psrad $16, %xmm0
1088 ; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm1[0,0,0,0]
1089 ; SSE2-NEXT: pand {{.*}}(%rip), %xmm2
1090 ; SSE2-NEXT: pshuflw {{.*#+}} xmm1 = xmm2[2,3,3,3,4,5,6,7]
1091 ; SSE2-NEXT: movdqa %xmm0, %xmm3
1092 ; SSE2-NEXT: psrad %xmm1, %xmm3
1093 ; SSE2-NEXT: pshuflw {{.*#+}} xmm4 = xmm2[0,1,1,1,4,5,6,7]
1094 ; SSE2-NEXT: movdqa %xmm0, %xmm1
1095 ; SSE2-NEXT: psrad %xmm4, %xmm1
1096 ; SSE2-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm3[0]
1097 ; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,3,0,1]
1098 ; SSE2-NEXT: pshuflw {{.*#+}} xmm3 = xmm2[2,3,3,3,4,5,6,7]
1099 ; SSE2-NEXT: movdqa %xmm0, %xmm4
1100 ; SSE2-NEXT: psrad %xmm3, %xmm4
1101 ; SSE2-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[0,1,1,1,4,5,6,7]
1102 ; SSE2-NEXT: psrad %xmm2, %xmm0
1103 ; SSE2-NEXT: punpckhqdq {{.*#+}} xmm0 = xmm0[1],xmm4[1]
1104 ; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,3],xmm0[0,3]
1105 ; SSE2-NEXT: movaps %xmm1, %xmm0
1108 ; SSE41-LABEL: splatvar_shift_v4i16:
1110 ; SSE41-NEXT: pslld $16, %xmm0
1111 ; SSE41-NEXT: psrad $16, %xmm0
1112 ; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,0,0]
1113 ; SSE41-NEXT: pxor %xmm2, %xmm2
1114 ; SSE41-NEXT: movdqa %xmm1, %xmm3
1115 ; SSE41-NEXT: pblendw {{.*#+}} xmm3 = xmm3[0],xmm2[1],xmm3[2],xmm2[3],xmm3[4],xmm2[5],xmm3[6],xmm2[7]
1116 ; SSE41-NEXT: pshuflw {{.*#+}} xmm4 = xmm3[2,3,3,3,4,5,6,7]
1117 ; SSE41-NEXT: movdqa %xmm0, %xmm5
1118 ; SSE41-NEXT: psrad %xmm4, %xmm5
1119 ; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm3[2,3,0,1]
1120 ; SSE41-NEXT: pshuflw {{.*#+}} xmm4 = xmm3[2,3,3,3,4,5,6,7]
1121 ; SSE41-NEXT: movdqa %xmm0, %xmm6
1122 ; SSE41-NEXT: psrad %xmm4, %xmm6
1123 ; SSE41-NEXT: pblendw {{.*#+}} xmm6 = xmm5[0,1,2,3],xmm6[4,5,6,7]
1124 ; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1,2,3],xmm1[4],xmm2[5],xmm1[6],xmm2[7]
1125 ; SSE41-NEXT: movdqa %xmm0, %xmm2
1126 ; SSE41-NEXT: psrad %xmm1, %xmm2
1127 ; SSE41-NEXT: pshuflw {{.*#+}} xmm1 = xmm3[0,1,1,1,4,5,6,7]
1128 ; SSE41-NEXT: psrad %xmm1, %xmm0
1129 ; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm2[0,1,2,3],xmm0[4,5,6,7]
1130 ; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm6[2,3],xmm0[4,5],xmm6[6,7]
1133 ; AVX1-LABEL: splatvar_shift_v4i16:
1135 ; AVX1-NEXT: vpslld $16, %xmm0, %xmm0
1136 ; AVX1-NEXT: vpsrad $16, %xmm0, %xmm0
1137 ; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,0,0]
1138 ; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
1139 ; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3],xmm1[4],xmm2[5],xmm1[6],xmm2[7]
1140 ; AVX1-NEXT: vpsrldq {{.*#+}} xmm3 = xmm1[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
1141 ; AVX1-NEXT: vpsrad %xmm3, %xmm0, %xmm3
1142 ; AVX1-NEXT: vpsrlq $32, %xmm1, %xmm4
1143 ; AVX1-NEXT: vpsrad %xmm4, %xmm0, %xmm4
1144 ; AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm4[0,1,2,3],xmm3[4,5,6,7]
1145 ; AVX1-NEXT: vpunpckhdq {{.*#+}} xmm2 = xmm1[2],xmm2[2],xmm1[3],xmm2[3]
1146 ; AVX1-NEXT: vpsrad %xmm2, %xmm0, %xmm2
1147 ; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
1148 ; AVX1-NEXT: vpsrad %xmm1, %xmm0, %xmm0
1149 ; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm2[4,5,6,7]
1150 ; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm3[2,3],xmm0[4,5],xmm3[6,7]
1153 ; AVX2-LABEL: splatvar_shift_v4i16:
1155 ; AVX2-NEXT: vpslld $16, %xmm0, %xmm0
1156 ; AVX2-NEXT: vpsrad $16, %xmm0, %xmm0
1157 ; AVX2-NEXT: vpbroadcastd %xmm1, %xmm1
1158 ; AVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2
1159 ; AVX2-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3],xmm1[4],xmm2[5],xmm1[6],xmm2[7]
1160 ; AVX2-NEXT: vpsravd %xmm1, %xmm0, %xmm0
1163 ; XOPAVX1-LABEL: splatvar_shift_v4i16:
1165 ; XOPAVX1-NEXT: vpslld $16, %xmm0, %xmm0
1166 ; XOPAVX1-NEXT: vpsrad $16, %xmm0, %xmm0
1167 ; XOPAVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,0,0]
1168 ; XOPAVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
1169 ; XOPAVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3],xmm1[4],xmm2[5],xmm1[6],xmm2[7]
1170 ; XOPAVX1-NEXT: vpsubd %xmm1, %xmm2, %xmm1
1171 ; XOPAVX1-NEXT: vpshad %xmm1, %xmm0, %xmm0
1172 ; XOPAVX1-NEXT: retq
1174 ; XOPAVX2-LABEL: splatvar_shift_v4i16:
1176 ; XOPAVX2-NEXT: vpslld $16, %xmm0, %xmm0
1177 ; XOPAVX2-NEXT: vpsrad $16, %xmm0, %xmm0
1178 ; XOPAVX2-NEXT: vpbroadcastd %xmm1, %xmm1
1179 ; XOPAVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2
1180 ; XOPAVX2-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3],xmm1[4],xmm2[5],xmm1[6],xmm2[7]
1181 ; XOPAVX2-NEXT: vpsravd %xmm1, %xmm0, %xmm0
1182 ; XOPAVX2-NEXT: retq
1184 ; AVX512-LABEL: splatvar_shift_v4i16:
1186 ; AVX512-NEXT: vpslld $16, %xmm0, %xmm0
1187 ; AVX512-NEXT: vpsrad $16, %xmm0, %xmm0
1188 ; AVX512-NEXT: vpbroadcastd %xmm1, %xmm1
1189 ; AVX512-NEXT: vpxor %xmm2, %xmm2, %xmm2
1190 ; AVX512-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3],xmm1[4],xmm2[5],xmm1[6],xmm2[7]
1191 ; AVX512-NEXT: vpsravd %xmm1, %xmm0, %xmm0
1194 ; AVX512VL-LABEL: splatvar_shift_v4i16:
1195 ; AVX512VL: # %bb.0:
1196 ; AVX512VL-NEXT: vpslld $16, %xmm0, %xmm0
1197 ; AVX512VL-NEXT: vpsrad $16, %xmm0, %xmm0
1198 ; AVX512VL-NEXT: vpbroadcastd %xmm1, %xmm1
1199 ; AVX512VL-NEXT: vpxor %xmm2, %xmm2, %xmm2
1200 ; AVX512VL-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3],xmm1[4],xmm2[5],xmm1[6],xmm2[7]
1201 ; AVX512VL-NEXT: vpsravd %xmm1, %xmm0, %xmm0
1202 ; AVX512VL-NEXT: retq
1204 ; X32-SSE-LABEL: splatvar_shift_v4i16:
1206 ; X32-SSE-NEXT: pslld $16, %xmm0
1207 ; X32-SSE-NEXT: psrad $16, %xmm0
1208 ; X32-SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm1[0,0,0,0]
1209 ; X32-SSE-NEXT: pand {{\.LCPI.*}}, %xmm2
1210 ; X32-SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm2[2,3,3,3,4,5,6,7]
1211 ; X32-SSE-NEXT: movdqa %xmm0, %xmm3
1212 ; X32-SSE-NEXT: psrad %xmm1, %xmm3
1213 ; X32-SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm2[0,1,1,1,4,5,6,7]
1214 ; X32-SSE-NEXT: movdqa %xmm0, %xmm1
1215 ; X32-SSE-NEXT: psrad %xmm4, %xmm1
1216 ; X32-SSE-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm3[0]
1217 ; X32-SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,3,0,1]
1218 ; X32-SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm2[2,3,3,3,4,5,6,7]
1219 ; X32-SSE-NEXT: movdqa %xmm0, %xmm4
1220 ; X32-SSE-NEXT: psrad %xmm3, %xmm4
1221 ; X32-SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[0,1,1,1,4,5,6,7]
1222 ; X32-SSE-NEXT: psrad %xmm2, %xmm0
1223 ; X32-SSE-NEXT: punpckhqdq {{.*#+}} xmm0 = xmm0[1],xmm4[1]
1224 ; X32-SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,3],xmm0[0,3]
1225 ; X32-SSE-NEXT: movaps %xmm1, %xmm0
1226 ; X32-SSE-NEXT: retl
1227 %splat = shufflevector <4 x i16> %b, <4 x i16> undef, <4 x i32> zeroinitializer
1228 %shift = ashr <4 x i16> %a, %splat
1229 ret <4 x i16> %shift
1232 define <2 x i16> @splatvar_shift_v2i16(<2 x i16> %a, <2 x i16> %b) nounwind {
1233 ; SSE2-LABEL: splatvar_shift_v2i16:
1235 ; SSE2-NEXT: psllq $48, %xmm0
1236 ; SSE2-NEXT: movdqa %xmm0, %xmm2
1237 ; SSE2-NEXT: psrad $31, %xmm2
1238 ; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,3,2,3]
1239 ; SSE2-NEXT: psrad $16, %xmm0
1240 ; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,3,2,3]
1241 ; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
1242 ; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,0,1]
1243 ; SSE2-NEXT: pand {{.*}}(%rip), %xmm1
1244 ; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
1245 ; SSE2-NEXT: movdqa %xmm2, %xmm3
1246 ; SSE2-NEXT: psrlq %xmm1, %xmm3
1247 ; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm1[2,3,0,1]
1248 ; SSE2-NEXT: psrlq %xmm4, %xmm2
1249 ; SSE2-NEXT: movsd {{.*#+}} xmm2 = xmm3[0],xmm2[1]
1250 ; SSE2-NEXT: movdqa %xmm0, %xmm3
1251 ; SSE2-NEXT: psrlq %xmm1, %xmm3
1252 ; SSE2-NEXT: psrlq %xmm4, %xmm0
1253 ; SSE2-NEXT: movsd {{.*#+}} xmm0 = xmm3[0],xmm0[1]
1254 ; SSE2-NEXT: xorpd %xmm2, %xmm0
1255 ; SSE2-NEXT: psubq %xmm2, %xmm0
1258 ; SSE41-LABEL: splatvar_shift_v2i16:
1260 ; SSE41-NEXT: psllq $48, %xmm0
1261 ; SSE41-NEXT: movdqa %xmm0, %xmm2
1262 ; SSE41-NEXT: psrad $31, %xmm2
1263 ; SSE41-NEXT: psrad $16, %xmm0
1264 ; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
1265 ; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
1266 ; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,0,1]
1267 ; SSE41-NEXT: pxor %xmm2, %xmm2
1268 ; SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm1[0],xmm2[1,2,3],xmm1[4],xmm2[5,6,7]
1269 ; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm2[2,3,0,1]
1270 ; SSE41-NEXT: movdqa %xmm0, %xmm3
1271 ; SSE41-NEXT: psrlq %xmm1, %xmm3
1272 ; SSE41-NEXT: psrlq %xmm2, %xmm0
1273 ; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm3[4,5,6,7]
1274 ; SSE41-NEXT: movdqa {{.*#+}} xmm3 = [9223372036854775808,9223372036854775808]
1275 ; SSE41-NEXT: movdqa %xmm3, %xmm4
1276 ; SSE41-NEXT: psrlq %xmm2, %xmm4
1277 ; SSE41-NEXT: psrlq %xmm1, %xmm3
1278 ; SSE41-NEXT: pblendw {{.*#+}} xmm3 = xmm4[0,1,2,3],xmm3[4,5,6,7]
1279 ; SSE41-NEXT: pxor %xmm3, %xmm0
1280 ; SSE41-NEXT: psubq %xmm3, %xmm0
1283 ; AVX1-LABEL: splatvar_shift_v2i16:
1285 ; AVX1-NEXT: vpsllq $48, %xmm0, %xmm0
1286 ; AVX1-NEXT: vpsrad $31, %xmm0, %xmm2
1287 ; AVX1-NEXT: vpsrad $16, %xmm0, %xmm0
1288 ; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
1289 ; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
1290 ; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,1,0,1]
1291 ; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
1292 ; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1,2,3],xmm1[4],xmm2[5,6,7]
1293 ; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[2,3,0,1]
1294 ; AVX1-NEXT: vpsrlq %xmm2, %xmm0, %xmm3
1295 ; AVX1-NEXT: vpsrlq %xmm1, %xmm0, %xmm0
1296 ; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm3[4,5,6,7]
1297 ; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [9223372036854775808,9223372036854775808]
1298 ; AVX1-NEXT: vpsrlq %xmm1, %xmm3, %xmm1
1299 ; AVX1-NEXT: vpsrlq %xmm2, %xmm3, %xmm2
1300 ; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm2[4,5,6,7]
1301 ; AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
1302 ; AVX1-NEXT: vpsubq %xmm1, %xmm0, %xmm0
1305 ; AVX2-LABEL: splatvar_shift_v2i16:
1307 ; AVX2-NEXT: vpsllq $48, %xmm0, %xmm0
1308 ; AVX2-NEXT: vpsrad $31, %xmm0, %xmm2
1309 ; AVX2-NEXT: vpsrad $16, %xmm0, %xmm0
1310 ; AVX2-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
1311 ; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3]
1312 ; AVX2-NEXT: vpbroadcastq %xmm1, %xmm1
1313 ; AVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2
1314 ; AVX2-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1,2,3],xmm1[4],xmm2[5,6,7]
1315 ; AVX2-NEXT: vpsrlvq %xmm1, %xmm0, %xmm0
1316 ; AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
1317 ; AVX2-NEXT: vpsrlvq %xmm1, %xmm2, %xmm1
1318 ; AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
1319 ; AVX2-NEXT: vpsubq %xmm1, %xmm0, %xmm0
1322 ; XOPAVX1-LABEL: splatvar_shift_v2i16:
1324 ; XOPAVX1-NEXT: vpsllq $48, %xmm0, %xmm0
1325 ; XOPAVX1-NEXT: vpshaq {{.*}}(%rip), %xmm0, %xmm0
1326 ; XOPAVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
1327 ; XOPAVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,1,0,1]
1328 ; XOPAVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1,2,3],xmm1[4],xmm2[5,6,7]
1329 ; XOPAVX1-NEXT: vpsubq %xmm1, %xmm2, %xmm1
1330 ; XOPAVX1-NEXT: vpshaq %xmm1, %xmm0, %xmm0
1331 ; XOPAVX1-NEXT: retq
1333 ; XOPAVX2-LABEL: splatvar_shift_v2i16:
1335 ; XOPAVX2-NEXT: vpsllq $48, %xmm0, %xmm0
1336 ; XOPAVX2-NEXT: vpshaq {{.*}}(%rip), %xmm0, %xmm0
1337 ; XOPAVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2
1338 ; XOPAVX2-NEXT: vpbroadcastq %xmm1, %xmm1
1339 ; XOPAVX2-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1,2,3],xmm1[4],xmm2[5,6,7]
1340 ; XOPAVX2-NEXT: vpsubq %xmm1, %xmm2, %xmm1
1341 ; XOPAVX2-NEXT: vpshaq %xmm1, %xmm0, %xmm0
1342 ; XOPAVX2-NEXT: retq
1344 ; AVX512-LABEL: splatvar_shift_v2i16:
1346 ; AVX512-NEXT: vpsllq $48, %xmm0, %xmm0
1347 ; AVX512-NEXT: vpsraq $48, %zmm0, %zmm0
1348 ; AVX512-NEXT: vpbroadcastq %xmm1, %xmm1
1349 ; AVX512-NEXT: vpxor %xmm2, %xmm2, %xmm2
1350 ; AVX512-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1,2,3],xmm1[4],xmm2[5,6,7]
1351 ; AVX512-NEXT: vpsravq %zmm1, %zmm0, %zmm0
1352 ; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
1353 ; AVX512-NEXT: vzeroupper
1356 ; AVX512VL-LABEL: splatvar_shift_v2i16:
1357 ; AVX512VL: # %bb.0:
1358 ; AVX512VL-NEXT: vpsllq $48, %xmm0, %xmm0
1359 ; AVX512VL-NEXT: vpsraq $48, %xmm0, %xmm0
1360 ; AVX512VL-NEXT: vpbroadcastq %xmm1, %xmm1
1361 ; AVX512VL-NEXT: vpxor %xmm2, %xmm2, %xmm2
1362 ; AVX512VL-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1,2,3],xmm1[4],xmm2[5,6,7]
1363 ; AVX512VL-NEXT: vpsravq %xmm1, %xmm0, %xmm0
1364 ; AVX512VL-NEXT: retq
1366 ; X32-SSE-LABEL: splatvar_shift_v2i16:
1368 ; X32-SSE-NEXT: psllq $48, %xmm0
1369 ; X32-SSE-NEXT: movdqa %xmm0, %xmm2
1370 ; X32-SSE-NEXT: psrad $31, %xmm2
1371 ; X32-SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,3,2,3]
1372 ; X32-SSE-NEXT: psrad $16, %xmm0
1373 ; X32-SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,3,2,3]
1374 ; X32-SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
1375 ; X32-SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,0,1]
1376 ; X32-SSE-NEXT: pand {{\.LCPI.*}}, %xmm1
1377 ; X32-SSE-NEXT: movdqa {{.*#+}} xmm2 = [0,2147483648,0,2147483648]
1378 ; X32-SSE-NEXT: movdqa %xmm2, %xmm3
1379 ; X32-SSE-NEXT: psrlq %xmm1, %xmm3
1380 ; X32-SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm1[2,3,0,1]
1381 ; X32-SSE-NEXT: psrlq %xmm4, %xmm2
1382 ; X32-SSE-NEXT: movsd {{.*#+}} xmm2 = xmm3[0],xmm2[1]
1383 ; X32-SSE-NEXT: movdqa %xmm0, %xmm3
1384 ; X32-SSE-NEXT: psrlq %xmm1, %xmm3
1385 ; X32-SSE-NEXT: psrlq %xmm4, %xmm0
1386 ; X32-SSE-NEXT: movsd {{.*#+}} xmm0 = xmm3[0],xmm0[1]
1387 ; X32-SSE-NEXT: xorpd %xmm2, %xmm0
1388 ; X32-SSE-NEXT: psubq %xmm2, %xmm0
1389 ; X32-SSE-NEXT: retl
1390 %splat = shufflevector <2 x i16> %b, <2 x i16> undef, <2 x i32> zeroinitializer
1391 %shift = ashr <2 x i16> %a, %splat
1392 ret <2 x i16> %shift
1395 define <8 x i8> @splatvar_shift_v8i8(<8 x i8> %a, <8 x i8> %b) nounwind {
1396 ; SSE2-LABEL: splatvar_shift_v8i8:
1398 ; SSE2-NEXT: movdqa %xmm0, %xmm2
1399 ; SSE2-NEXT: psllw $8, %xmm2
1400 ; SSE2-NEXT: movdqa %xmm2, %xmm3
1401 ; SSE2-NEXT: psraw $8, %xmm3
1402 ; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm1[0,0,2,3,4,5,6,7]
1403 ; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,0,0]
1404 ; SSE2-NEXT: psllw $12, %xmm1
1405 ; SSE2-NEXT: movdqa %xmm1, %xmm0
1406 ; SSE2-NEXT: psraw $15, %xmm0
1407 ; SSE2-NEXT: psraw $15, %xmm2
1408 ; SSE2-NEXT: pand %xmm0, %xmm2
1409 ; SSE2-NEXT: pandn %xmm3, %xmm0
1410 ; SSE2-NEXT: por %xmm2, %xmm0
1411 ; SSE2-NEXT: paddw %xmm1, %xmm1
1412 ; SSE2-NEXT: movdqa %xmm1, %xmm2
1413 ; SSE2-NEXT: psraw $15, %xmm2
1414 ; SSE2-NEXT: movdqa %xmm2, %xmm3
1415 ; SSE2-NEXT: pandn %xmm0, %xmm3
1416 ; SSE2-NEXT: psraw $4, %xmm0
1417 ; SSE2-NEXT: pand %xmm2, %xmm0
1418 ; SSE2-NEXT: por %xmm3, %xmm0
1419 ; SSE2-NEXT: paddw %xmm1, %xmm1
1420 ; SSE2-NEXT: movdqa %xmm1, %xmm2
1421 ; SSE2-NEXT: psraw $15, %xmm2
1422 ; SSE2-NEXT: movdqa %xmm2, %xmm3
1423 ; SSE2-NEXT: pandn %xmm0, %xmm3
1424 ; SSE2-NEXT: psraw $2, %xmm0
1425 ; SSE2-NEXT: pand %xmm2, %xmm0
1426 ; SSE2-NEXT: por %xmm3, %xmm0
1427 ; SSE2-NEXT: paddw %xmm1, %xmm1
1428 ; SSE2-NEXT: psraw $15, %xmm1
1429 ; SSE2-NEXT: movdqa %xmm1, %xmm2
1430 ; SSE2-NEXT: pandn %xmm0, %xmm2
1431 ; SSE2-NEXT: psraw $1, %xmm0
1432 ; SSE2-NEXT: pand %xmm1, %xmm0
1433 ; SSE2-NEXT: por %xmm2, %xmm0
1436 ; SSE41-LABEL: splatvar_shift_v8i8:
1438 ; SSE41-NEXT: movdqa %xmm1, %xmm2
1439 ; SSE41-NEXT: movdqa %xmm0, %xmm3
1440 ; SSE41-NEXT: psllw $8, %xmm3
1441 ; SSE41-NEXT: movdqa %xmm3, %xmm1
1442 ; SSE41-NEXT: psraw $8, %xmm1
1443 ; SSE41-NEXT: pshufb {{.*#+}} xmm2 = xmm2[0],zero,xmm2[0],zero,xmm2[0],zero,xmm2[0],zero,xmm2[0],zero,xmm2[0],zero,xmm2[0],zero,xmm2[0],zero
1444 ; SSE41-NEXT: movdqa %xmm2, %xmm0
1445 ; SSE41-NEXT: psllw $12, %xmm0
1446 ; SSE41-NEXT: psllw $4, %xmm2
1447 ; SSE41-NEXT: por %xmm0, %xmm2
1448 ; SSE41-NEXT: movdqa %xmm2, %xmm4
1449 ; SSE41-NEXT: paddw %xmm2, %xmm4
1450 ; SSE41-NEXT: psraw $15, %xmm3
1451 ; SSE41-NEXT: movdqa %xmm2, %xmm0
1452 ; SSE41-NEXT: pblendvb %xmm0, %xmm3, %xmm1
1453 ; SSE41-NEXT: movdqa %xmm1, %xmm2
1454 ; SSE41-NEXT: psraw $4, %xmm2
1455 ; SSE41-NEXT: movdqa %xmm4, %xmm0
1456 ; SSE41-NEXT: pblendvb %xmm0, %xmm2, %xmm1
1457 ; SSE41-NEXT: movdqa %xmm1, %xmm2
1458 ; SSE41-NEXT: psraw $2, %xmm2
1459 ; SSE41-NEXT: paddw %xmm4, %xmm4
1460 ; SSE41-NEXT: movdqa %xmm4, %xmm0
1461 ; SSE41-NEXT: pblendvb %xmm0, %xmm2, %xmm1
1462 ; SSE41-NEXT: movdqa %xmm1, %xmm2
1463 ; SSE41-NEXT: psraw $1, %xmm2
1464 ; SSE41-NEXT: paddw %xmm4, %xmm4
1465 ; SSE41-NEXT: movdqa %xmm4, %xmm0
1466 ; SSE41-NEXT: pblendvb %xmm0, %xmm2, %xmm1
1467 ; SSE41-NEXT: movdqa %xmm1, %xmm0
1470 ; AVX1-LABEL: splatvar_shift_v8i8:
1472 ; AVX1-NEXT: vpsllw $8, %xmm0, %xmm0
1473 ; AVX1-NEXT: vpsraw $8, %xmm0, %xmm2
1474 ; AVX1-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[0],zero,xmm1[0],zero,xmm1[0],zero,xmm1[0],zero,xmm1[0],zero,xmm1[0],zero,xmm1[0],zero,xmm1[0],zero
1475 ; AVX1-NEXT: vpsllw $12, %xmm1, %xmm3
1476 ; AVX1-NEXT: vpsllw $4, %xmm1, %xmm1
1477 ; AVX1-NEXT: vpor %xmm3, %xmm1, %xmm1
1478 ; AVX1-NEXT: vpaddw %xmm1, %xmm1, %xmm3
1479 ; AVX1-NEXT: vpsraw $15, %xmm0, %xmm0
1480 ; AVX1-NEXT: vpblendvb %xmm1, %xmm0, %xmm2, %xmm0
1481 ; AVX1-NEXT: vpsraw $4, %xmm0, %xmm1
1482 ; AVX1-NEXT: vpblendvb %xmm3, %xmm1, %xmm0, %xmm0
1483 ; AVX1-NEXT: vpsraw $2, %xmm0, %xmm1
1484 ; AVX1-NEXT: vpaddw %xmm3, %xmm3, %xmm2
1485 ; AVX1-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm0
1486 ; AVX1-NEXT: vpsraw $1, %xmm0, %xmm1
1487 ; AVX1-NEXT: vpaddw %xmm2, %xmm2, %xmm2
1488 ; AVX1-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm0
1491 ; AVX2-LABEL: splatvar_shift_v8i8:
1493 ; AVX2-NEXT: vpsllw $8, %xmm0, %xmm0
1494 ; AVX2-NEXT: vpsraw $8, %xmm0, %xmm0
1495 ; AVX2-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[0],zero,xmm1[0],zero,xmm1[0],zero,xmm1[0],zero,xmm1[0],zero,xmm1[0],zero,xmm1[0],zero,xmm1[0],zero
1496 ; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
1497 ; AVX2-NEXT: vpmovsxwd %xmm0, %ymm0
1498 ; AVX2-NEXT: vpsravd %ymm1, %ymm0, %ymm0
1499 ; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
1500 ; AVX2-NEXT: vpackssdw %xmm1, %xmm0, %xmm0
1501 ; AVX2-NEXT: vzeroupper
1504 ; XOP-LABEL: splatvar_shift_v8i8:
1506 ; XOP-NEXT: vpsllw $8, %xmm0, %xmm0
1507 ; XOP-NEXT: vpsraw $8, %xmm0, %xmm0
1508 ; XOP-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[0],zero,xmm1[0],zero,xmm1[0],zero,xmm1[0],zero,xmm1[0],zero,xmm1[0],zero,xmm1[0],zero,xmm1[0],zero
1509 ; XOP-NEXT: vpxor %xmm2, %xmm2, %xmm2
1510 ; XOP-NEXT: vpsubw %xmm1, %xmm2, %xmm1
1511 ; XOP-NEXT: vpshaw %xmm1, %xmm0, %xmm0
1514 ; AVX512DQ-LABEL: splatvar_shift_v8i8:
1515 ; AVX512DQ: # %bb.0:
1516 ; AVX512DQ-NEXT: vpsllw $8, %xmm0, %xmm0
1517 ; AVX512DQ-NEXT: vpsraw $8, %xmm0, %xmm0
1518 ; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[0],zero,xmm1[0],zero,xmm1[0],zero,xmm1[0],zero,xmm1[0],zero,xmm1[0],zero,xmm1[0],zero,xmm1[0],zero
1519 ; AVX512DQ-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
1520 ; AVX512DQ-NEXT: vpmovsxwd %xmm0, %ymm0
1521 ; AVX512DQ-NEXT: vpsravd %ymm1, %ymm0, %ymm0
1522 ; AVX512DQ-NEXT: vpmovdw %zmm0, %ymm0
1523 ; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
1524 ; AVX512DQ-NEXT: vzeroupper
1525 ; AVX512DQ-NEXT: retq
1527 ; AVX512BW-LABEL: splatvar_shift_v8i8:
1528 ; AVX512BW: # %bb.0:
1529 ; AVX512BW-NEXT: vpsllw $8, %xmm0, %xmm0
1530 ; AVX512BW-NEXT: vpsraw $8, %xmm0, %xmm0
1531 ; AVX512BW-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[0],zero,xmm1[0],zero,xmm1[0],zero,xmm1[0],zero,xmm1[0],zero,xmm1[0],zero,xmm1[0],zero,xmm1[0],zero
1532 ; AVX512BW-NEXT: vpsravw %zmm1, %zmm0, %zmm0
1533 ; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
1534 ; AVX512BW-NEXT: vzeroupper
1535 ; AVX512BW-NEXT: retq
1537 ; AVX512DQVL-LABEL: splatvar_shift_v8i8:
1538 ; AVX512DQVL: # %bb.0:
1539 ; AVX512DQVL-NEXT: vpsllw $8, %xmm0, %xmm0
1540 ; AVX512DQVL-NEXT: vpsraw $8, %xmm0, %xmm0
1541 ; AVX512DQVL-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[0],zero,xmm1[0],zero,xmm1[0],zero,xmm1[0],zero,xmm1[0],zero,xmm1[0],zero,xmm1[0],zero,xmm1[0],zero
1542 ; AVX512DQVL-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
1543 ; AVX512DQVL-NEXT: vpmovsxwd %xmm0, %ymm0
1544 ; AVX512DQVL-NEXT: vpsravd %ymm1, %ymm0, %ymm0
1545 ; AVX512DQVL-NEXT: vpmovdw %ymm0, %xmm0
1546 ; AVX512DQVL-NEXT: vzeroupper
1547 ; AVX512DQVL-NEXT: retq
1549 ; AVX512BWVL-LABEL: splatvar_shift_v8i8:
1550 ; AVX512BWVL: # %bb.0:
1551 ; AVX512BWVL-NEXT: vpsllw $8, %xmm0, %xmm0
1552 ; AVX512BWVL-NEXT: vpsraw $8, %xmm0, %xmm0
1553 ; AVX512BWVL-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[0],zero,xmm1[0],zero,xmm1[0],zero,xmm1[0],zero,xmm1[0],zero,xmm1[0],zero,xmm1[0],zero,xmm1[0],zero
1554 ; AVX512BWVL-NEXT: vpsravw %xmm1, %xmm0, %xmm0
1555 ; AVX512BWVL-NEXT: retq
1557 ; X32-SSE-LABEL: splatvar_shift_v8i8:
1559 ; X32-SSE-NEXT: movdqa %xmm0, %xmm2
1560 ; X32-SSE-NEXT: psllw $8, %xmm2
1561 ; X32-SSE-NEXT: movdqa %xmm2, %xmm3
1562 ; X32-SSE-NEXT: psraw $8, %xmm3
1563 ; X32-SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm1[0,0,2,3,4,5,6,7]
1564 ; X32-SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,0,0]
1565 ; X32-SSE-NEXT: psllw $12, %xmm1
1566 ; X32-SSE-NEXT: movdqa %xmm1, %xmm0
1567 ; X32-SSE-NEXT: psraw $15, %xmm0
1568 ; X32-SSE-NEXT: psraw $15, %xmm2
1569 ; X32-SSE-NEXT: pand %xmm0, %xmm2
1570 ; X32-SSE-NEXT: pandn %xmm3, %xmm0
1571 ; X32-SSE-NEXT: por %xmm2, %xmm0
1572 ; X32-SSE-NEXT: paddw %xmm1, %xmm1
1573 ; X32-SSE-NEXT: movdqa %xmm1, %xmm2
1574 ; X32-SSE-NEXT: psraw $15, %xmm2
1575 ; X32-SSE-NEXT: movdqa %xmm2, %xmm3
1576 ; X32-SSE-NEXT: pandn %xmm0, %xmm3
1577 ; X32-SSE-NEXT: psraw $4, %xmm0
1578 ; X32-SSE-NEXT: pand %xmm2, %xmm0
1579 ; X32-SSE-NEXT: por %xmm3, %xmm0
1580 ; X32-SSE-NEXT: paddw %xmm1, %xmm1
1581 ; X32-SSE-NEXT: movdqa %xmm1, %xmm2
1582 ; X32-SSE-NEXT: psraw $15, %xmm2
1583 ; X32-SSE-NEXT: movdqa %xmm2, %xmm3
1584 ; X32-SSE-NEXT: pandn %xmm0, %xmm3
1585 ; X32-SSE-NEXT: psraw $2, %xmm0
1586 ; X32-SSE-NEXT: pand %xmm2, %xmm0
1587 ; X32-SSE-NEXT: por %xmm3, %xmm0
1588 ; X32-SSE-NEXT: paddw %xmm1, %xmm1
1589 ; X32-SSE-NEXT: psraw $15, %xmm1
1590 ; X32-SSE-NEXT: movdqa %xmm1, %xmm2
1591 ; X32-SSE-NEXT: pandn %xmm0, %xmm2
1592 ; X32-SSE-NEXT: psraw $1, %xmm0
1593 ; X32-SSE-NEXT: pand %xmm1, %xmm0
1594 ; X32-SSE-NEXT: por %xmm2, %xmm0
1595 ; X32-SSE-NEXT: retl
1596 %splat = shufflevector <8 x i8> %b, <8 x i8> undef, <8 x i32> zeroinitializer
1597 %shift = ashr <8 x i8> %a, %splat
1601 define <4 x i8> @splatvar_shift_v4i8(<4 x i8> %a, <4 x i8> %b) nounwind {
1602 ; SSE2-LABEL: splatvar_shift_v4i8:
1604 ; SSE2-NEXT: pslld $24, %xmm0
1605 ; SSE2-NEXT: psrad $24, %xmm0
1606 ; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm1[0,0,0,0]
1607 ; SSE2-NEXT: pand {{.*}}(%rip), %xmm2
1608 ; SSE2-NEXT: pshuflw {{.*#+}} xmm1 = xmm2[2,3,3,3,4,5,6,7]
1609 ; SSE2-NEXT: movdqa %xmm0, %xmm3
1610 ; SSE2-NEXT: psrad %xmm1, %xmm3
1611 ; SSE2-NEXT: pshuflw {{.*#+}} xmm4 = xmm2[0,1,1,1,4,5,6,7]
1612 ; SSE2-NEXT: movdqa %xmm0, %xmm1
1613 ; SSE2-NEXT: psrad %xmm4, %xmm1
1614 ; SSE2-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm3[0]
1615 ; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,3,0,1]
1616 ; SSE2-NEXT: pshuflw {{.*#+}} xmm3 = xmm2[2,3,3,3,4,5,6,7]
1617 ; SSE2-NEXT: movdqa %xmm0, %xmm4
1618 ; SSE2-NEXT: psrad %xmm3, %xmm4
1619 ; SSE2-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[0,1,1,1,4,5,6,7]
1620 ; SSE2-NEXT: psrad %xmm2, %xmm0
1621 ; SSE2-NEXT: punpckhqdq {{.*#+}} xmm0 = xmm0[1],xmm4[1]
1622 ; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,3],xmm0[0,3]
1623 ; SSE2-NEXT: movaps %xmm1, %xmm0
1626 ; SSE41-LABEL: splatvar_shift_v4i8:
1628 ; SSE41-NEXT: pslld $24, %xmm0
1629 ; SSE41-NEXT: psrad $24, %xmm0
1630 ; SSE41-NEXT: pshufb {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[0],zero,zero,zero,xmm1[0],zero,zero,zero,xmm1[0],zero,zero,zero
1631 ; SSE41-NEXT: pshuflw {{.*#+}} xmm2 = xmm1[2,3,3,3,4,5,6,7]
1632 ; SSE41-NEXT: movdqa %xmm0, %xmm3
1633 ; SSE41-NEXT: psrad %xmm2, %xmm3
1634 ; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm1[2,3,0,1]
1635 ; SSE41-NEXT: pshuflw {{.*#+}} xmm4 = xmm2[2,3,3,3,4,5,6,7]
1636 ; SSE41-NEXT: movdqa %xmm0, %xmm5
1637 ; SSE41-NEXT: psrad %xmm4, %xmm5
1638 ; SSE41-NEXT: pblendw {{.*#+}} xmm5 = xmm3[0,1,2,3],xmm5[4,5,6,7]
1639 ; SSE41-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,1,1,1,4,5,6,7]
1640 ; SSE41-NEXT: movdqa %xmm0, %xmm3
1641 ; SSE41-NEXT: psrad %xmm1, %xmm3
1642 ; SSE41-NEXT: pshuflw {{.*#+}} xmm1 = xmm2[0,1,1,1,4,5,6,7]
1643 ; SSE41-NEXT: psrad %xmm1, %xmm0
1644 ; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm3[0,1,2,3],xmm0[4,5,6,7]
1645 ; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm5[2,3],xmm0[4,5],xmm5[6,7]
1648 ; AVX1-LABEL: splatvar_shift_v4i8:
1650 ; AVX1-NEXT: vpslld $24, %xmm0, %xmm0
1651 ; AVX1-NEXT: vpsrad $24, %xmm0, %xmm0
1652 ; AVX1-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[0],zero,zero,zero,xmm1[0],zero,zero,zero,xmm1[0],zero,zero,zero
1653 ; AVX1-NEXT: vpsrldq {{.*#+}} xmm2 = xmm1[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
1654 ; AVX1-NEXT: vpsrad %xmm2, %xmm0, %xmm2
1655 ; AVX1-NEXT: vpsrlq $32, %xmm1, %xmm3
1656 ; AVX1-NEXT: vpsrad %xmm3, %xmm0, %xmm3
1657 ; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm3[0,1,2,3],xmm2[4,5,6,7]
1658 ; AVX1-NEXT: vpxor %xmm3, %xmm3, %xmm3
1659 ; AVX1-NEXT: vpunpckhdq {{.*#+}} xmm3 = xmm1[2],xmm3[2],xmm1[3],xmm3[3]
1660 ; AVX1-NEXT: vpsrad %xmm3, %xmm0, %xmm3
1661 ; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
1662 ; AVX1-NEXT: vpsrad %xmm1, %xmm0, %xmm0
1663 ; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm3[4,5,6,7]
1664 ; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
1667 ; AVX2-LABEL: splatvar_shift_v4i8:
1669 ; AVX2-NEXT: vpslld $24, %xmm0, %xmm0
1670 ; AVX2-NEXT: vpsrad $24, %xmm0, %xmm0
1671 ; AVX2-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[0],zero,zero,zero,xmm1[0],zero,zero,zero,xmm1[0],zero,zero,zero
1672 ; AVX2-NEXT: vpsravd %xmm1, %xmm0, %xmm0
1675 ; XOPAVX1-LABEL: splatvar_shift_v4i8:
1677 ; XOPAVX1-NEXT: vpslld $24, %xmm0, %xmm0
1678 ; XOPAVX1-NEXT: vpsrad $24, %xmm0, %xmm0
1679 ; XOPAVX1-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[0],zero,zero,zero,xmm1[0],zero,zero,zero,xmm1[0],zero,zero,zero
1680 ; XOPAVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
1681 ; XOPAVX1-NEXT: vpsubd %xmm1, %xmm2, %xmm1
1682 ; XOPAVX1-NEXT: vpshad %xmm1, %xmm0, %xmm0
1683 ; XOPAVX1-NEXT: retq
1685 ; XOPAVX2-LABEL: splatvar_shift_v4i8:
1687 ; XOPAVX2-NEXT: vpslld $24, %xmm0, %xmm0
1688 ; XOPAVX2-NEXT: vpsrad $24, %xmm0, %xmm0
1689 ; XOPAVX2-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[0],zero,zero,zero,xmm1[0],zero,zero,zero,xmm1[0],zero,zero,zero
1690 ; XOPAVX2-NEXT: vpsravd %xmm1, %xmm0, %xmm0
1691 ; XOPAVX2-NEXT: retq
1693 ; AVX512-LABEL: splatvar_shift_v4i8:
1695 ; AVX512-NEXT: vpslld $24, %xmm0, %xmm0
1696 ; AVX512-NEXT: vpsrad $24, %xmm0, %xmm0
1697 ; AVX512-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[0],zero,zero,zero,xmm1[0],zero,zero,zero,xmm1[0],zero,zero,zero
1698 ; AVX512-NEXT: vpsravd %xmm1, %xmm0, %xmm0
1701 ; AVX512VL-LABEL: splatvar_shift_v4i8:
1702 ; AVX512VL: # %bb.0:
1703 ; AVX512VL-NEXT: vpslld $24, %xmm0, %xmm0
1704 ; AVX512VL-NEXT: vpsrad $24, %xmm0, %xmm0
1705 ; AVX512VL-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[0],zero,zero,zero,xmm1[0],zero,zero,zero,xmm1[0],zero,zero,zero
1706 ; AVX512VL-NEXT: vpsravd %xmm1, %xmm0, %xmm0
1707 ; AVX512VL-NEXT: retq
1709 ; X32-SSE-LABEL: splatvar_shift_v4i8:
1711 ; X32-SSE-NEXT: pslld $24, %xmm0
1712 ; X32-SSE-NEXT: psrad $24, %xmm0
1713 ; X32-SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm1[0,0,0,0]
1714 ; X32-SSE-NEXT: pand {{\.LCPI.*}}, %xmm2
1715 ; X32-SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm2[2,3,3,3,4,5,6,7]
1716 ; X32-SSE-NEXT: movdqa %xmm0, %xmm3
1717 ; X32-SSE-NEXT: psrad %xmm1, %xmm3
1718 ; X32-SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm2[0,1,1,1,4,5,6,7]
1719 ; X32-SSE-NEXT: movdqa %xmm0, %xmm1
1720 ; X32-SSE-NEXT: psrad %xmm4, %xmm1
1721 ; X32-SSE-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm3[0]
1722 ; X32-SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,3,0,1]
1723 ; X32-SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm2[2,3,3,3,4,5,6,7]
1724 ; X32-SSE-NEXT: movdqa %xmm0, %xmm4
1725 ; X32-SSE-NEXT: psrad %xmm3, %xmm4
1726 ; X32-SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[0,1,1,1,4,5,6,7]
1727 ; X32-SSE-NEXT: psrad %xmm2, %xmm0
1728 ; X32-SSE-NEXT: punpckhqdq {{.*#+}} xmm0 = xmm0[1],xmm4[1]
1729 ; X32-SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,3],xmm0[0,3]
1730 ; X32-SSE-NEXT: movaps %xmm1, %xmm0
1731 ; X32-SSE-NEXT: retl
1732 %splat = shufflevector <4 x i8> %b, <4 x i8> undef, <4 x i32> zeroinitializer
1733 %shift = ashr <4 x i8> %a, %splat
1737 define <2 x i8> @splatvar_shift_v2i8(<2 x i8> %a, <2 x i8> %b) nounwind {
1738 ; SSE2-LABEL: splatvar_shift_v2i8:
1740 ; SSE2-NEXT: psllq $56, %xmm0
1741 ; SSE2-NEXT: movdqa %xmm0, %xmm2
1742 ; SSE2-NEXT: psrad $31, %xmm2
1743 ; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,3,2,3]
1744 ; SSE2-NEXT: psrad $24, %xmm0
1745 ; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,3,2,3]
1746 ; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
1747 ; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,0,1]
1748 ; SSE2-NEXT: pand {{.*}}(%rip), %xmm1
1749 ; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
1750 ; SSE2-NEXT: movdqa %xmm2, %xmm3
1751 ; SSE2-NEXT: psrlq %xmm1, %xmm3
1752 ; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm1[2,3,0,1]
1753 ; SSE2-NEXT: psrlq %xmm4, %xmm2
1754 ; SSE2-NEXT: movsd {{.*#+}} xmm2 = xmm3[0],xmm2[1]
1755 ; SSE2-NEXT: movdqa %xmm0, %xmm3
1756 ; SSE2-NEXT: psrlq %xmm1, %xmm3
1757 ; SSE2-NEXT: psrlq %xmm4, %xmm0
1758 ; SSE2-NEXT: movsd {{.*#+}} xmm0 = xmm3[0],xmm0[1]
1759 ; SSE2-NEXT: xorpd %xmm2, %xmm0
1760 ; SSE2-NEXT: psubq %xmm2, %xmm0
1763 ; SSE41-LABEL: splatvar_shift_v2i8:
1765 ; SSE41-NEXT: psllq $56, %xmm0
1766 ; SSE41-NEXT: movdqa %xmm0, %xmm2
1767 ; SSE41-NEXT: psrad $31, %xmm2
1768 ; SSE41-NEXT: psrad $24, %xmm0
1769 ; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
1770 ; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
1771 ; SSE41-NEXT: pshufb {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[0],zero,zero,zero,zero,zero,zero,zero
1772 ; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
1773 ; SSE41-NEXT: movdqa %xmm2, %xmm3
1774 ; SSE41-NEXT: psrlq %xmm1, %xmm3
1775 ; SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm1[2,3,0,1]
1776 ; SSE41-NEXT: psrlq %xmm4, %xmm2
1777 ; SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm3[0,1,2,3],xmm2[4,5,6,7]
1778 ; SSE41-NEXT: movdqa %xmm0, %xmm3
1779 ; SSE41-NEXT: psrlq %xmm4, %xmm3
1780 ; SSE41-NEXT: psrlq %xmm1, %xmm0
1781 ; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm3[4,5,6,7]
1782 ; SSE41-NEXT: pxor %xmm2, %xmm0
1783 ; SSE41-NEXT: psubq %xmm2, %xmm0
1786 ; AVX1-LABEL: splatvar_shift_v2i8:
1788 ; AVX1-NEXT: vpsllq $56, %xmm0, %xmm0
1789 ; AVX1-NEXT: vpsrad $31, %xmm0, %xmm2
1790 ; AVX1-NEXT: vpsrad $24, %xmm0, %xmm0
1791 ; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
1792 ; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
1793 ; AVX1-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[0],zero,zero,zero,zero,zero,zero,zero
1794 ; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
1795 ; AVX1-NEXT: vpsrlq %xmm1, %xmm2, %xmm3
1796 ; AVX1-NEXT: vpshufd {{.*#+}} xmm4 = xmm1[2,3,0,1]
1797 ; AVX1-NEXT: vpsrlq %xmm4, %xmm2, %xmm2
1798 ; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm3[0,1,2,3],xmm2[4,5,6,7]
1799 ; AVX1-NEXT: vpsrlq %xmm4, %xmm0, %xmm3
1800 ; AVX1-NEXT: vpsrlq %xmm1, %xmm0, %xmm0
1801 ; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm3[4,5,6,7]
1802 ; AVX1-NEXT: vpxor %xmm2, %xmm0, %xmm0
1803 ; AVX1-NEXT: vpsubq %xmm2, %xmm0, %xmm0
1806 ; AVX2-LABEL: splatvar_shift_v2i8:
1808 ; AVX2-NEXT: vpsllq $56, %xmm0, %xmm0
1809 ; AVX2-NEXT: vpsrad $31, %xmm0, %xmm2
1810 ; AVX2-NEXT: vpsrad $24, %xmm0, %xmm0
1811 ; AVX2-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
1812 ; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3]
1813 ; AVX2-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[0],zero,zero,zero,zero,zero,zero,zero
1814 ; AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
1815 ; AVX2-NEXT: vpsrlvq %xmm1, %xmm2, %xmm2
1816 ; AVX2-NEXT: vpsrlvq %xmm1, %xmm0, %xmm0
1817 ; AVX2-NEXT: vpxor %xmm2, %xmm0, %xmm0
1818 ; AVX2-NEXT: vpsubq %xmm2, %xmm0, %xmm0
1821 ; XOP-LABEL: splatvar_shift_v2i8:
1823 ; XOP-NEXT: vpsllq $56, %xmm0, %xmm0
1824 ; XOP-NEXT: vpshaq {{.*}}(%rip), %xmm0, %xmm0
1825 ; XOP-NEXT: vpxor %xmm2, %xmm2, %xmm2
1826 ; XOP-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[0],zero,zero,zero,zero,zero,zero,zero
1827 ; XOP-NEXT: vpsubq %xmm1, %xmm2, %xmm1
1828 ; XOP-NEXT: vpshaq %xmm1, %xmm0, %xmm0
1831 ; AVX512-LABEL: splatvar_shift_v2i8:
1833 ; AVX512-NEXT: vpsllq $56, %xmm0, %xmm0
1834 ; AVX512-NEXT: vpsraq $56, %zmm0, %zmm0
1835 ; AVX512-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[0],zero,zero,zero,zero,zero,zero,zero
1836 ; AVX512-NEXT: vpsravq %zmm1, %zmm0, %zmm0
1837 ; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
1838 ; AVX512-NEXT: vzeroupper
1841 ; AVX512VL-LABEL: splatvar_shift_v2i8:
1842 ; AVX512VL: # %bb.0:
1843 ; AVX512VL-NEXT: vpsllq $56, %xmm0, %xmm0
1844 ; AVX512VL-NEXT: vpsraq $56, %xmm0, %xmm0
1845 ; AVX512VL-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[0],zero,zero,zero,zero,zero,zero,zero
1846 ; AVX512VL-NEXT: vpsravq %xmm1, %xmm0, %xmm0
1847 ; AVX512VL-NEXT: retq
1849 ; X32-SSE-LABEL: splatvar_shift_v2i8:
1851 ; X32-SSE-NEXT: psllq $56, %xmm0
1852 ; X32-SSE-NEXT: movdqa %xmm0, %xmm2
1853 ; X32-SSE-NEXT: psrad $31, %xmm2
1854 ; X32-SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,3,2,3]
1855 ; X32-SSE-NEXT: psrad $24, %xmm0
1856 ; X32-SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,3,2,3]
1857 ; X32-SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
1858 ; X32-SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,0,1]
1859 ; X32-SSE-NEXT: pand {{\.LCPI.*}}, %xmm1
1860 ; X32-SSE-NEXT: movdqa {{.*#+}} xmm2 = [0,2147483648,0,2147483648]
1861 ; X32-SSE-NEXT: movdqa %xmm2, %xmm3
1862 ; X32-SSE-NEXT: psrlq %xmm1, %xmm3
1863 ; X32-SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm1[2,3,0,1]
1864 ; X32-SSE-NEXT: psrlq %xmm4, %xmm2
1865 ; X32-SSE-NEXT: movsd {{.*#+}} xmm2 = xmm3[0],xmm2[1]
1866 ; X32-SSE-NEXT: movdqa %xmm0, %xmm3
1867 ; X32-SSE-NEXT: psrlq %xmm1, %xmm3
1868 ; X32-SSE-NEXT: psrlq %xmm4, %xmm0
1869 ; X32-SSE-NEXT: movsd {{.*#+}} xmm0 = xmm3[0],xmm0[1]
1870 ; X32-SSE-NEXT: xorpd %xmm2, %xmm0
1871 ; X32-SSE-NEXT: psubq %xmm2, %xmm0
1872 ; X32-SSE-NEXT: retl
1873 %splat = shufflevector <2 x i8> %b, <2 x i8> undef, <2 x i32> zeroinitializer
1874 %shift = ashr <2 x i8> %a, %splat
1882 define <2 x i32> @constant_shift_v2i32(<2 x i32> %a) nounwind {
1883 ; SSE2-LABEL: constant_shift_v2i32:
1885 ; SSE2-NEXT: psllq $32, %xmm0
1886 ; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,3,2,3]
1887 ; SSE2-NEXT: psrad $31, %xmm0
1888 ; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,3,2,3]
1889 ; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
1890 ; SSE2-NEXT: movdqa %xmm1, %xmm0
1891 ; SSE2-NEXT: psrlq $4, %xmm0
1892 ; SSE2-NEXT: psrlq $5, %xmm1
1893 ; SSE2-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
1894 ; SSE2-NEXT: movapd {{.*#+}} xmm0 = [576460752303423488,288230376151711744]
1895 ; SSE2-NEXT: xorpd %xmm0, %xmm1
1896 ; SSE2-NEXT: psubq %xmm0, %xmm1
1897 ; SSE2-NEXT: movdqa %xmm1, %xmm0
1900 ; SSE41-LABEL: constant_shift_v2i32:
1902 ; SSE41-NEXT: movdqa %xmm0, %xmm1
1903 ; SSE41-NEXT: psllq $32, %xmm1
1904 ; SSE41-NEXT: psrad $31, %xmm1
1905 ; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7]
1906 ; SSE41-NEXT: movdqa %xmm1, %xmm0
1907 ; SSE41-NEXT: psrlq $5, %xmm0
1908 ; SSE41-NEXT: psrlq $4, %xmm1
1909 ; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm0[4,5,6,7]
1910 ; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [576460752303423488,288230376151711744]
1911 ; SSE41-NEXT: pxor %xmm0, %xmm1
1912 ; SSE41-NEXT: psubq %xmm0, %xmm1
1913 ; SSE41-NEXT: movdqa %xmm1, %xmm0
1916 ; AVX1-LABEL: constant_shift_v2i32:
1918 ; AVX1-NEXT: vpsllq $32, %xmm0, %xmm1
1919 ; AVX1-NEXT: vpsrad $31, %xmm1, %xmm1
1920 ; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7]
1921 ; AVX1-NEXT: vpsrlq $5, %xmm0, %xmm1
1922 ; AVX1-NEXT: vpsrlq $4, %xmm0, %xmm0
1923 ; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
1924 ; AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = [576460752303423488,288230376151711744]
1925 ; AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
1926 ; AVX1-NEXT: vpsubq %xmm1, %xmm0, %xmm0
1929 ; AVX2-LABEL: constant_shift_v2i32:
1931 ; AVX2-NEXT: vpsllq $32, %xmm0, %xmm1
1932 ; AVX2-NEXT: vpsrad $31, %xmm1, %xmm1
1933 ; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
1934 ; AVX2-NEXT: vpsrlvq {{.*}}(%rip), %xmm0, %xmm0
1935 ; AVX2-NEXT: vmovdqa {{.*#+}} xmm1 = [576460752303423488,288230376151711744]
1936 ; AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
1937 ; AVX2-NEXT: vpsubq %xmm1, %xmm0, %xmm0
1940 ; XOP-LABEL: constant_shift_v2i32:
1942 ; XOP-NEXT: vpsllq $32, %xmm0, %xmm0
1943 ; XOP-NEXT: vpshaq {{.*}}(%rip), %xmm0, %xmm0
1944 ; XOP-NEXT: vpshaq {{.*}}(%rip), %xmm0, %xmm0
1947 ; AVX512-LABEL: constant_shift_v2i32:
1949 ; AVX512-NEXT: vpsllq $32, %xmm0, %xmm0
1950 ; AVX512-NEXT: vpsraq $32, %zmm0, %zmm0
1951 ; AVX512-NEXT: vmovdqa {{.*#+}} xmm1 = [4,5]
1952 ; AVX512-NEXT: vpsravq %zmm1, %zmm0, %zmm0
1953 ; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
1954 ; AVX512-NEXT: vzeroupper
1957 ; AVX512VL-LABEL: constant_shift_v2i32:
1958 ; AVX512VL: # %bb.0:
1959 ; AVX512VL-NEXT: vpsllq $32, %xmm0, %xmm0
1960 ; AVX512VL-NEXT: vpsraq $32, %xmm0, %xmm0
1961 ; AVX512VL-NEXT: vpsravq {{.*}}(%rip), %xmm0, %xmm0
1962 ; AVX512VL-NEXT: retq
1964 ; X32-SSE-LABEL: constant_shift_v2i32:
1966 ; X32-SSE-NEXT: psllq $32, %xmm0
1967 ; X32-SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,3,2,3]
1968 ; X32-SSE-NEXT: psrad $31, %xmm0
1969 ; X32-SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,3,2,3]
1970 ; X32-SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
1971 ; X32-SSE-NEXT: movdqa %xmm1, %xmm0
1972 ; X32-SSE-NEXT: psrlq $4, %xmm0
1973 ; X32-SSE-NEXT: psrlq $5, %xmm1
1974 ; X32-SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
1975 ; X32-SSE-NEXT: movapd {{.*#+}} xmm0 = [3.7857669957336791E-270,2.0522684006491881E-289]
1976 ; X32-SSE-NEXT: xorpd %xmm0, %xmm1
1977 ; X32-SSE-NEXT: psubq %xmm0, %xmm1
1978 ; X32-SSE-NEXT: movdqa %xmm1, %xmm0
1979 ; X32-SSE-NEXT: retl
1980 %shift = ashr <2 x i32> %a, <i32 4, i32 5>
1981 ret <2 x i32> %shift
1984 define <4 x i16> @constant_shift_v4i16(<4 x i16> %a) nounwind {
1985 ; SSE2-LABEL: constant_shift_v4i16:
1987 ; SSE2-NEXT: pslld $16, %xmm0
1988 ; SSE2-NEXT: movdqa %xmm0, %xmm1
1989 ; SSE2-NEXT: psrad $16, %xmm1
1990 ; SSE2-NEXT: movdqa %xmm0, %xmm2
1991 ; SSE2-NEXT: psrad $19, %xmm2
1992 ; SSE2-NEXT: movdqa %xmm0, %xmm3
1993 ; SSE2-NEXT: psrad $18, %xmm3
1994 ; SSE2-NEXT: punpckhqdq {{.*#+}} xmm3 = xmm3[1],xmm2[1]
1995 ; SSE2-NEXT: psrad $17, %xmm0
1996 ; SSE2-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
1997 ; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,3],xmm3[0,3]
1998 ; SSE2-NEXT: movaps %xmm1, %xmm0
2001 ; SSE41-LABEL: constant_shift_v4i16:
2003 ; SSE41-NEXT: pslld $16, %xmm0
2004 ; SSE41-NEXT: movdqa %xmm0, %xmm1
2005 ; SSE41-NEXT: psrad $16, %xmm1
2006 ; SSE41-NEXT: movdqa %xmm0, %xmm2
2007 ; SSE41-NEXT: psrad $19, %xmm2
2008 ; SSE41-NEXT: movdqa %xmm0, %xmm3
2009 ; SSE41-NEXT: psrad $17, %xmm3
2010 ; SSE41-NEXT: pblendw {{.*#+}} xmm3 = xmm3[0,1,2,3],xmm2[4,5,6,7]
2011 ; SSE41-NEXT: psrad $18, %xmm0
2012 ; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7]
2013 ; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm3[2,3],xmm0[4,5],xmm3[6,7]
2016 ; AVX1-LABEL: constant_shift_v4i16:
2018 ; AVX1-NEXT: vpslld $16, %xmm0, %xmm0
2019 ; AVX1-NEXT: vpsrad $16, %xmm0, %xmm1
2020 ; AVX1-NEXT: vpsrad $19, %xmm0, %xmm2
2021 ; AVX1-NEXT: vpsrad $17, %xmm0, %xmm3
2022 ; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm3[0,1,2,3],xmm2[4,5,6,7]
2023 ; AVX1-NEXT: vpsrad $18, %xmm0, %xmm0
2024 ; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7]
2025 ; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
2028 ; AVX2-LABEL: constant_shift_v4i16:
2030 ; AVX2-NEXT: vpslld $16, %xmm0, %xmm0
2031 ; AVX2-NEXT: vpsrad $16, %xmm0, %xmm0
2032 ; AVX2-NEXT: vpsravd {{.*}}(%rip), %xmm0, %xmm0
2035 ; XOPAVX1-LABEL: constant_shift_v4i16:
2037 ; XOPAVX1-NEXT: vpslld $16, %xmm0, %xmm0
2038 ; XOPAVX1-NEXT: vpsrad $16, %xmm0, %xmm0
2039 ; XOPAVX1-NEXT: vpshad {{.*}}(%rip), %xmm0, %xmm0
2040 ; XOPAVX1-NEXT: retq
2042 ; XOPAVX2-LABEL: constant_shift_v4i16:
2044 ; XOPAVX2-NEXT: vpslld $16, %xmm0, %xmm0
2045 ; XOPAVX2-NEXT: vpsrad $16, %xmm0, %xmm0
2046 ; XOPAVX2-NEXT: vpsravd {{.*}}(%rip), %xmm0, %xmm0
2047 ; XOPAVX2-NEXT: retq
2049 ; AVX512-LABEL: constant_shift_v4i16:
2051 ; AVX512-NEXT: vpslld $16, %xmm0, %xmm0
2052 ; AVX512-NEXT: vpsrad $16, %xmm0, %xmm0
2053 ; AVX512-NEXT: vpsravd {{.*}}(%rip), %xmm0, %xmm0
2056 ; AVX512VL-LABEL: constant_shift_v4i16:
2057 ; AVX512VL: # %bb.0:
2058 ; AVX512VL-NEXT: vpslld $16, %xmm0, %xmm0
2059 ; AVX512VL-NEXT: vpsrad $16, %xmm0, %xmm0
2060 ; AVX512VL-NEXT: vpsravd {{.*}}(%rip), %xmm0, %xmm0
2061 ; AVX512VL-NEXT: retq
2063 ; X32-SSE-LABEL: constant_shift_v4i16:
2065 ; X32-SSE-NEXT: pslld $16, %xmm0
2066 ; X32-SSE-NEXT: movdqa %xmm0, %xmm1
2067 ; X32-SSE-NEXT: psrad $16, %xmm1
2068 ; X32-SSE-NEXT: movdqa %xmm0, %xmm2
2069 ; X32-SSE-NEXT: psrad $19, %xmm2
2070 ; X32-SSE-NEXT: movdqa %xmm0, %xmm3
2071 ; X32-SSE-NEXT: psrad $18, %xmm3
2072 ; X32-SSE-NEXT: punpckhqdq {{.*#+}} xmm3 = xmm3[1],xmm2[1]
2073 ; X32-SSE-NEXT: psrad $17, %xmm0
2074 ; X32-SSE-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
2075 ; X32-SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,3],xmm3[0,3]
2076 ; X32-SSE-NEXT: movaps %xmm1, %xmm0
2077 ; X32-SSE-NEXT: retl
2078 %shift = ashr <4 x i16> %a, <i16 0, i16 1, i16 2, i16 3>
2079 ret <4 x i16> %shift
2082 define <2 x i16> @constant_shift_v2i16(<2 x i16> %a) nounwind {
2083 ; SSE2-LABEL: constant_shift_v2i16:
2085 ; SSE2-NEXT: psllq $48, %xmm0
2086 ; SSE2-NEXT: movdqa %xmm0, %xmm1
2087 ; SSE2-NEXT: psrad $31, %xmm1
2088 ; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,3,2,3]
2089 ; SSE2-NEXT: psrad $16, %xmm0
2090 ; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,3,2,3]
2091 ; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
2092 ; SSE2-NEXT: movdqa %xmm0, %xmm1
2093 ; SSE2-NEXT: psrlq $2, %xmm1
2094 ; SSE2-NEXT: psrlq $3, %xmm0
2095 ; SSE2-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
2096 ; SSE2-NEXT: movapd {{.*#+}} xmm1 = [2305843009213693952,1152921504606846976]
2097 ; SSE2-NEXT: xorpd %xmm1, %xmm0
2098 ; SSE2-NEXT: psubq %xmm1, %xmm0
2101 ; SSE41-LABEL: constant_shift_v2i16:
2103 ; SSE41-NEXT: psllq $48, %xmm0
2104 ; SSE41-NEXT: movdqa %xmm0, %xmm1
2105 ; SSE41-NEXT: psrad $31, %xmm1
2106 ; SSE41-NEXT: psrad $16, %xmm0
2107 ; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
2108 ; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7]
2109 ; SSE41-NEXT: movdqa %xmm0, %xmm1
2110 ; SSE41-NEXT: psrlq $3, %xmm1
2111 ; SSE41-NEXT: psrlq $2, %xmm0
2112 ; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
2113 ; SSE41-NEXT: movdqa {{.*#+}} xmm1 = [2305843009213693952,1152921504606846976]
2114 ; SSE41-NEXT: pxor %xmm1, %xmm0
2115 ; SSE41-NEXT: psubq %xmm1, %xmm0
2118 ; AVX1-LABEL: constant_shift_v2i16:
2120 ; AVX1-NEXT: vpsllq $48, %xmm0, %xmm0
2121 ; AVX1-NEXT: vpsrad $31, %xmm0, %xmm1
2122 ; AVX1-NEXT: vpsrad $16, %xmm0, %xmm0
2123 ; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
2124 ; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7]
2125 ; AVX1-NEXT: vpsrlq $3, %xmm0, %xmm1
2126 ; AVX1-NEXT: vpsrlq $2, %xmm0, %xmm0
2127 ; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
2128 ; AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = [2305843009213693952,1152921504606846976]
2129 ; AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
2130 ; AVX1-NEXT: vpsubq %xmm1, %xmm0, %xmm0
2133 ; AVX2-LABEL: constant_shift_v2i16:
2135 ; AVX2-NEXT: vpsllq $48, %xmm0, %xmm0
2136 ; AVX2-NEXT: vpsrad $31, %xmm0, %xmm1
2137 ; AVX2-NEXT: vpsrad $16, %xmm0, %xmm0
2138 ; AVX2-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
2139 ; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
2140 ; AVX2-NEXT: vpsrlvq {{.*}}(%rip), %xmm0, %xmm0
2141 ; AVX2-NEXT: vmovdqa {{.*#+}} xmm1 = [2305843009213693952,1152921504606846976]
2142 ; AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
2143 ; AVX2-NEXT: vpsubq %xmm1, %xmm0, %xmm0
2146 ; XOP-LABEL: constant_shift_v2i16:
2148 ; XOP-NEXT: vpsllq $48, %xmm0, %xmm0
2149 ; XOP-NEXT: vpshaq {{.*}}(%rip), %xmm0, %xmm0
2150 ; XOP-NEXT: vpshaq {{.*}}(%rip), %xmm0, %xmm0
2153 ; AVX512-LABEL: constant_shift_v2i16:
2155 ; AVX512-NEXT: vpsllq $48, %xmm0, %xmm0
2156 ; AVX512-NEXT: vpsraq $48, %zmm0, %zmm0
2157 ; AVX512-NEXT: vmovdqa {{.*#+}} xmm1 = [2,3]
2158 ; AVX512-NEXT: vpsravq %zmm1, %zmm0, %zmm0
2159 ; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
2160 ; AVX512-NEXT: vzeroupper
2163 ; AVX512VL-LABEL: constant_shift_v2i16:
2164 ; AVX512VL: # %bb.0:
2165 ; AVX512VL-NEXT: vpsllq $48, %xmm0, %xmm0
2166 ; AVX512VL-NEXT: vpsraq $48, %xmm0, %xmm0
2167 ; AVX512VL-NEXT: vpsravq {{.*}}(%rip), %xmm0, %xmm0
2168 ; AVX512VL-NEXT: retq
2170 ; X32-SSE-LABEL: constant_shift_v2i16:
2172 ; X32-SSE-NEXT: psllq $48, %xmm0
2173 ; X32-SSE-NEXT: movdqa %xmm0, %xmm1
2174 ; X32-SSE-NEXT: psrad $31, %xmm1
2175 ; X32-SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,3,2,3]
2176 ; X32-SSE-NEXT: psrad $16, %xmm0
2177 ; X32-SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,3,2,3]
2178 ; X32-SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
2179 ; X32-SSE-NEXT: movdqa %xmm0, %xmm1
2180 ; X32-SSE-NEXT: psrlq $2, %xmm1
2181 ; X32-SSE-NEXT: psrlq $3, %xmm0
2182 ; X32-SSE-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
2183 ; X32-SSE-NEXT: movapd {{.*#+}} xmm1 = [1.4916681462400413E-154,1.2882297539194267E-231]
2184 ; X32-SSE-NEXT: xorpd %xmm1, %xmm0
2185 ; X32-SSE-NEXT: psubq %xmm1, %xmm0
2186 ; X32-SSE-NEXT: retl
2187 %shift = ashr <2 x i16> %a, <i16 2, i16 3>
2188 ret <2 x i16> %shift
2191 define <8 x i8> @constant_shift_v8i8(<8 x i8> %a) nounwind {
2192 ; SSE2-LABEL: constant_shift_v8i8:
2194 ; SSE2-NEXT: psllw $8, %xmm0
2195 ; SSE2-NEXT: movdqa %xmm0, %xmm1
2196 ; SSE2-NEXT: psraw $8, %xmm1
2197 ; SSE2-NEXT: psraw $12, %xmm0
2198 ; SSE2-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
2199 ; SSE2-NEXT: movapd %xmm0, %xmm1
2200 ; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2],xmm0[2,3]
2201 ; SSE2-NEXT: psraw $2, %xmm0
2202 ; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,3,2,3]
2203 ; SSE2-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
2204 ; SSE2-NEXT: movaps {{.*#+}} xmm2 = [65535,0,65535,0,65535,0,65535,0]
2205 ; SSE2-NEXT: movaps %xmm1, %xmm0
2206 ; SSE2-NEXT: andps %xmm2, %xmm0
2207 ; SSE2-NEXT: psraw $1, %xmm1
2208 ; SSE2-NEXT: andnps %xmm1, %xmm2
2209 ; SSE2-NEXT: orps %xmm2, %xmm0
2212 ; SSE41-LABEL: constant_shift_v8i8:
2214 ; SSE41-NEXT: psllw $8, %xmm0
2215 ; SSE41-NEXT: movdqa %xmm0, %xmm1
2216 ; SSE41-NEXT: psraw $8, %xmm1
2217 ; SSE41-NEXT: movdqa {{.*#+}} xmm2 = <u,32768,16384,8192,4096,2048,1024,512>
2218 ; SSE41-NEXT: pmulhw %xmm1, %xmm2
2219 ; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1,2,3,4,5,6,7]
2220 ; SSE41-NEXT: psraw $9, %xmm0
2221 ; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2,3,4,5,6,7]
2224 ; AVX-LABEL: constant_shift_v8i8:
2226 ; AVX-NEXT: vpsllw $8, %xmm0, %xmm0
2227 ; AVX-NEXT: vpsraw $8, %xmm0, %xmm1
2228 ; AVX-NEXT: vpmulhw {{.*}}(%rip), %xmm1, %xmm2
2229 ; AVX-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1,2,3,4,5,6,7]
2230 ; AVX-NEXT: vpsraw $9, %xmm0, %xmm0
2231 ; AVX-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2,3,4,5,6,7]
2234 ; XOP-LABEL: constant_shift_v8i8:
2236 ; XOP-NEXT: vpsllw $8, %xmm0, %xmm0
2237 ; XOP-NEXT: vpsraw $8, %xmm0, %xmm0
2238 ; XOP-NEXT: vpshaw {{.*}}(%rip), %xmm0, %xmm0
2241 ; AVX512DQ-LABEL: constant_shift_v8i8:
2242 ; AVX512DQ: # %bb.0:
2243 ; AVX512DQ-NEXT: vpsllw $8, %xmm0, %xmm0
2244 ; AVX512DQ-NEXT: vpsraw $8, %xmm0, %xmm0
2245 ; AVX512DQ-NEXT: vpmovsxwd %xmm0, %ymm0
2246 ; AVX512DQ-NEXT: vpsravd {{.*}}(%rip), %ymm0, %ymm0
2247 ; AVX512DQ-NEXT: vpmovdw %zmm0, %ymm0
2248 ; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
2249 ; AVX512DQ-NEXT: vzeroupper
2250 ; AVX512DQ-NEXT: retq
2252 ; AVX512BW-LABEL: constant_shift_v8i8:
2253 ; AVX512BW: # %bb.0:
2254 ; AVX512BW-NEXT: vpsllw $8, %xmm0, %xmm0
2255 ; AVX512BW-NEXT: vpsraw $8, %xmm0, %xmm0
2256 ; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm1 = [0,1,2,3,4,5,6,7]
2257 ; AVX512BW-NEXT: vpsravw %zmm1, %zmm0, %zmm0
2258 ; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
2259 ; AVX512BW-NEXT: vzeroupper
2260 ; AVX512BW-NEXT: retq
2262 ; AVX512DQVL-LABEL: constant_shift_v8i8:
2263 ; AVX512DQVL: # %bb.0:
2264 ; AVX512DQVL-NEXT: vpsllw $8, %xmm0, %xmm0
2265 ; AVX512DQVL-NEXT: vpsraw $8, %xmm0, %xmm0
2266 ; AVX512DQVL-NEXT: vpmovsxwd %xmm0, %ymm0
2267 ; AVX512DQVL-NEXT: vpsravd {{.*}}(%rip), %ymm0, %ymm0
2268 ; AVX512DQVL-NEXT: vpmovdw %ymm0, %xmm0
2269 ; AVX512DQVL-NEXT: vzeroupper
2270 ; AVX512DQVL-NEXT: retq
2272 ; AVX512BWVL-LABEL: constant_shift_v8i8:
2273 ; AVX512BWVL: # %bb.0:
2274 ; AVX512BWVL-NEXT: vpsllw $8, %xmm0, %xmm0
2275 ; AVX512BWVL-NEXT: vpsraw $8, %xmm0, %xmm0
2276 ; AVX512BWVL-NEXT: vpsravw {{.*}}(%rip), %xmm0, %xmm0
2277 ; AVX512BWVL-NEXT: retq
2279 ; X32-SSE-LABEL: constant_shift_v8i8:
2281 ; X32-SSE-NEXT: psllw $8, %xmm0
2282 ; X32-SSE-NEXT: movdqa %xmm0, %xmm1
2283 ; X32-SSE-NEXT: psraw $8, %xmm1
2284 ; X32-SSE-NEXT: psraw $12, %xmm0
2285 ; X32-SSE-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
2286 ; X32-SSE-NEXT: movapd %xmm0, %xmm1
2287 ; X32-SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2],xmm0[2,3]
2288 ; X32-SSE-NEXT: psraw $2, %xmm0
2289 ; X32-SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,3,2,3]
2290 ; X32-SSE-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
2291 ; X32-SSE-NEXT: movaps {{.*#+}} xmm2 = [65535,0,65535,0,65535,0,65535,0]
2292 ; X32-SSE-NEXT: movaps %xmm1, %xmm0
2293 ; X32-SSE-NEXT: andps %xmm2, %xmm0
2294 ; X32-SSE-NEXT: psraw $1, %xmm1
2295 ; X32-SSE-NEXT: andnps %xmm1, %xmm2
2296 ; X32-SSE-NEXT: orps %xmm2, %xmm0
2297 ; X32-SSE-NEXT: retl
2298 %shift = ashr <8 x i8> %a, <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7>
2302 define <4 x i8> @constant_shift_v4i8(<4 x i8> %a) nounwind {
2303 ; SSE2-LABEL: constant_shift_v4i8:
2305 ; SSE2-NEXT: pslld $24, %xmm0
2306 ; SSE2-NEXT: movdqa %xmm0, %xmm1
2307 ; SSE2-NEXT: psrad $24, %xmm1
2308 ; SSE2-NEXT: movdqa %xmm0, %xmm2
2309 ; SSE2-NEXT: psrad $27, %xmm2
2310 ; SSE2-NEXT: movdqa %xmm0, %xmm3
2311 ; SSE2-NEXT: psrad $26, %xmm3
2312 ; SSE2-NEXT: punpckhqdq {{.*#+}} xmm3 = xmm3[1],xmm2[1]
2313 ; SSE2-NEXT: psrad $25, %xmm0
2314 ; SSE2-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
2315 ; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,3],xmm3[0,3]
2316 ; SSE2-NEXT: movaps %xmm1, %xmm0
2319 ; SSE41-LABEL: constant_shift_v4i8:
2321 ; SSE41-NEXT: pslld $24, %xmm0
2322 ; SSE41-NEXT: movdqa %xmm0, %xmm1
2323 ; SSE41-NEXT: psrad $24, %xmm1
2324 ; SSE41-NEXT: movdqa %xmm0, %xmm2
2325 ; SSE41-NEXT: psrad $27, %xmm2
2326 ; SSE41-NEXT: movdqa %xmm0, %xmm3
2327 ; SSE41-NEXT: psrad $25, %xmm3
2328 ; SSE41-NEXT: pblendw {{.*#+}} xmm3 = xmm3[0,1,2,3],xmm2[4,5,6,7]
2329 ; SSE41-NEXT: psrad $26, %xmm0
2330 ; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7]
2331 ; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm3[2,3],xmm0[4,5],xmm3[6,7]
2334 ; AVX1-LABEL: constant_shift_v4i8:
2336 ; AVX1-NEXT: vpslld $24, %xmm0, %xmm0
2337 ; AVX1-NEXT: vpsrad $24, %xmm0, %xmm1
2338 ; AVX1-NEXT: vpsrad $27, %xmm0, %xmm2
2339 ; AVX1-NEXT: vpsrad $25, %xmm0, %xmm3
2340 ; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm3[0,1,2,3],xmm2[4,5,6,7]
2341 ; AVX1-NEXT: vpsrad $26, %xmm0, %xmm0
2342 ; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7]
2343 ; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
2346 ; AVX2-LABEL: constant_shift_v4i8:
2348 ; AVX2-NEXT: vpslld $24, %xmm0, %xmm0
2349 ; AVX2-NEXT: vpsrad $24, %xmm0, %xmm0
2350 ; AVX2-NEXT: vpsravd {{.*}}(%rip), %xmm0, %xmm0
2353 ; XOPAVX1-LABEL: constant_shift_v4i8:
2355 ; XOPAVX1-NEXT: vpslld $24, %xmm0, %xmm0
2356 ; XOPAVX1-NEXT: vpsrad $24, %xmm0, %xmm0
2357 ; XOPAVX1-NEXT: vpshad {{.*}}(%rip), %xmm0, %xmm0
2358 ; XOPAVX1-NEXT: retq
2360 ; XOPAVX2-LABEL: constant_shift_v4i8:
2362 ; XOPAVX2-NEXT: vpslld $24, %xmm0, %xmm0
2363 ; XOPAVX2-NEXT: vpsrad $24, %xmm0, %xmm0
2364 ; XOPAVX2-NEXT: vpsravd {{.*}}(%rip), %xmm0, %xmm0
2365 ; XOPAVX2-NEXT: retq
2367 ; AVX512-LABEL: constant_shift_v4i8:
2369 ; AVX512-NEXT: vpslld $24, %xmm0, %xmm0
2370 ; AVX512-NEXT: vpsrad $24, %xmm0, %xmm0
2371 ; AVX512-NEXT: vpsravd {{.*}}(%rip), %xmm0, %xmm0
2374 ; AVX512VL-LABEL: constant_shift_v4i8:
2375 ; AVX512VL: # %bb.0:
2376 ; AVX512VL-NEXT: vpslld $24, %xmm0, %xmm0
2377 ; AVX512VL-NEXT: vpsrad $24, %xmm0, %xmm0
2378 ; AVX512VL-NEXT: vpsravd {{.*}}(%rip), %xmm0, %xmm0
2379 ; AVX512VL-NEXT: retq
2381 ; X32-SSE-LABEL: constant_shift_v4i8:
2383 ; X32-SSE-NEXT: pslld $24, %xmm0
2384 ; X32-SSE-NEXT: movdqa %xmm0, %xmm1
2385 ; X32-SSE-NEXT: psrad $24, %xmm1
2386 ; X32-SSE-NEXT: movdqa %xmm0, %xmm2
2387 ; X32-SSE-NEXT: psrad $27, %xmm2
2388 ; X32-SSE-NEXT: movdqa %xmm0, %xmm3
2389 ; X32-SSE-NEXT: psrad $26, %xmm3
2390 ; X32-SSE-NEXT: punpckhqdq {{.*#+}} xmm3 = xmm3[1],xmm2[1]
2391 ; X32-SSE-NEXT: psrad $25, %xmm0
2392 ; X32-SSE-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
2393 ; X32-SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,3],xmm3[0,3]
2394 ; X32-SSE-NEXT: movaps %xmm1, %xmm0
2395 ; X32-SSE-NEXT: retl
2396 %shift = ashr <4 x i8> %a, <i8 0, i8 1, i8 2, i8 3>
2400 define <2 x i8> @constant_shift_v2i8(<2 x i8> %a) nounwind {
2401 ; SSE2-LABEL: constant_shift_v2i8:
2403 ; SSE2-NEXT: psllq $56, %xmm0
2404 ; SSE2-NEXT: movdqa %xmm0, %xmm1
2405 ; SSE2-NEXT: psrad $31, %xmm1
2406 ; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,3,2,3]
2407 ; SSE2-NEXT: psrad $24, %xmm0
2408 ; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,3,2,3]
2409 ; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
2410 ; SSE2-NEXT: movdqa %xmm0, %xmm1
2411 ; SSE2-NEXT: psrlq $2, %xmm1
2412 ; SSE2-NEXT: psrlq $3, %xmm0
2413 ; SSE2-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
2414 ; SSE2-NEXT: movapd {{.*#+}} xmm1 = [2305843009213693952,1152921504606846976]
2415 ; SSE2-NEXT: xorpd %xmm1, %xmm0
2416 ; SSE2-NEXT: psubq %xmm1, %xmm0
2419 ; SSE41-LABEL: constant_shift_v2i8:
2421 ; SSE41-NEXT: psllq $56, %xmm0
2422 ; SSE41-NEXT: movdqa %xmm0, %xmm1
2423 ; SSE41-NEXT: psrad $31, %xmm1
2424 ; SSE41-NEXT: psrad $24, %xmm0
2425 ; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
2426 ; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7]
2427 ; SSE41-NEXT: movdqa %xmm0, %xmm1
2428 ; SSE41-NEXT: psrlq $3, %xmm1
2429 ; SSE41-NEXT: psrlq $2, %xmm0
2430 ; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
2431 ; SSE41-NEXT: movdqa {{.*#+}} xmm1 = [2305843009213693952,1152921504606846976]
2432 ; SSE41-NEXT: pxor %xmm1, %xmm0
2433 ; SSE41-NEXT: psubq %xmm1, %xmm0
2436 ; AVX1-LABEL: constant_shift_v2i8:
2438 ; AVX1-NEXT: vpsllq $56, %xmm0, %xmm0
2439 ; AVX1-NEXT: vpsrad $31, %xmm0, %xmm1
2440 ; AVX1-NEXT: vpsrad $24, %xmm0, %xmm0
2441 ; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
2442 ; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7]
2443 ; AVX1-NEXT: vpsrlq $3, %xmm0, %xmm1
2444 ; AVX1-NEXT: vpsrlq $2, %xmm0, %xmm0
2445 ; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
2446 ; AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = [2305843009213693952,1152921504606846976]
2447 ; AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
2448 ; AVX1-NEXT: vpsubq %xmm1, %xmm0, %xmm0
2451 ; AVX2-LABEL: constant_shift_v2i8:
2453 ; AVX2-NEXT: vpsllq $56, %xmm0, %xmm0
2454 ; AVX2-NEXT: vpsrad $31, %xmm0, %xmm1
2455 ; AVX2-NEXT: vpsrad $24, %xmm0, %xmm0
2456 ; AVX2-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
2457 ; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
2458 ; AVX2-NEXT: vpsrlvq {{.*}}(%rip), %xmm0, %xmm0
2459 ; AVX2-NEXT: vmovdqa {{.*#+}} xmm1 = [2305843009213693952,1152921504606846976]
2460 ; AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
2461 ; AVX2-NEXT: vpsubq %xmm1, %xmm0, %xmm0
2464 ; XOP-LABEL: constant_shift_v2i8:
2466 ; XOP-NEXT: vpsllq $56, %xmm0, %xmm0
2467 ; XOP-NEXT: vpshaq {{.*}}(%rip), %xmm0, %xmm0
2468 ; XOP-NEXT: vpshaq {{.*}}(%rip), %xmm0, %xmm0
2471 ; AVX512-LABEL: constant_shift_v2i8:
2473 ; AVX512-NEXT: vpsllq $56, %xmm0, %xmm0
2474 ; AVX512-NEXT: vpsraq $56, %zmm0, %zmm0
2475 ; AVX512-NEXT: vmovdqa {{.*#+}} xmm1 = [2,3]
2476 ; AVX512-NEXT: vpsravq %zmm1, %zmm0, %zmm0
2477 ; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
2478 ; AVX512-NEXT: vzeroupper
2481 ; AVX512VL-LABEL: constant_shift_v2i8:
2482 ; AVX512VL: # %bb.0:
2483 ; AVX512VL-NEXT: vpsllq $56, %xmm0, %xmm0
2484 ; AVX512VL-NEXT: vpsraq $56, %xmm0, %xmm0
2485 ; AVX512VL-NEXT: vpsravq {{.*}}(%rip), %xmm0, %xmm0
2486 ; AVX512VL-NEXT: retq
2488 ; X32-SSE-LABEL: constant_shift_v2i8:
2490 ; X32-SSE-NEXT: psllq $56, %xmm0
2491 ; X32-SSE-NEXT: movdqa %xmm0, %xmm1
2492 ; X32-SSE-NEXT: psrad $31, %xmm1
2493 ; X32-SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,3,2,3]
2494 ; X32-SSE-NEXT: psrad $24, %xmm0
2495 ; X32-SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,3,2,3]
2496 ; X32-SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
2497 ; X32-SSE-NEXT: movdqa %xmm0, %xmm1
2498 ; X32-SSE-NEXT: psrlq $2, %xmm1
2499 ; X32-SSE-NEXT: psrlq $3, %xmm0
2500 ; X32-SSE-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
2501 ; X32-SSE-NEXT: movapd {{.*#+}} xmm1 = [1.4916681462400413E-154,1.2882297539194267E-231]
2502 ; X32-SSE-NEXT: xorpd %xmm1, %xmm0
2503 ; X32-SSE-NEXT: psubq %xmm1, %xmm0
2504 ; X32-SSE-NEXT: retl
2505 %shift = ashr <2 x i8> %a, <i8 2, i8 3>
2510 ; Uniform Constant Shifts
2513 define <2 x i32> @splatconstant_shift_v2i32(<2 x i32> %a) nounwind {
2514 ; SSE2-LABEL: splatconstant_shift_v2i32:
2516 ; SSE2-NEXT: psllq $32, %xmm0
2517 ; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,3,2,3]
2518 ; SSE2-NEXT: psrad $31, %xmm0
2519 ; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,3,2,3]
2520 ; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
2521 ; SSE2-NEXT: movdqa %xmm1, %xmm0
2522 ; SSE2-NEXT: psrad $5, %xmm0
2523 ; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,3,2,3]
2524 ; SSE2-NEXT: psrlq $5, %xmm1
2525 ; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[0,2,2,3]
2526 ; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
2529 ; SSE41-LABEL: splatconstant_shift_v2i32:
2531 ; SSE41-NEXT: movdqa %xmm0, %xmm1
2532 ; SSE41-NEXT: psllq $32, %xmm1
2533 ; SSE41-NEXT: psrad $31, %xmm1
2534 ; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7]
2535 ; SSE41-NEXT: movdqa %xmm1, %xmm0
2536 ; SSE41-NEXT: psrad $5, %xmm0
2537 ; SSE41-NEXT: psrlq $5, %xmm1
2538 ; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1],xmm0[2,3],xmm1[4,5],xmm0[6,7]
2539 ; SSE41-NEXT: movdqa %xmm1, %xmm0
2542 ; AVX1-LABEL: splatconstant_shift_v2i32:
2544 ; AVX1-NEXT: vpsllq $32, %xmm0, %xmm1
2545 ; AVX1-NEXT: vpsrad $31, %xmm1, %xmm1
2546 ; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7]
2547 ; AVX1-NEXT: vpsrad $5, %xmm0, %xmm1
2548 ; AVX1-NEXT: vpsrlq $5, %xmm0, %xmm0
2549 ; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7]
2552 ; AVX2-LABEL: splatconstant_shift_v2i32:
2554 ; AVX2-NEXT: vpsllq $32, %xmm0, %xmm1
2555 ; AVX2-NEXT: vpsrad $31, %xmm1, %xmm1
2556 ; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
2557 ; AVX2-NEXT: vpsrad $5, %xmm0, %xmm1
2558 ; AVX2-NEXT: vpsrlq $5, %xmm0, %xmm0
2559 ; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
2562 ; XOP-LABEL: splatconstant_shift_v2i32:
2564 ; XOP-NEXT: vpsllq $32, %xmm0, %xmm0
2565 ; XOP-NEXT: vpshaq {{.*}}(%rip), %xmm0, %xmm0
2566 ; XOP-NEXT: vpshaq {{.*}}(%rip), %xmm0, %xmm0
2569 ; AVX512-LABEL: splatconstant_shift_v2i32:
2571 ; AVX512-NEXT: vpsllq $32, %xmm0, %xmm0
2572 ; AVX512-NEXT: vpsraq $37, %zmm0, %zmm0
2573 ; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
2574 ; AVX512-NEXT: vzeroupper
2577 ; AVX512VL-LABEL: splatconstant_shift_v2i32:
2578 ; AVX512VL: # %bb.0:
2579 ; AVX512VL-NEXT: vpsllq $32, %xmm0, %xmm0
2580 ; AVX512VL-NEXT: vpsraq $37, %xmm0, %xmm0
2581 ; AVX512VL-NEXT: retq
2583 ; X32-SSE-LABEL: splatconstant_shift_v2i32:
2585 ; X32-SSE-NEXT: psllq $32, %xmm0
2586 ; X32-SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,3,2,3]
2587 ; X32-SSE-NEXT: psrad $31, %xmm0
2588 ; X32-SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,3,2,3]
2589 ; X32-SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
2590 ; X32-SSE-NEXT: movdqa %xmm1, %xmm0
2591 ; X32-SSE-NEXT: psrad $5, %xmm0
2592 ; X32-SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,3,2,3]
2593 ; X32-SSE-NEXT: psrlq $5, %xmm1
2594 ; X32-SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[0,2,2,3]
2595 ; X32-SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
2596 ; X32-SSE-NEXT: retl
2597 %shift = ashr <2 x i32> %a, <i32 5, i32 5>
2598 ret <2 x i32> %shift
2601 define <4 x i16> @splatconstant_shift_v4i16(<4 x i16> %a) nounwind {
2602 ; SSE-LABEL: splatconstant_shift_v4i16:
2604 ; SSE-NEXT: pslld $16, %xmm0
2605 ; SSE-NEXT: psrad $19, %xmm0
2608 ; AVX-LABEL: splatconstant_shift_v4i16:
2610 ; AVX-NEXT: vpslld $16, %xmm0, %xmm0
2611 ; AVX-NEXT: vpsrad $19, %xmm0, %xmm0
2614 ; XOP-LABEL: splatconstant_shift_v4i16:
2616 ; XOP-NEXT: vpslld $16, %xmm0, %xmm0
2617 ; XOP-NEXT: vpsrad $19, %xmm0, %xmm0
2620 ; AVX512-LABEL: splatconstant_shift_v4i16:
2622 ; AVX512-NEXT: vpslld $16, %xmm0, %xmm0
2623 ; AVX512-NEXT: vpsrad $19, %xmm0, %xmm0
2626 ; AVX512VL-LABEL: splatconstant_shift_v4i16:
2627 ; AVX512VL: # %bb.0:
2628 ; AVX512VL-NEXT: vpslld $16, %xmm0, %xmm0
2629 ; AVX512VL-NEXT: vpsrad $19, %xmm0, %xmm0
2630 ; AVX512VL-NEXT: retq
2632 ; X32-SSE-LABEL: splatconstant_shift_v4i16:
2634 ; X32-SSE-NEXT: pslld $16, %xmm0
2635 ; X32-SSE-NEXT: psrad $19, %xmm0
2636 ; X32-SSE-NEXT: retl
2637 %shift = ashr <4 x i16> %a, <i16 3, i16 3, i16 3, i16 3>
2638 ret <4 x i16> %shift
2641 define <2 x i16> @splatconstant_shift_v2i16(<2 x i16> %a) nounwind {
2642 ; SSE2-LABEL: splatconstant_shift_v2i16:
2644 ; SSE2-NEXT: psllq $48, %xmm0
2645 ; SSE2-NEXT: movdqa %xmm0, %xmm1
2646 ; SSE2-NEXT: psrad $31, %xmm1
2647 ; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,3,2,3]
2648 ; SSE2-NEXT: psrad $16, %xmm0
2649 ; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,3,2,3]
2650 ; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
2651 ; SSE2-NEXT: movdqa %xmm0, %xmm1
2652 ; SSE2-NEXT: psrad $3, %xmm1
2653 ; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,3,2,3]
2654 ; SSE2-NEXT: psrlq $3, %xmm0
2655 ; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
2656 ; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
2659 ; SSE41-LABEL: splatconstant_shift_v2i16:
2661 ; SSE41-NEXT: psllq $48, %xmm0
2662 ; SSE41-NEXT: movdqa %xmm0, %xmm1
2663 ; SSE41-NEXT: psrad $31, %xmm1
2664 ; SSE41-NEXT: psrad $16, %xmm0
2665 ; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
2666 ; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7]
2667 ; SSE41-NEXT: movdqa %xmm0, %xmm1
2668 ; SSE41-NEXT: psrad $3, %xmm1
2669 ; SSE41-NEXT: psrlq $3, %xmm0
2670 ; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7]
2673 ; AVX1-LABEL: splatconstant_shift_v2i16:
2675 ; AVX1-NEXT: vpsllq $48, %xmm0, %xmm0
2676 ; AVX1-NEXT: vpsrad $31, %xmm0, %xmm1
2677 ; AVX1-NEXT: vpsrad $16, %xmm0, %xmm0
2678 ; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
2679 ; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7]
2680 ; AVX1-NEXT: vpsrad $3, %xmm0, %xmm1
2681 ; AVX1-NEXT: vpsrlq $3, %xmm0, %xmm0
2682 ; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7]
2685 ; AVX2-LABEL: splatconstant_shift_v2i16:
2687 ; AVX2-NEXT: vpsllq $48, %xmm0, %xmm0
2688 ; AVX2-NEXT: vpsrad $31, %xmm0, %xmm1
2689 ; AVX2-NEXT: vpsrad $16, %xmm0, %xmm0
2690 ; AVX2-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
2691 ; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
2692 ; AVX2-NEXT: vpsrad $3, %xmm0, %xmm1
2693 ; AVX2-NEXT: vpsrlq $3, %xmm0, %xmm0
2694 ; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
2697 ; XOP-LABEL: splatconstant_shift_v2i16:
2699 ; XOP-NEXT: vpsllq $48, %xmm0, %xmm0
2700 ; XOP-NEXT: vpshaq {{.*}}(%rip), %xmm0, %xmm0
2701 ; XOP-NEXT: vpshaq {{.*}}(%rip), %xmm0, %xmm0
2704 ; AVX512-LABEL: splatconstant_shift_v2i16:
2706 ; AVX512-NEXT: vpsllq $48, %xmm0, %xmm0
2707 ; AVX512-NEXT: vpsraq $51, %zmm0, %zmm0
2708 ; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
2709 ; AVX512-NEXT: vzeroupper
2712 ; AVX512VL-LABEL: splatconstant_shift_v2i16:
2713 ; AVX512VL: # %bb.0:
2714 ; AVX512VL-NEXT: vpsllq $48, %xmm0, %xmm0
2715 ; AVX512VL-NEXT: vpsraq $51, %xmm0, %xmm0
2716 ; AVX512VL-NEXT: retq
2718 ; X32-SSE-LABEL: splatconstant_shift_v2i16:
2720 ; X32-SSE-NEXT: psllq $48, %xmm0
2721 ; X32-SSE-NEXT: movdqa %xmm0, %xmm1
2722 ; X32-SSE-NEXT: psrad $31, %xmm1
2723 ; X32-SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,3,2,3]
2724 ; X32-SSE-NEXT: psrad $16, %xmm0
2725 ; X32-SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,3,2,3]
2726 ; X32-SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
2727 ; X32-SSE-NEXT: movdqa %xmm0, %xmm1
2728 ; X32-SSE-NEXT: psrad $3, %xmm1
2729 ; X32-SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,3,2,3]
2730 ; X32-SSE-NEXT: psrlq $3, %xmm0
2731 ; X32-SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
2732 ; X32-SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
2733 ; X32-SSE-NEXT: retl
2734 %shift = ashr <2 x i16> %a, <i16 3, i16 3>
2735 ret <2 x i16> %shift
2738 define <8 x i8> @splatconstant_shift_v8i8(<8 x i8> %a) nounwind {
2739 ; SSE-LABEL: splatconstant_shift_v8i8:
2741 ; SSE-NEXT: psllw $8, %xmm0
2742 ; SSE-NEXT: psraw $11, %xmm0
2745 ; AVX-LABEL: splatconstant_shift_v8i8:
2747 ; AVX-NEXT: vpsllw $8, %xmm0, %xmm0
2748 ; AVX-NEXT: vpsraw $11, %xmm0, %xmm0
2751 ; XOP-LABEL: splatconstant_shift_v8i8:
2753 ; XOP-NEXT: vpsllw $8, %xmm0, %xmm0
2754 ; XOP-NEXT: vpsraw $11, %xmm0, %xmm0
2757 ; AVX512-LABEL: splatconstant_shift_v8i8:
2759 ; AVX512-NEXT: vpsllw $8, %xmm0, %xmm0
2760 ; AVX512-NEXT: vpsraw $11, %xmm0, %xmm0
2763 ; AVX512VL-LABEL: splatconstant_shift_v8i8:
2764 ; AVX512VL: # %bb.0:
2765 ; AVX512VL-NEXT: vpsllw $8, %xmm0, %xmm0
2766 ; AVX512VL-NEXT: vpsraw $11, %xmm0, %xmm0
2767 ; AVX512VL-NEXT: retq
2769 ; X32-SSE-LABEL: splatconstant_shift_v8i8:
2771 ; X32-SSE-NEXT: psllw $8, %xmm0
2772 ; X32-SSE-NEXT: psraw $11, %xmm0
2773 ; X32-SSE-NEXT: retl
2774 %shift = ashr <8 x i8> %a, <i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3>
2778 define <4 x i8> @splatconstant_shift_v4i8(<4 x i8> %a) nounwind {
2779 ; SSE-LABEL: splatconstant_shift_v4i8:
2781 ; SSE-NEXT: pslld $24, %xmm0
2782 ; SSE-NEXT: psrad $27, %xmm0
2785 ; AVX-LABEL: splatconstant_shift_v4i8:
2787 ; AVX-NEXT: vpslld $24, %xmm0, %xmm0
2788 ; AVX-NEXT: vpsrad $27, %xmm0, %xmm0
2791 ; XOP-LABEL: splatconstant_shift_v4i8:
2793 ; XOP-NEXT: vpslld $24, %xmm0, %xmm0
2794 ; XOP-NEXT: vpsrad $27, %xmm0, %xmm0
2797 ; AVX512-LABEL: splatconstant_shift_v4i8:
2799 ; AVX512-NEXT: vpslld $24, %xmm0, %xmm0
2800 ; AVX512-NEXT: vpsrad $27, %xmm0, %xmm0
2803 ; AVX512VL-LABEL: splatconstant_shift_v4i8:
2804 ; AVX512VL: # %bb.0:
2805 ; AVX512VL-NEXT: vpslld $24, %xmm0, %xmm0
2806 ; AVX512VL-NEXT: vpsrad $27, %xmm0, %xmm0
2807 ; AVX512VL-NEXT: retq
2809 ; X32-SSE-LABEL: splatconstant_shift_v4i8:
2811 ; X32-SSE-NEXT: pslld $24, %xmm0
2812 ; X32-SSE-NEXT: psrad $27, %xmm0
2813 ; X32-SSE-NEXT: retl
2814 %shift = ashr <4 x i8> %a, <i8 3, i8 3, i8 3, i8 3>
2818 define <2 x i8> @splatconstant_shift_v2i8(<2 x i8> %a) nounwind {
2819 ; SSE2-LABEL: splatconstant_shift_v2i8:
2821 ; SSE2-NEXT: psllq $56, %xmm0
2822 ; SSE2-NEXT: movdqa %xmm0, %xmm1
2823 ; SSE2-NEXT: psrad $31, %xmm1
2824 ; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,3,2,3]
2825 ; SSE2-NEXT: psrad $24, %xmm0
2826 ; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,3,2,3]
2827 ; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
2828 ; SSE2-NEXT: movdqa %xmm0, %xmm1
2829 ; SSE2-NEXT: psrad $3, %xmm1
2830 ; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,3,2,3]
2831 ; SSE2-NEXT: psrlq $3, %xmm0
2832 ; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
2833 ; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
2836 ; SSE41-LABEL: splatconstant_shift_v2i8:
2838 ; SSE41-NEXT: psllq $56, %xmm0
2839 ; SSE41-NEXT: movdqa %xmm0, %xmm1
2840 ; SSE41-NEXT: psrad $31, %xmm1
2841 ; SSE41-NEXT: psrad $24, %xmm0
2842 ; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
2843 ; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7]
2844 ; SSE41-NEXT: movdqa %xmm0, %xmm1
2845 ; SSE41-NEXT: psrad $3, %xmm1
2846 ; SSE41-NEXT: psrlq $3, %xmm0
2847 ; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7]
2850 ; AVX1-LABEL: splatconstant_shift_v2i8:
2852 ; AVX1-NEXT: vpsllq $56, %xmm0, %xmm0
2853 ; AVX1-NEXT: vpsrad $31, %xmm0, %xmm1
2854 ; AVX1-NEXT: vpsrad $24, %xmm0, %xmm0
2855 ; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
2856 ; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7]
2857 ; AVX1-NEXT: vpsrad $3, %xmm0, %xmm1
2858 ; AVX1-NEXT: vpsrlq $3, %xmm0, %xmm0
2859 ; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7]
2862 ; AVX2-LABEL: splatconstant_shift_v2i8:
2864 ; AVX2-NEXT: vpsllq $56, %xmm0, %xmm0
2865 ; AVX2-NEXT: vpsrad $31, %xmm0, %xmm1
2866 ; AVX2-NEXT: vpsrad $24, %xmm0, %xmm0
2867 ; AVX2-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
2868 ; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
2869 ; AVX2-NEXT: vpsrad $3, %xmm0, %xmm1
2870 ; AVX2-NEXT: vpsrlq $3, %xmm0, %xmm0
2871 ; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
2874 ; XOP-LABEL: splatconstant_shift_v2i8:
2876 ; XOP-NEXT: vpsllq $56, %xmm0, %xmm0
2877 ; XOP-NEXT: vpshaq {{.*}}(%rip), %xmm0, %xmm0
2878 ; XOP-NEXT: vpshaq {{.*}}(%rip), %xmm0, %xmm0
2881 ; AVX512-LABEL: splatconstant_shift_v2i8:
2883 ; AVX512-NEXT: vpsllq $56, %xmm0, %xmm0
2884 ; AVX512-NEXT: vpsraq $59, %zmm0, %zmm0
2885 ; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
2886 ; AVX512-NEXT: vzeroupper
2889 ; AVX512VL-LABEL: splatconstant_shift_v2i8:
2890 ; AVX512VL: # %bb.0:
2891 ; AVX512VL-NEXT: vpsllq $56, %xmm0, %xmm0
2892 ; AVX512VL-NEXT: vpsraq $59, %xmm0, %xmm0
2893 ; AVX512VL-NEXT: retq
2895 ; X32-SSE-LABEL: splatconstant_shift_v2i8:
2897 ; X32-SSE-NEXT: psllq $56, %xmm0
2898 ; X32-SSE-NEXT: movdqa %xmm0, %xmm1
2899 ; X32-SSE-NEXT: psrad $31, %xmm1
2900 ; X32-SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,3,2,3]
2901 ; X32-SSE-NEXT: psrad $24, %xmm0
2902 ; X32-SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,3,2,3]
2903 ; X32-SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
2904 ; X32-SSE-NEXT: movdqa %xmm0, %xmm1
2905 ; X32-SSE-NEXT: psrad $3, %xmm1
2906 ; X32-SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,3,2,3]
2907 ; X32-SSE-NEXT: psrlq $3, %xmm0
2908 ; X32-SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
2909 ; X32-SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
2910 ; X32-SSE-NEXT: retl
2911 %shift = ashr <2 x i8> %a, <i8 3, i8 3>