1 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=SSE2-SSSE3,SSE2
3 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+ssse3 | FileCheck %s --check-prefixes=SSE2-SSSE3,SSSE3
4 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=AVX1
5 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=AVX2
6 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefix=AVX2
7 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2,+fast-variable-perlane-shuffle | FileCheck %s --check-prefix=AVX2
8 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefix=AVX512F
9 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512vl,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefix=AVX512VLBW
10 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512vl,+avx512bw,+fast-variable-perlane-shuffle | FileCheck %s --check-prefix=AVX512VLBW
; Zero-extend an i2 bit-mask to <2 x i64> (each lane becomes 0 or 1).
; Pre-AVX512 lowering: broadcast the mask, AND with per-lane bits [1,2],
; compare-eq, then logical shift right to produce 0/1 per element.
16 define <2 x i64> @ext_i2_2i64(i2 %a0) {
17 ; SSE2-SSSE3-LABEL: ext_i2_2i64:
18 ; SSE2-SSSE3: # %bb.0:
19 ; SSE2-SSSE3-NEXT: movd %edi, %xmm0
20 ; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,1,0,1]
21 ; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [1,2]
22 ; SSE2-SSSE3-NEXT: pand %xmm0, %xmm1
23 ; SSE2-SSSE3-NEXT: pcmpeqd %xmm0, %xmm1
24 ; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,0,3,2]
25 ; SSE2-SSSE3-NEXT: pand %xmm1, %xmm0
26 ; SSE2-SSSE3-NEXT: psrlq $63, %xmm0
27 ; SSE2-SSSE3-NEXT: retq
29 ; AVX1-LABEL: ext_i2_2i64:
31 ; AVX1-NEXT: vmovd %edi, %xmm0
32 ; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,0,1]
33 ; AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = [1,2]
34 ; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm0
35 ; AVX1-NEXT: vpcmpeqq %xmm1, %xmm0, %xmm0
36 ; AVX1-NEXT: vpsrlq $63, %xmm0, %xmm0
39 ; AVX2-LABEL: ext_i2_2i64:
41 ; AVX2-NEXT: vmovd %edi, %xmm0
42 ; AVX2-NEXT: vpbroadcastd %xmm0, %xmm0
43 ; AVX2-NEXT: vmovdqa {{.*#+}} xmm1 = [1,2]
44 ; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
45 ; AVX2-NEXT: vpcmpeqq %xmm1, %xmm0, %xmm0
46 ; AVX2-NEXT: vpsrlq $63, %xmm0, %xmm0
49 ; AVX512F-LABEL: ext_i2_2i64:
51 ; AVX512F-NEXT: kmovw %edi, %k1
52 ; AVX512F-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
53 ; AVX512F-NEXT: vpsrlq $63, %xmm0, %xmm0
54 ; AVX512F-NEXT: vzeroupper
57 ; AVX512VLBW-LABEL: ext_i2_2i64:
58 ; AVX512VLBW: # %bb.0:
59 ; AVX512VLBW-NEXT: kmovd %edi, %k1
60 ; AVX512VLBW-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
61 ; AVX512VLBW-NEXT: vmovdqa64 %xmm0, %xmm0 {%k1} {z}
62 ; AVX512VLBW-NEXT: vpsrlq $63, %xmm0, %xmm0
63 ; AVX512VLBW-NEXT: retq
64 %1 = bitcast i2 %a0 to <2 x i1>
65 %2 = zext <2 x i1> %1 to <2 x i64>
; Zero-extend an i4 bit-mask to <4 x i32> (each lane becomes 0 or 1).
; Pre-AVX512 lowering uses broadcast + AND [1,2,4,8] + PCMPEQD + PSRLD $31.
69 define <4 x i32> @ext_i4_4i32(i4 %a0) {
70 ; SSE2-SSSE3-LABEL: ext_i4_4i32:
71 ; SSE2-SSSE3: # %bb.0:
72 ; SSE2-SSSE3-NEXT: movd %edi, %xmm0
73 ; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
74 ; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm1 = [1,2,4,8]
75 ; SSE2-SSSE3-NEXT: pand %xmm1, %xmm0
76 ; SSE2-SSSE3-NEXT: pcmpeqd %xmm1, %xmm0
77 ; SSE2-SSSE3-NEXT: psrld $31, %xmm0
78 ; SSE2-SSSE3-NEXT: retq
80 ; AVX1-LABEL: ext_i4_4i32:
82 ; AVX1-NEXT: vmovd %edi, %xmm0
83 ; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
84 ; AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = [1,2,4,8]
85 ; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm0
86 ; AVX1-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
87 ; AVX1-NEXT: vpsrld $31, %xmm0, %xmm0
90 ; AVX2-LABEL: ext_i4_4i32:
92 ; AVX2-NEXT: vmovd %edi, %xmm0
93 ; AVX2-NEXT: vpbroadcastd %xmm0, %xmm0
94 ; AVX2-NEXT: vmovdqa {{.*#+}} xmm1 = [1,2,4,8]
95 ; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
96 ; AVX2-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
97 ; AVX2-NEXT: vpsrld $31, %xmm0, %xmm0
100 ; AVX512F-LABEL: ext_i4_4i32:
102 ; AVX512F-NEXT: kmovw %edi, %k1
103 ; AVX512F-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
104 ; AVX512F-NEXT: vpsrld $31, %xmm0, %xmm0
105 ; AVX512F-NEXT: vzeroupper
108 ; AVX512VLBW-LABEL: ext_i4_4i32:
109 ; AVX512VLBW: # %bb.0:
110 ; AVX512VLBW-NEXT: kmovd %edi, %k1
111 ; AVX512VLBW-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
112 ; AVX512VLBW-NEXT: vmovdqa32 %xmm0, %xmm0 {%k1} {z}
113 ; AVX512VLBW-NEXT: vpsrld $31, %xmm0, %xmm0
114 ; AVX512VLBW-NEXT: retq
115 %1 = bitcast i4 %a0 to <4 x i1>
116 %2 = zext <4 x i1> %1 to <4 x i32>
; Zero-extend an i8 bit-mask to <8 x i16> (each lane becomes 0 or 1).
; AVX512VLBW can use the mask registers directly via vpmovm2w + PSRLW $15.
120 define <8 x i16> @ext_i8_8i16(i8 %a0) {
121 ; SSE2-SSSE3-LABEL: ext_i8_8i16:
122 ; SSE2-SSSE3: # %bb.0:
123 ; SSE2-SSSE3-NEXT: movd %edi, %xmm0
124 ; SSE2-SSSE3-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7]
125 ; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
126 ; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm1 = [1,2,4,8,16,32,64,128]
127 ; SSE2-SSSE3-NEXT: pand %xmm1, %xmm0
128 ; SSE2-SSSE3-NEXT: pcmpeqw %xmm1, %xmm0
129 ; SSE2-SSSE3-NEXT: psrlw $15, %xmm0
130 ; SSE2-SSSE3-NEXT: retq
132 ; AVX1-LABEL: ext_i8_8i16:
134 ; AVX1-NEXT: vmovd %edi, %xmm0
135 ; AVX1-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7]
136 ; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
137 ; AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = [1,2,4,8,16,32,64,128]
138 ; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm0
139 ; AVX1-NEXT: vpcmpeqw %xmm1, %xmm0, %xmm0
140 ; AVX1-NEXT: vpsrlw $15, %xmm0, %xmm0
143 ; AVX2-LABEL: ext_i8_8i16:
145 ; AVX2-NEXT: vmovd %edi, %xmm0
146 ; AVX2-NEXT: vpbroadcastb %xmm0, %xmm0
147 ; AVX2-NEXT: vmovdqa {{.*#+}} xmm1 = [1,2,4,8,16,32,64,128]
148 ; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
149 ; AVX2-NEXT: vpcmpeqw %xmm1, %xmm0, %xmm0
150 ; AVX2-NEXT: vpsrlw $15, %xmm0, %xmm0
153 ; AVX512F-LABEL: ext_i8_8i16:
155 ; AVX512F-NEXT: kmovw %edi, %k1
156 ; AVX512F-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
157 ; AVX512F-NEXT: vpmovdw %zmm0, %ymm0
158 ; AVX512F-NEXT: vpsrlw $15, %xmm0, %xmm0
159 ; AVX512F-NEXT: vzeroupper
162 ; AVX512VLBW-LABEL: ext_i8_8i16:
163 ; AVX512VLBW: # %bb.0:
164 ; AVX512VLBW-NEXT: kmovd %edi, %k0
165 ; AVX512VLBW-NEXT: vpmovm2w %k0, %xmm0
166 ; AVX512VLBW-NEXT: vpsrlw $15, %xmm0, %xmm0
167 ; AVX512VLBW-NEXT: retq
168 %1 = bitcast i8 %a0 to <8 x i1>
169 %2 = zext <8 x i1> %1 to <8 x i16>
; Zero-extend an i16 bit-mask to <16 x i8> (each lane becomes 0 or 1).
; i8 lanes have no 8-bit shift, so pre-AVX512 code pairs PSRLW $7 with a
; constant-pool AND to clear the stray high bits; SSE2 vs SSSE3 differ only
; in how the mask bytes are replicated (shuffles vs pshufb).
173 define <16 x i8> @ext_i16_16i8(i16 %a0) {
174 ; SSE2-LABEL: ext_i16_16i8:
176 ; SSE2-NEXT: movd %edi, %xmm0
177 ; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
178 ; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,0,1,1,4,5,6,7]
179 ; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
180 ; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [1,2,4,8,16,32,64,128,1,2,4,8,16,32,64,128]
181 ; SSE2-NEXT: pand %xmm1, %xmm0
182 ; SSE2-NEXT: pcmpeqb %xmm1, %xmm0
183 ; SSE2-NEXT: psrlw $7, %xmm0
184 ; SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
187 ; SSSE3-LABEL: ext_i16_16i8:
189 ; SSSE3-NEXT: movd %edi, %xmm0
190 ; SSSE3-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
191 ; SSSE3-NEXT: movdqa {{.*#+}} xmm1 = [1,2,4,8,16,32,64,128,1,2,4,8,16,32,64,128]
192 ; SSSE3-NEXT: pand %xmm1, %xmm0
193 ; SSSE3-NEXT: pcmpeqb %xmm1, %xmm0
194 ; SSSE3-NEXT: psrlw $7, %xmm0
195 ; SSSE3-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
198 ; AVX1-LABEL: ext_i16_16i8:
200 ; AVX1-NEXT: vmovd %edi, %xmm0
201 ; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
202 ; AVX1-NEXT: vmovddup {{.*#+}} xmm1 = [1,2,4,8,16,32,64,128,1,2,4,8,16,32,64,128]
203 ; AVX1-NEXT: # xmm1 = mem[0,0]
204 ; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm0
205 ; AVX1-NEXT: vpcmpeqb %xmm1, %xmm0, %xmm0
206 ; AVX1-NEXT: vpsrlw $7, %xmm0, %xmm0
207 ; AVX1-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
210 ; AVX2-LABEL: ext_i16_16i8:
212 ; AVX2-NEXT: vmovd %edi, %xmm0
213 ; AVX2-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
214 ; AVX2-NEXT: vpbroadcastq {{.*#+}} xmm1 = [1,2,4,8,16,32,64,128,1,2,4,8,16,32,64,128]
215 ; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
216 ; AVX2-NEXT: vpcmpeqb %xmm1, %xmm0, %xmm0
217 ; AVX2-NEXT: vpsrlw $7, %xmm0, %xmm0
218 ; AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
221 ; AVX512F-LABEL: ext_i16_16i8:
223 ; AVX512F-NEXT: kmovw %edi, %k1
224 ; AVX512F-NEXT: vpbroadcastd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0 {%k1} {z}
225 ; AVX512F-NEXT: vpmovdb %zmm0, %xmm0
226 ; AVX512F-NEXT: vzeroupper
229 ; AVX512VLBW-LABEL: ext_i16_16i8:
230 ; AVX512VLBW: # %bb.0:
231 ; AVX512VLBW-NEXT: kmovd %edi, %k1
232 ; AVX512VLBW-NEXT: vmovdqu8 {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 {%k1} {z}
233 ; AVX512VLBW-NEXT: retq
234 %1 = bitcast i16 %a0 to <16 x i1>
235 %2 = zext <16 x i1> %1 to <16 x i8>
; Zero-extend an i4 bit-mask to <4 x i64> (256-bit result).
; SSE splits the result into two 128-bit halves with separate bit constants
; ([1,2] and [4,8]); AVX2+ handle the full 256-bit vector at once.
243 define <4 x i64> @ext_i4_4i64(i4 %a0) {
244 ; SSE2-SSSE3-LABEL: ext_i4_4i64:
245 ; SSE2-SSSE3: # %bb.0:
246 ; SSE2-SSSE3-NEXT: movd %edi, %xmm0
247 ; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm0[0,1,0,1]
248 ; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [1,2]
249 ; SSE2-SSSE3-NEXT: movdqa %xmm2, %xmm1
250 ; SSE2-SSSE3-NEXT: pand %xmm0, %xmm1
251 ; SSE2-SSSE3-NEXT: pcmpeqd %xmm0, %xmm1
252 ; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,0,3,2]
253 ; SSE2-SSSE3-NEXT: pand %xmm1, %xmm0
254 ; SSE2-SSSE3-NEXT: psrlq $63, %xmm0
255 ; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm1 = [4,8]
256 ; SSE2-SSSE3-NEXT: pand %xmm1, %xmm2
257 ; SSE2-SSSE3-NEXT: pcmpeqd %xmm1, %xmm2
258 ; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm2[1,0,3,2]
259 ; SSE2-SSSE3-NEXT: pand %xmm2, %xmm1
260 ; SSE2-SSSE3-NEXT: psrlq $63, %xmm1
261 ; SSE2-SSSE3-NEXT: retq
263 ; AVX1-LABEL: ext_i4_4i64:
265 ; AVX1-NEXT: vmovd %edi, %xmm0
266 ; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,0,1]
267 ; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
268 ; AVX1-NEXT: vmovaps {{.*#+}} ymm1 = [1,2,4,8]
269 ; AVX1-NEXT: vandps %ymm1, %ymm0, %ymm0
270 ; AVX1-NEXT: vpcmpeqq %xmm1, %xmm0, %xmm1
271 ; AVX1-NEXT: vpsrlq $63, %xmm1, %xmm1
272 ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
273 ; AVX1-NEXT: vpcmpeqq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
274 ; AVX1-NEXT: vpsrlq $63, %xmm0, %xmm0
275 ; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
278 ; AVX2-LABEL: ext_i4_4i64:
280 ; AVX2-NEXT: vmovd %edi, %xmm0
281 ; AVX2-NEXT: vpbroadcastd %xmm0, %ymm0
282 ; AVX2-NEXT: vmovdqa {{.*#+}} ymm1 = [1,2,4,8]
283 ; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm0
284 ; AVX2-NEXT: vpcmpeqq %ymm1, %ymm0, %ymm0
285 ; AVX2-NEXT: vpsrlq $63, %ymm0, %ymm0
288 ; AVX512F-LABEL: ext_i4_4i64:
290 ; AVX512F-NEXT: kmovw %edi, %k1
291 ; AVX512F-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
292 ; AVX512F-NEXT: vpsrlq $63, %ymm0, %ymm0
295 ; AVX512VLBW-LABEL: ext_i4_4i64:
296 ; AVX512VLBW: # %bb.0:
297 ; AVX512VLBW-NEXT: kmovd %edi, %k1
298 ; AVX512VLBW-NEXT: vpcmpeqd %ymm0, %ymm0, %ymm0
299 ; AVX512VLBW-NEXT: vmovdqa64 %ymm0, %ymm0 {%k1} {z}
300 ; AVX512VLBW-NEXT: vpsrlq $63, %ymm0, %ymm0
301 ; AVX512VLBW-NEXT: retq
302 %1 = bitcast i4 %a0 to <4 x i1>
303 %2 = zext <4 x i1> %1 to <4 x i64>
; Zero-extend an i8 bit-mask to <8 x i32> (256-bit result).
; SSE produces two xmm halves with bit constants [1,2,4,8] and [16,32,64,128].
307 define <8 x i32> @ext_i8_8i32(i8 %a0) {
308 ; SSE2-SSSE3-LABEL: ext_i8_8i32:
309 ; SSE2-SSSE3: # %bb.0:
310 ; SSE2-SSSE3-NEXT: movd %edi, %xmm0
311 ; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,0,0]
312 ; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm2 = [1,2,4,8]
313 ; SSE2-SSSE3-NEXT: movdqa %xmm1, %xmm0
314 ; SSE2-SSSE3-NEXT: pand %xmm2, %xmm0
315 ; SSE2-SSSE3-NEXT: pcmpeqd %xmm2, %xmm0
316 ; SSE2-SSSE3-NEXT: psrld $31, %xmm0
317 ; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm2 = [16,32,64,128]
318 ; SSE2-SSSE3-NEXT: pand %xmm2, %xmm1
319 ; SSE2-SSSE3-NEXT: pcmpeqd %xmm2, %xmm1
320 ; SSE2-SSSE3-NEXT: psrld $31, %xmm1
321 ; SSE2-SSSE3-NEXT: retq
323 ; AVX1-LABEL: ext_i8_8i32:
325 ; AVX1-NEXT: vmovd %edi, %xmm0
326 ; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
327 ; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
328 ; AVX1-NEXT: vmovaps {{.*#+}} ymm1 = [1,2,4,8,16,32,64,128]
329 ; AVX1-NEXT: vandps %ymm1, %ymm0, %ymm0
330 ; AVX1-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm1
331 ; AVX1-NEXT: vpsrld $31, %xmm1, %xmm1
332 ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
333 ; AVX1-NEXT: vpcmpeqd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
334 ; AVX1-NEXT: vpsrld $31, %xmm0, %xmm0
335 ; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
338 ; AVX2-LABEL: ext_i8_8i32:
340 ; AVX2-NEXT: vmovd %edi, %xmm0
341 ; AVX2-NEXT: vpbroadcastb %xmm0, %ymm0
342 ; AVX2-NEXT: vmovdqa {{.*#+}} ymm1 = [1,2,4,8,16,32,64,128]
343 ; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm0
344 ; AVX2-NEXT: vpcmpeqd %ymm1, %ymm0, %ymm0
345 ; AVX2-NEXT: vpsrld $31, %ymm0, %ymm0
348 ; AVX512F-LABEL: ext_i8_8i32:
350 ; AVX512F-NEXT: kmovw %edi, %k1
351 ; AVX512F-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
352 ; AVX512F-NEXT: vpsrld $31, %ymm0, %ymm0
355 ; AVX512VLBW-LABEL: ext_i8_8i32:
356 ; AVX512VLBW: # %bb.0:
357 ; AVX512VLBW-NEXT: kmovd %edi, %k1
358 ; AVX512VLBW-NEXT: vpcmpeqd %ymm0, %ymm0, %ymm0
359 ; AVX512VLBW-NEXT: vmovdqa32 %ymm0, %ymm0 {%k1} {z}
360 ; AVX512VLBW-NEXT: vpsrld $31, %ymm0, %ymm0
361 ; AVX512VLBW-NEXT: retq
362 %1 = bitcast i8 %a0 to <8 x i1>
363 %2 = zext <8 x i1> %1 to <8 x i32>
; Zero-extend an i16 bit-mask to <16 x i16> (256-bit result).
; SSE splits into two halves with word bit constants [1..128] / [256..32768];
; AVX512VLBW expands the mask directly with vpmovm2w.
367 define <16 x i16> @ext_i16_16i16(i16 %a0) {
368 ; SSE2-SSSE3-LABEL: ext_i16_16i16:
369 ; SSE2-SSSE3: # %bb.0:
370 ; SSE2-SSSE3-NEXT: movd %edi, %xmm0
371 ; SSE2-SSSE3-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7]
372 ; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,0,0]
373 ; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm2 = [1,2,4,8,16,32,64,128]
374 ; SSE2-SSSE3-NEXT: movdqa %xmm1, %xmm0
375 ; SSE2-SSSE3-NEXT: pand %xmm2, %xmm0
376 ; SSE2-SSSE3-NEXT: pcmpeqw %xmm2, %xmm0
377 ; SSE2-SSSE3-NEXT: psrlw $15, %xmm0
378 ; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm2 = [256,512,1024,2048,4096,8192,16384,32768]
379 ; SSE2-SSSE3-NEXT: pand %xmm2, %xmm1
380 ; SSE2-SSSE3-NEXT: pcmpeqw %xmm2, %xmm1
381 ; SSE2-SSSE3-NEXT: psrlw $15, %xmm1
382 ; SSE2-SSSE3-NEXT: retq
384 ; AVX1-LABEL: ext_i16_16i16:
386 ; AVX1-NEXT: vmovd %edi, %xmm0
387 ; AVX1-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7]
388 ; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
389 ; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
390 ; AVX1-NEXT: vmovaps {{.*#+}} ymm1 = [1,2,4,8,16,32,64,128,256,512,1024,2048,4096,8192,16384,32768]
391 ; AVX1-NEXT: vandps %ymm1, %ymm0, %ymm0
392 ; AVX1-NEXT: vpcmpeqw %xmm1, %xmm0, %xmm1
393 ; AVX1-NEXT: vpsrlw $15, %xmm1, %xmm1
394 ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
395 ; AVX1-NEXT: vpcmpeqw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
396 ; AVX1-NEXT: vpsrlw $15, %xmm0, %xmm0
397 ; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
400 ; AVX2-LABEL: ext_i16_16i16:
402 ; AVX2-NEXT: vmovd %edi, %xmm0
403 ; AVX2-NEXT: vpbroadcastw %xmm0, %ymm0
404 ; AVX2-NEXT: vmovdqa {{.*#+}} ymm1 = [1,2,4,8,16,32,64,128,256,512,1024,2048,4096,8192,16384,32768]
405 ; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm0
406 ; AVX2-NEXT: vpcmpeqw %ymm1, %ymm0, %ymm0
407 ; AVX2-NEXT: vpsrlw $15, %ymm0, %ymm0
410 ; AVX512F-LABEL: ext_i16_16i16:
412 ; AVX512F-NEXT: kmovw %edi, %k1
413 ; AVX512F-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
414 ; AVX512F-NEXT: vpmovdw %zmm0, %ymm0
415 ; AVX512F-NEXT: vpsrlw $15, %ymm0, %ymm0
418 ; AVX512VLBW-LABEL: ext_i16_16i16:
419 ; AVX512VLBW: # %bb.0:
420 ; AVX512VLBW-NEXT: kmovd %edi, %k0
421 ; AVX512VLBW-NEXT: vpmovm2w %k0, %ymm0
422 ; AVX512VLBW-NEXT: vpsrlw $15, %ymm0, %ymm0
423 ; AVX512VLBW-NEXT: retq
424 %1 = bitcast i16 %a0 to <16 x i1>
425 %2 = zext <16 x i1> %1 to <16 x i16>
; Zero-extend an i32 bit-mask to <32 x i8> (256-bit result).
; Byte lanes again need PSRLW $7 plus an AND with all-ones-per-byte [1,1,...]
; to isolate bit 0; AVX512F splits the mask into two kmovw halves.
429 define <32 x i8> @ext_i32_32i8(i32 %a0) {
430 ; SSE2-SSSE3-LABEL: ext_i32_32i8:
431 ; SSE2-SSSE3: # %bb.0:
432 ; SSE2-SSSE3-NEXT: movd %edi, %xmm1
433 ; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
434 ; SSE2-SSSE3-NEXT: pshuflw {{.*#+}} xmm0 = xmm1[0,0,1,1,4,5,6,7]
435 ; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
436 ; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm2 = [1,2,4,8,16,32,64,128,1,2,4,8,16,32,64,128]
437 ; SSE2-SSSE3-NEXT: pand %xmm2, %xmm0
438 ; SSE2-SSSE3-NEXT: pcmpeqb %xmm2, %xmm0
439 ; SSE2-SSSE3-NEXT: psrlw $7, %xmm0
440 ; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm3 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
441 ; SSE2-SSSE3-NEXT: pand %xmm3, %xmm0
442 ; SSE2-SSSE3-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[2,2,3,3,4,5,6,7]
443 ; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,1,1]
444 ; SSE2-SSSE3-NEXT: pand %xmm2, %xmm1
445 ; SSE2-SSSE3-NEXT: pcmpeqb %xmm2, %xmm1
446 ; SSE2-SSSE3-NEXT: psrlw $7, %xmm1
447 ; SSE2-SSSE3-NEXT: pand %xmm3, %xmm1
448 ; SSE2-SSSE3-NEXT: retq
450 ; AVX1-LABEL: ext_i32_32i8:
452 ; AVX1-NEXT: vmovd %edi, %xmm0
453 ; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
454 ; AVX1-NEXT: vpshuflw {{.*#+}} xmm1 = xmm0[0,0,1,1,4,5,6,7]
455 ; AVX1-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[2,2,3,3,4,5,6,7]
456 ; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
457 ; AVX1-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,0,1,1,4,4,5,5]
458 ; AVX1-NEXT: vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
459 ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
460 ; AVX1-NEXT: vmovddup {{.*#+}} xmm2 = [1,2,4,8,16,32,64,128,1,2,4,8,16,32,64,128]
461 ; AVX1-NEXT: # xmm2 = mem[0,0]
462 ; AVX1-NEXT: vpcmpeqb %xmm2, %xmm1, %xmm1
463 ; AVX1-NEXT: vpsrlw $7, %xmm1, %xmm1
464 ; AVX1-NEXT: vbroadcastss {{.*#+}} xmm3 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
465 ; AVX1-NEXT: vpand %xmm3, %xmm1, %xmm1
466 ; AVX1-NEXT: vpcmpeqb %xmm2, %xmm0, %xmm0
467 ; AVX1-NEXT: vpsrlw $7, %xmm0, %xmm0
468 ; AVX1-NEXT: vpand %xmm3, %xmm0, %xmm0
469 ; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
472 ; AVX2-LABEL: ext_i32_32i8:
474 ; AVX2-NEXT: vmovd %edi, %xmm0
475 ; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,0,1]
476 ; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,18,18,18,18,18,18,18,18,19,19,19,19,19,19,19,19]
477 ; AVX2-NEXT: vpbroadcastq {{.*#+}} ymm1 = [1,2,4,8,16,32,64,128,1,2,4,8,16,32,64,128,1,2,4,8,16,32,64,128,1,2,4,8,16,32,64,128]
478 ; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm0
479 ; AVX2-NEXT: vpcmpeqb %ymm1, %ymm0, %ymm0
480 ; AVX2-NEXT: vpsrlw $7, %ymm0, %ymm0
481 ; AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
484 ; AVX512F-LABEL: ext_i32_32i8:
486 ; AVX512F-NEXT: kmovw %edi, %k1
487 ; AVX512F-NEXT: shrl $16, %edi
488 ; AVX512F-NEXT: kmovw %edi, %k2
489 ; AVX512F-NEXT: vpbroadcastd {{.*#+}} zmm0 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
490 ; AVX512F-NEXT: vmovdqa32 %zmm0, %zmm1 {%k1} {z}
491 ; AVX512F-NEXT: vpmovdb %zmm1, %xmm1
492 ; AVX512F-NEXT: vmovdqa32 %zmm0, %zmm0 {%k2} {z}
493 ; AVX512F-NEXT: vpmovdb %zmm0, %xmm0
494 ; AVX512F-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
497 ; AVX512VLBW-LABEL: ext_i32_32i8:
498 ; AVX512VLBW: # %bb.0:
499 ; AVX512VLBW-NEXT: kmovd %edi, %k1
500 ; AVX512VLBW-NEXT: vmovdqu8 {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0 {%k1} {z}
501 ; AVX512VLBW-NEXT: retq
502 %1 = bitcast i32 %a0 to <32 x i1>
503 %2 = zext <32 x i1> %1 to <32 x i8>
; Zero-extend an i8 bit-mask to <8 x i64> (512-bit result).
; SSE emits four xmm halves ([1,2] / [4,8] / [16,32] / [64,128]); AVX512 does
; the whole vector in one masked vpternlogq plus a shift.
511 define <8 x i64> @ext_i8_8i64(i8 %a0) {
512 ; SSE2-SSSE3-LABEL: ext_i8_8i64:
513 ; SSE2-SSSE3: # %bb.0:
514 ; SSE2-SSSE3-NEXT: movd %edi, %xmm0
515 ; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm0[0,1,0,1]
516 ; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [1,2]
517 ; SSE2-SSSE3-NEXT: movdqa %xmm4, %xmm1
518 ; SSE2-SSSE3-NEXT: pand %xmm0, %xmm1
519 ; SSE2-SSSE3-NEXT: pcmpeqd %xmm0, %xmm1
520 ; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,0,3,2]
521 ; SSE2-SSSE3-NEXT: pand %xmm1, %xmm0
522 ; SSE2-SSSE3-NEXT: psrlq $63, %xmm0
523 ; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm1 = [4,8]
524 ; SSE2-SSSE3-NEXT: movdqa %xmm4, %xmm2
525 ; SSE2-SSSE3-NEXT: pand %xmm1, %xmm2
526 ; SSE2-SSSE3-NEXT: pcmpeqd %xmm1, %xmm2
527 ; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm2[1,0,3,2]
528 ; SSE2-SSSE3-NEXT: pand %xmm2, %xmm1
529 ; SSE2-SSSE3-NEXT: psrlq $63, %xmm1
530 ; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm2 = [16,32]
531 ; SSE2-SSSE3-NEXT: movdqa %xmm4, %xmm3
532 ; SSE2-SSSE3-NEXT: pand %xmm2, %xmm3
533 ; SSE2-SSSE3-NEXT: pcmpeqd %xmm2, %xmm3
534 ; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm3[1,0,3,2]
535 ; SSE2-SSSE3-NEXT: pand %xmm3, %xmm2
536 ; SSE2-SSSE3-NEXT: psrlq $63, %xmm2
537 ; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm3 = [64,128]
538 ; SSE2-SSSE3-NEXT: pand %xmm3, %xmm4
539 ; SSE2-SSSE3-NEXT: pcmpeqd %xmm3, %xmm4
540 ; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm4[1,0,3,2]
541 ; SSE2-SSSE3-NEXT: pand %xmm4, %xmm3
542 ; SSE2-SSSE3-NEXT: psrlq $63, %xmm3
543 ; SSE2-SSSE3-NEXT: retq
545 ; AVX1-LABEL: ext_i8_8i64:
547 ; AVX1-NEXT: vmovd %edi, %xmm0
548 ; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,0,1]
549 ; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm1
550 ; AVX1-NEXT: vmovaps {{.*#+}} ymm0 = [1,2,4,8]
551 ; AVX1-NEXT: vandps %ymm0, %ymm1, %ymm2
552 ; AVX1-NEXT: vpcmpeqq %xmm0, %xmm2, %xmm0
553 ; AVX1-NEXT: vpsrlq $63, %xmm0, %xmm0
554 ; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm2
555 ; AVX1-NEXT: vpcmpeqq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
556 ; AVX1-NEXT: vpsrlq $63, %xmm2, %xmm2
557 ; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
558 ; AVX1-NEXT: vmovaps {{.*#+}} ymm2 = [16,32,64,128]
559 ; AVX1-NEXT: vandps %ymm2, %ymm1, %ymm1
560 ; AVX1-NEXT: vpcmpeqq %xmm2, %xmm1, %xmm2
561 ; AVX1-NEXT: vpsrlq $63, %xmm2, %xmm2
562 ; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
563 ; AVX1-NEXT: vpcmpeqq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
564 ; AVX1-NEXT: vpsrlq $63, %xmm1, %xmm1
565 ; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1
568 ; AVX2-LABEL: ext_i8_8i64:
570 ; AVX2-NEXT: vmovd %edi, %xmm0
571 ; AVX2-NEXT: vpbroadcastb %xmm0, %ymm1
572 ; AVX2-NEXT: vmovdqa {{.*#+}} ymm0 = [1,2,4,8]
573 ; AVX2-NEXT: vpand %ymm0, %ymm1, %ymm2
574 ; AVX2-NEXT: vpcmpeqq %ymm0, %ymm2, %ymm0
575 ; AVX2-NEXT: vpsrlq $63, %ymm0, %ymm0
576 ; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [16,32,64,128]
577 ; AVX2-NEXT: vpand %ymm2, %ymm1, %ymm1
578 ; AVX2-NEXT: vpcmpeqq %ymm2, %ymm1, %ymm1
579 ; AVX2-NEXT: vpsrlq $63, %ymm1, %ymm1
582 ; AVX512F-LABEL: ext_i8_8i64:
584 ; AVX512F-NEXT: kmovw %edi, %k1
585 ; AVX512F-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
586 ; AVX512F-NEXT: vpsrlq $63, %zmm0, %zmm0
589 ; AVX512VLBW-LABEL: ext_i8_8i64:
590 ; AVX512VLBW: # %bb.0:
591 ; AVX512VLBW-NEXT: kmovd %edi, %k1
592 ; AVX512VLBW-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
593 ; AVX512VLBW-NEXT: vpsrlq $63, %zmm0, %zmm0
594 ; AVX512VLBW-NEXT: retq
595 %1 = bitcast i8 %a0 to <8 x i1>
596 %2 = zext <8 x i1> %1 to <8 x i64>
; Zero-extend an i16 bit-mask to <16 x i32> (512-bit result).
; SSE needs four xmm quarters with dword bit constants; AVX512 is a single
; masked all-ones fill plus PSRLD $31.
600 define <16 x i32> @ext_i16_16i32(i16 %a0) {
601 ; SSE2-SSSE3-LABEL: ext_i16_16i32:
602 ; SSE2-SSSE3: # %bb.0:
603 ; SSE2-SSSE3-NEXT: movd %edi, %xmm0
604 ; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm0[0,0,0,0]
605 ; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm1 = [1,2,4,8]
606 ; SSE2-SSSE3-NEXT: movdqa %xmm3, %xmm0
607 ; SSE2-SSSE3-NEXT: pand %xmm1, %xmm0
608 ; SSE2-SSSE3-NEXT: pcmpeqd %xmm1, %xmm0
609 ; SSE2-SSSE3-NEXT: psrld $31, %xmm0
610 ; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm2 = [16,32,64,128]
611 ; SSE2-SSSE3-NEXT: movdqa %xmm3, %xmm1
612 ; SSE2-SSSE3-NEXT: pand %xmm2, %xmm1
613 ; SSE2-SSSE3-NEXT: pcmpeqd %xmm2, %xmm1
614 ; SSE2-SSSE3-NEXT: psrld $31, %xmm1
615 ; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm4 = [256,512,1024,2048]
616 ; SSE2-SSSE3-NEXT: movdqa %xmm3, %xmm2
617 ; SSE2-SSSE3-NEXT: pand %xmm4, %xmm2
618 ; SSE2-SSSE3-NEXT: pcmpeqd %xmm4, %xmm2
619 ; SSE2-SSSE3-NEXT: psrld $31, %xmm2
620 ; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm4 = [4096,8192,16384,32768]
621 ; SSE2-SSSE3-NEXT: pand %xmm4, %xmm3
622 ; SSE2-SSSE3-NEXT: pcmpeqd %xmm4, %xmm3
623 ; SSE2-SSSE3-NEXT: psrld $31, %xmm3
624 ; SSE2-SSSE3-NEXT: retq
626 ; AVX1-LABEL: ext_i16_16i32:
628 ; AVX1-NEXT: vmovd %edi, %xmm0
629 ; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
630 ; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm1
631 ; AVX1-NEXT: vmovaps {{.*#+}} ymm0 = [1,2,4,8,16,32,64,128]
632 ; AVX1-NEXT: vandps %ymm0, %ymm1, %ymm2
633 ; AVX1-NEXT: vpcmpeqd %xmm0, %xmm2, %xmm0
634 ; AVX1-NEXT: vpsrld $31, %xmm0, %xmm0
635 ; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm2
636 ; AVX1-NEXT: vpcmpeqd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
637 ; AVX1-NEXT: vpsrld $31, %xmm2, %xmm2
638 ; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
639 ; AVX1-NEXT: vmovaps {{.*#+}} ymm2 = [256,512,1024,2048,4096,8192,16384,32768]
640 ; AVX1-NEXT: vandps %ymm2, %ymm1, %ymm1
641 ; AVX1-NEXT: vpcmpeqd %xmm2, %xmm1, %xmm2
642 ; AVX1-NEXT: vpsrld $31, %xmm2, %xmm2
643 ; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
644 ; AVX1-NEXT: vpcmpeqd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
645 ; AVX1-NEXT: vpsrld $31, %xmm1, %xmm1
646 ; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1
649 ; AVX2-LABEL: ext_i16_16i32:
651 ; AVX2-NEXT: vmovd %edi, %xmm0
652 ; AVX2-NEXT: vpbroadcastw %xmm0, %ymm1
653 ; AVX2-NEXT: vmovdqa {{.*#+}} ymm0 = [1,2,4,8,16,32,64,128]
654 ; AVX2-NEXT: vpand %ymm0, %ymm1, %ymm2
655 ; AVX2-NEXT: vpcmpeqd %ymm0, %ymm2, %ymm0
656 ; AVX2-NEXT: vpsrld $31, %ymm0, %ymm0
657 ; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [256,512,1024,2048,4096,8192,16384,32768]
658 ; AVX2-NEXT: vpand %ymm2, %ymm1, %ymm1
659 ; AVX2-NEXT: vpcmpeqd %ymm2, %ymm1, %ymm1
660 ; AVX2-NEXT: vpsrld $31, %ymm1, %ymm1
663 ; AVX512F-LABEL: ext_i16_16i32:
665 ; AVX512F-NEXT: kmovw %edi, %k1
666 ; AVX512F-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
667 ; AVX512F-NEXT: vpsrld $31, %zmm0, %zmm0
670 ; AVX512VLBW-LABEL: ext_i16_16i32:
671 ; AVX512VLBW: # %bb.0:
672 ; AVX512VLBW-NEXT: kmovd %edi, %k1
673 ; AVX512VLBW-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
674 ; AVX512VLBW-NEXT: vpsrld $31, %zmm0, %zmm0
675 ; AVX512VLBW-NEXT: retq
676 %1 = bitcast i16 %a0 to <16 x i1>
677 %2 = zext <16 x i1> %1 to <16 x i32>
; Zero-extend an i32 bit-mask to <32 x i16> (512-bit result).
; Targets without AVX512BW split the 32-bit mask into 16-bit halves (shrl $16)
; and expand each half separately; AVX512VLBW uses vpmovm2w on a zmm.
681 define <32 x i16> @ext_i32_32i16(i32 %a0) {
682 ; SSE2-SSSE3-LABEL: ext_i32_32i16:
683 ; SSE2-SSSE3: # %bb.0:
684 ; SSE2-SSSE3-NEXT: movd %edi, %xmm2
685 ; SSE2-SSSE3-NEXT: pshuflw {{.*#+}} xmm0 = xmm2[0,0,0,0,4,5,6,7]
686 ; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,0,0]
687 ; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm4 = [1,2,4,8,16,32,64,128]
688 ; SSE2-SSSE3-NEXT: movdqa %xmm1, %xmm0
689 ; SSE2-SSSE3-NEXT: pand %xmm4, %xmm0
690 ; SSE2-SSSE3-NEXT: pcmpeqw %xmm4, %xmm0
691 ; SSE2-SSSE3-NEXT: psrlw $15, %xmm0
692 ; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm5 = [256,512,1024,2048,4096,8192,16384,32768]
693 ; SSE2-SSSE3-NEXT: pand %xmm5, %xmm1
694 ; SSE2-SSSE3-NEXT: pcmpeqw %xmm5, %xmm1
695 ; SSE2-SSSE3-NEXT: psrlw $15, %xmm1
696 ; SSE2-SSSE3-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[1,1,1,1,4,5,6,7]
697 ; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm2[0,0,0,0]
698 ; SSE2-SSSE3-NEXT: movdqa %xmm3, %xmm2
699 ; SSE2-SSSE3-NEXT: pand %xmm4, %xmm2
700 ; SSE2-SSSE3-NEXT: pcmpeqw %xmm4, %xmm2
701 ; SSE2-SSSE3-NEXT: psrlw $15, %xmm2
702 ; SSE2-SSSE3-NEXT: pand %xmm5, %xmm3
703 ; SSE2-SSSE3-NEXT: pcmpeqw %xmm5, %xmm3
704 ; SSE2-SSSE3-NEXT: psrlw $15, %xmm3
705 ; SSE2-SSSE3-NEXT: retq
707 ; AVX1-LABEL: ext_i32_32i16:
709 ; AVX1-NEXT: vmovd %edi, %xmm1
710 ; AVX1-NEXT: vpshuflw {{.*#+}} xmm0 = xmm1[0,0,0,0,4,5,6,7]
711 ; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
712 ; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
713 ; AVX1-NEXT: vmovaps {{.*#+}} ymm2 = [1,2,4,8,16,32,64,128,256,512,1024,2048,4096,8192,16384,32768]
714 ; AVX1-NEXT: vandps %ymm2, %ymm0, %ymm0
715 ; AVX1-NEXT: vpcmpeqw %xmm2, %xmm0, %xmm3
716 ; AVX1-NEXT: vpsrlw $15, %xmm3, %xmm3
717 ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
718 ; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [256,512,1024,2048,4096,8192,16384,32768]
719 ; AVX1-NEXT: vpcmpeqw %xmm4, %xmm0, %xmm0
720 ; AVX1-NEXT: vpsrlw $15, %xmm0, %xmm0
721 ; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm3, %ymm0
722 ; AVX1-NEXT: vpshuflw {{.*#+}} xmm1 = xmm1[1,1,1,1,4,5,6,7]
723 ; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,0,0]
724 ; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm1, %ymm1
725 ; AVX1-NEXT: vandps %ymm2, %ymm1, %ymm1
726 ; AVX1-NEXT: vpcmpeqw %xmm2, %xmm1, %xmm2
727 ; AVX1-NEXT: vpsrlw $15, %xmm2, %xmm2
728 ; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
729 ; AVX1-NEXT: vpcmpeqw %xmm4, %xmm1, %xmm1
730 ; AVX1-NEXT: vpsrlw $15, %xmm1, %xmm1
731 ; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1
734 ; AVX2-LABEL: ext_i32_32i16:
736 ; AVX2-NEXT: vmovd %edi, %xmm0
737 ; AVX2-NEXT: vpbroadcastw %xmm0, %ymm0
738 ; AVX2-NEXT: vmovdqa {{.*#+}} ymm1 = [1,2,4,8,16,32,64,128,256,512,1024,2048,4096,8192,16384,32768]
739 ; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm0
740 ; AVX2-NEXT: vpcmpeqw %ymm1, %ymm0, %ymm0
741 ; AVX2-NEXT: vpsrlw $15, %ymm0, %ymm0
742 ; AVX2-NEXT: shrl $16, %edi
743 ; AVX2-NEXT: vmovd %edi, %xmm2
744 ; AVX2-NEXT: vpbroadcastw %xmm2, %ymm2
745 ; AVX2-NEXT: vpand %ymm1, %ymm2, %ymm2
746 ; AVX2-NEXT: vpcmpeqw %ymm1, %ymm2, %ymm1
747 ; AVX2-NEXT: vpsrlw $15, %ymm1, %ymm1
750 ; AVX512F-LABEL: ext_i32_32i16:
752 ; AVX512F-NEXT: kmovw %edi, %k1
753 ; AVX512F-NEXT: shrl $16, %edi
754 ; AVX512F-NEXT: kmovw %edi, %k2
755 ; AVX512F-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
756 ; AVX512F-NEXT: vpmovdw %zmm0, %ymm0
757 ; AVX512F-NEXT: vpsrlw $15, %ymm0, %ymm0
758 ; AVX512F-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k2} {z}
759 ; AVX512F-NEXT: vpmovdw %zmm1, %ymm1
760 ; AVX512F-NEXT: vpsrlw $15, %ymm1, %ymm1
761 ; AVX512F-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
764 ; AVX512VLBW-LABEL: ext_i32_32i16:
765 ; AVX512VLBW: # %bb.0:
766 ; AVX512VLBW-NEXT: kmovd %edi, %k0
767 ; AVX512VLBW-NEXT: vpmovm2w %k0, %zmm0
768 ; AVX512VLBW-NEXT: vpsrlw $15, %zmm0, %zmm0
769 ; AVX512VLBW-NEXT: retq
770 %1 = bitcast i32 %a0 to <32 x i1>
771 %2 = zext <32 x i1> %1 to <32 x i16>
; Zero-extend an i64 bit-mask to <64 x i8> (512-bit result; mask arrives in rdi).
; AVX512F without BW must split the 64-bit mask into four 16-bit kmovw chunks;
; AVX512VLBW consumes it whole with kmovq + a masked byte load of ones.
775 define <64 x i8> @ext_i64_64i8(i64 %a0) {
776 ; SSE2-SSSE3-LABEL: ext_i64_64i8:
777 ; SSE2-SSSE3: # %bb.0:
778 ; SSE2-SSSE3-NEXT: movq %rdi, %xmm3
779 ; SSE2-SSSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
780 ; SSE2-SSSE3-NEXT: pshuflw {{.*#+}} xmm0 = xmm3[0,0,1,1,4,5,6,7]
781 ; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
782 ; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm4 = [1,2,4,8,16,32,64,128,1,2,4,8,16,32,64,128]
783 ; SSE2-SSSE3-NEXT: pand %xmm4, %xmm0
784 ; SSE2-SSSE3-NEXT: pcmpeqb %xmm4, %xmm0
785 ; SSE2-SSSE3-NEXT: psrlw $7, %xmm0
786 ; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm5 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
787 ; SSE2-SSSE3-NEXT: pand %xmm5, %xmm0
788 ; SSE2-SSSE3-NEXT: pshuflw {{.*#+}} xmm1 = xmm3[2,2,3,3,4,5,6,7]
789 ; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,1,1]
790 ; SSE2-SSSE3-NEXT: pand %xmm4, %xmm1
791 ; SSE2-SSSE3-NEXT: pcmpeqb %xmm4, %xmm1
792 ; SSE2-SSSE3-NEXT: psrlw $7, %xmm1
793 ; SSE2-SSSE3-NEXT: pand %xmm5, %xmm1
794 ; SSE2-SSSE3-NEXT: pshufhw {{.*#+}} xmm2 = xmm3[0,1,2,3,4,4,5,5]
795 ; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,2,3,3]
796 ; SSE2-SSSE3-NEXT: pand %xmm4, %xmm2
797 ; SSE2-SSSE3-NEXT: pcmpeqb %xmm4, %xmm2
798 ; SSE2-SSSE3-NEXT: psrlw $7, %xmm2
799 ; SSE2-SSSE3-NEXT: pand %xmm5, %xmm2
800 ; SSE2-SSSE3-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,6,6,7,7]
801 ; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm3[2,2,3,3]
802 ; SSE2-SSSE3-NEXT: pand %xmm4, %xmm3
803 ; SSE2-SSSE3-NEXT: pcmpeqb %xmm4, %xmm3
804 ; SSE2-SSSE3-NEXT: psrlw $7, %xmm3
805 ; SSE2-SSSE3-NEXT: pand %xmm5, %xmm3
806 ; SSE2-SSSE3-NEXT: retq
808 ; AVX1-LABEL: ext_i64_64i8:
810 ; AVX1-NEXT: vmovq %rdi, %xmm0
811 ; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm1 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
812 ; AVX1-NEXT: vpshuflw {{.*#+}} xmm0 = xmm1[0,0,1,1,4,5,6,7]
813 ; AVX1-NEXT: vpshuflw {{.*#+}} xmm2 = xmm1[2,2,3,3,4,5,6,7]
814 ; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
815 ; AVX1-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,0,1,1,4,4,5,5]
816 ; AVX1-NEXT: vbroadcastsd {{.*#+}} ymm2 = [1,2,4,8,16,32,64,128,1,2,4,8,16,32,64,128,1,2,4,8,16,32,64,128,1,2,4,8,16,32,64,128]
817 ; AVX1-NEXT: vandps %ymm2, %ymm0, %ymm0
818 ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
819 ; AVX1-NEXT: vpcmpeqb %xmm2, %xmm3, %xmm3
820 ; AVX1-NEXT: vpsrlw $7, %xmm3, %xmm3
821 ; AVX1-NEXT: vbroadcastss {{.*#+}} xmm4 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
822 ; AVX1-NEXT: vpand %xmm4, %xmm3, %xmm3
823 ; AVX1-NEXT: vpcmpeqb %xmm2, %xmm0, %xmm0
824 ; AVX1-NEXT: vpsrlw $7, %xmm0, %xmm0
825 ; AVX1-NEXT: vpand %xmm4, %xmm0, %xmm0
826 ; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm0, %ymm0
827 ; AVX1-NEXT: vpshufhw {{.*#+}} xmm3 = xmm1[0,1,2,3,4,4,5,5]
828 ; AVX1-NEXT: vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,6,6,7,7]
829 ; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm3, %ymm1
830 ; AVX1-NEXT: vshufps {{.*#+}} ymm1 = ymm1[2,2,3,3,6,6,7,7]
831 ; AVX1-NEXT: vandps %ymm2, %ymm1, %ymm1
832 ; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
833 ; AVX1-NEXT: vpcmpeqb %xmm2, %xmm3, %xmm3
834 ; AVX1-NEXT: vpsrlw $7, %xmm3, %xmm3
835 ; AVX1-NEXT: vpand %xmm4, %xmm3, %xmm3
836 ; AVX1-NEXT: vpcmpeqb %xmm2, %xmm1, %xmm1
837 ; AVX1-NEXT: vpsrlw $7, %xmm1, %xmm1
838 ; AVX1-NEXT: vpand %xmm4, %xmm1, %xmm1
839 ; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm1, %ymm1
842 ; AVX2-LABEL: ext_i64_64i8:
844 ; AVX2-NEXT: vmovq %rdi, %xmm0
845 ; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm0[0,1,0,1]
846 ; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm1[0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,18,18,18,18,18,18,18,18,19,19,19,19,19,19,19,19]
847 ; AVX2-NEXT: vpbroadcastq {{.*#+}} ymm2 = [1,2,4,8,16,32,64,128,1,2,4,8,16,32,64,128,1,2,4,8,16,32,64,128,1,2,4,8,16,32,64,128]
848 ; AVX2-NEXT: vpand %ymm2, %ymm0, %ymm0
849 ; AVX2-NEXT: vpcmpeqb %ymm2, %ymm0, %ymm0
850 ; AVX2-NEXT: vpsrlw $7, %ymm0, %ymm0
851 ; AVX2-NEXT: vpbroadcastb {{.*#+}} ymm3 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
852 ; AVX2-NEXT: vpand %ymm3, %ymm0, %ymm0
853 ; AVX2-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[4,4,4,4,4,4,4,4,5,5,5,5,5,5,5,5,22,22,22,22,22,22,22,22,23,23,23,23,23,23,23,23]
854 ; AVX2-NEXT: vpand %ymm2, %ymm1, %ymm1
855 ; AVX2-NEXT: vpcmpeqb %ymm2, %ymm1, %ymm1
856 ; AVX2-NEXT: vpsrlw $7, %ymm1, %ymm1
857 ; AVX2-NEXT: vpand %ymm3, %ymm1, %ymm1
860 ; AVX512F-LABEL: ext_i64_64i8:
862 ; AVX512F-NEXT: movq %rdi, %rax
863 ; AVX512F-NEXT: movl %edi, %ecx
864 ; AVX512F-NEXT: kmovw %edi, %k1
865 ; AVX512F-NEXT: shrq $32, %rdi
866 ; AVX512F-NEXT: shrq $48, %rax
867 ; AVX512F-NEXT: shrl $16, %ecx
868 ; AVX512F-NEXT: kmovw %ecx, %k2
869 ; AVX512F-NEXT: kmovw %eax, %k3
870 ; AVX512F-NEXT: kmovw %edi, %k4
871 ; AVX512F-NEXT: vpbroadcastd {{.*#+}} zmm0 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
872 ; AVX512F-NEXT: vmovdqa32 %zmm0, %zmm1 {%k4} {z}
873 ; AVX512F-NEXT: vpmovdb %zmm1, %xmm1
874 ; AVX512F-NEXT: vmovdqa32 %zmm0, %zmm2 {%k3} {z}
875 ; AVX512F-NEXT: vpmovdb %zmm2, %xmm2
876 ; AVX512F-NEXT: vinserti128 $1, %xmm2, %ymm1, %ymm1
877 ; AVX512F-NEXT: vmovdqa32 %zmm0, %zmm2 {%k1} {z}
878 ; AVX512F-NEXT: vpmovdb %zmm2, %xmm2
879 ; AVX512F-NEXT: vmovdqa32 %zmm0, %zmm0 {%k2} {z}
880 ; AVX512F-NEXT: vpmovdb %zmm0, %xmm0
881 ; AVX512F-NEXT: vinserti128 $1, %xmm0, %ymm2, %ymm0
882 ; AVX512F-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
885 ; AVX512VLBW-LABEL: ext_i64_64i8:
886 ; AVX512VLBW: # %bb.0:
887 ; AVX512VLBW-NEXT: kmovq %rdi, %k1
888 ; AVX512VLBW-NEXT: vmovdqu8 {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0 {%k1} {z}
889 ; AVX512VLBW-NEXT: retq
890 %1 = bitcast i64 %a0 to <64 x i1>
891 %2 = zext <64 x i1> %1 to <64 x i8>