; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefix=SSE --check-prefix=X86-SSE
; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=AVX --check-prefix=AVX1 --check-prefix=X86-AVX --check-prefix=X86-AVX1
; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=AVX --check-prefix=AVX2 --check-prefix=X86-AVX --check-prefix=X86-AVX2
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefix=SSE --check-prefix=X64-SSE
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=AVX --check-prefix=AVX1 --check-prefix=X64-AVX --check-prefix=X64-AVX1
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=AVX --check-prefix=AVX2 --check-prefix=X64-AVX --check-prefix=X64-AVX2
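
; Truncating an ashr-by-63 of v4i64 to v4i32 only demands the sign bit of each 64-bit element.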
define <4 x i32> @trunc_ashr_v4i64(<4 x i64> %a) nounwind {
; SSE-LABEL: trunc_ashr_v4i64:
; SSE:       # %bb.0:
; SSE-NEXT:    psrad $31, %xmm1
; SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
; SSE-NEXT:    psrad $31, %xmm0
; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
; SSE-NEXT:    packssdw %xmm1, %xmm0
; SSE-NEXT:    ret{{[l|q]}}
;
; AVX1-LABEL: trunc_ashr_v4i64:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT:    vpxor %xmm2, %xmm2, %xmm2
; AVX1-NEXT:    vpcmpgtq %xmm1, %xmm2, %xmm1
; AVX1-NEXT:    vpcmpgtq %xmm0, %xmm2, %xmm0
; AVX1-NEXT:    vpackssdw %xmm1, %xmm0, %xmm0
; AVX1-NEXT:    vzeroupper
; AVX1-NEXT:    ret{{[l|q]}}
;
; AVX2-LABEL: trunc_ashr_v4i64:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vpxor %xmm1, %xmm1, %xmm1
; AVX2-NEXT:    vpcmpgtq %ymm0, %ymm1, %ymm0
; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT:    vpackssdw %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    vzeroupper
; AVX2-NEXT:    ret{{[l|q]}}
  %1 = ashr <4 x i64> %a, <i64 63, i64 63, i64 63, i64 63>
  %2 = trunc <4 x i64> %1 to <4 x i32>
  ret <4 x i32> %2
}
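
; ashr of v4i64 by 49 (>= 32), bitcast to v8i32, then truncated to v8i16.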
define <8 x i16> @trunc_ashr_v4i64_bitcast(<4 x i64> %a0) {
; SSE-LABEL: trunc_ashr_v4i64_bitcast:
; SSE:       # %bb.0:
; SSE-NEXT:    movdqa %xmm1, %xmm2
; SSE-NEXT:    psrad $31, %xmm2
; SSE-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[1,3,2,3]
; SSE-NEXT:    psrad $17, %xmm1
; SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,3,2,3]
; SSE-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
; SSE-NEXT:    movdqa %xmm0, %xmm2
; SSE-NEXT:    psrad $31, %xmm2
; SSE-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[1,3,2,3]
; SSE-NEXT:    psrad $17, %xmm0
; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,3,2,3]
; SSE-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
; SSE-NEXT:    packssdw %xmm1, %xmm0
; SSE-NEXT:    ret{{[l|q]}}
;
; AVX1-LABEL: trunc_ashr_v4i64_bitcast:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT:    vpsrad $31, %xmm1, %xmm2
; AVX1-NEXT:    vpsrad $17, %xmm1, %xmm1
; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
; AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7]
; AVX1-NEXT:    vpsrad $31, %xmm0, %xmm2
; AVX1-NEXT:    vpsrad $17, %xmm0, %xmm0
; AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
; AVX1-NEXT:    vpackssdw %xmm1, %xmm0, %xmm0
; AVX1-NEXT:    vzeroupper
; AVX1-NEXT:    ret{{[l|q]}}
;
; AVX2-LABEL: trunc_ashr_v4i64_bitcast:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vpsrad $31, %ymm0, %ymm1
; AVX2-NEXT:    vpsrad $17, %ymm0, %ymm0
; AVX2-NEXT:    vpshufd {{.*#+}} ymm0 = ymm0[1,1,3,3,5,5,7,7]
; AVX2-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[3],ymm0[4],ymm1[5],ymm0[6],ymm1[7]
; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT:    vpackssdw %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    vzeroupper
; AVX2-NEXT:    ret{{[l|q]}}
  %1 = ashr <4 x i64> %a0, <i64 49, i64 49, i64 49, i64 49>
  %2 = bitcast <4 x i64> %1 to <8 x i32>
  %3 = trunc <8 x i32> %2 to <8 x i16>
  ret <8 x i16> %3
}
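
; ashr of v8i32 by 31 then truncation to v8i16: both 128-bit halves are all-sign values, so they can be combined with packssdw.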
define <8 x i16> @trunc_ashr_v8i32(<8 x i32> %a) nounwind {
; SSE-LABEL: trunc_ashr_v8i32:
; SSE:       # %bb.0:
; SSE-NEXT:    psrad $31, %xmm1
; SSE-NEXT:    psrad $31, %xmm0
; SSE-NEXT:    packssdw %xmm1, %xmm0
; SSE-NEXT:    ret{{[l|q]}}
;
; AVX1-LABEL: trunc_ashr_v8i32:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT:    vpsrad $31, %xmm1, %xmm1
; AVX1-NEXT:    vpsrad $31, %xmm0, %xmm0
; AVX1-NEXT:    vpackssdw %xmm1, %xmm0, %xmm0
; AVX1-NEXT:    vzeroupper
; AVX1-NEXT:    ret{{[l|q]}}
;
; AVX2-LABEL: trunc_ashr_v8i32:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vpsrad $31, %ymm0, %ymm0
; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT:    vpackssdw %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    vzeroupper
; AVX2-NEXT:    ret{{[l|q]}}
  %1 = ashr <8 x i32> %a, <i32 31, i32 31, i32 31, i32 31, i32 31, i32 31, i32 31, i32 31>
  %2 = trunc <8 x i32> %1 to <8 x i16>
  ret <8 x i16> %2
}
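
; Concatenation of an ashr-by-31 result and a sign-extended icmp result, truncated to v8i16: both inputs are already sign values.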
define <8 x i16> @trunc_ashr_v4i32_icmp_v4i32(<4 x i32> %a, <4 x i32> %b) nounwind {
; X86-SSE-LABEL: trunc_ashr_v4i32_icmp_v4i32:
; X86-SSE:       # %bb.0:
; X86-SSE-NEXT:    psrad $31, %xmm0
; X86-SSE-NEXT:    pcmpgtd {{\.LCPI.*}}, %xmm1
; X86-SSE-NEXT:    packssdw %xmm1, %xmm0
; X86-SSE-NEXT:    retl
;
; X86-AVX-LABEL: trunc_ashr_v4i32_icmp_v4i32:
; X86-AVX:       # %bb.0:
; X86-AVX-NEXT:    vpsrad $31, %xmm0, %xmm0
; X86-AVX-NEXT:    vpcmpgtd {{\.LCPI.*}}, %xmm1, %xmm1
; X86-AVX-NEXT:    vpackssdw %xmm1, %xmm0, %xmm0
; X86-AVX-NEXT:    retl
;
; X64-SSE-LABEL: trunc_ashr_v4i32_icmp_v4i32:
; X64-SSE:       # %bb.0:
; X64-SSE-NEXT:    psrad $31, %xmm0
; X64-SSE-NEXT:    pcmpgtd {{.*}}(%rip), %xmm1
; X64-SSE-NEXT:    packssdw %xmm1, %xmm0
; X64-SSE-NEXT:    retq
;
; X64-AVX-LABEL: trunc_ashr_v4i32_icmp_v4i32:
; X64-AVX:       # %bb.0:
; X64-AVX-NEXT:    vpsrad $31, %xmm0, %xmm0
; X64-AVX-NEXT:    vpcmpgtd {{.*}}(%rip), %xmm1, %xmm1
; X64-AVX-NEXT:    vpackssdw %xmm1, %xmm0, %xmm0
; X64-AVX-NEXT:    retq
  %1 = ashr <4 x i32> %a, <i32 31, i32 31, i32 31, i32 31>
  %2 = icmp sgt <4 x i32> %b, <i32 1, i32 16, i32 255, i32 65535>
  %3 = sext <4 x i1> %2 to <4 x i32>
  %4 = shufflevector <4 x i32> %1, <4 x i32> %3, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
  %5 = trunc <8 x i32> %4 to <8 x i16>
  ret <8 x i16> %5
}
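
; shl/ashr by <63,0,63,0> followed by a splat shuffle: only v8i32 elements 0 and 4 (the low halves of i64 elements 0 and 2) are demanded.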
define <8 x i16> @trunc_ashr_v4i64_demandedelts(<4 x i64> %a0) {
; X86-SSE-LABEL: trunc_ashr_v4i64_demandedelts:
; X86-SSE:       # %bb.0:
; X86-SSE-NEXT:    psllq $63, %xmm1
; X86-SSE-NEXT:    psllq $63, %xmm0
; X86-SSE-NEXT:    psrlq $63, %xmm0
; X86-SSE-NEXT:    movdqa {{.*#+}} xmm2 = [4.9406564584124654E-324,-0.0E+0]
; X86-SSE-NEXT:    pxor %xmm2, %xmm0
; X86-SSE-NEXT:    psubq %xmm2, %xmm0
; X86-SSE-NEXT:    psrlq $63, %xmm1
; X86-SSE-NEXT:    pxor %xmm2, %xmm1
; X86-SSE-NEXT:    psubq %xmm2, %xmm1
; X86-SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,0,0,0]
; X86-SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
; X86-SSE-NEXT:    packssdw %xmm1, %xmm0
; X86-SSE-NEXT:    retl
;
; X86-AVX1-LABEL: trunc_ashr_v4i64_demandedelts:
; X86-AVX1:       # %bb.0:
; X86-AVX1-NEXT:    vpsllq $63, %xmm0, %xmm1
; X86-AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
; X86-AVX1-NEXT:    vpsllq $63, %xmm2, %xmm2
; X86-AVX1-NEXT:    vpsrlq $63, %xmm2, %xmm2
; X86-AVX1-NEXT:    vmovdqa {{.*#+}} xmm3 = [1,0,0,2147483648]
; X86-AVX1-NEXT:    vpxor %xmm3, %xmm2, %xmm2
; X86-AVX1-NEXT:    vpsubq %xmm3, %xmm2, %xmm2
; X86-AVX1-NEXT:    vpsrlq $63, %xmm1, %xmm1
; X86-AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7]
; X86-AVX1-NEXT:    vpxor %xmm3, %xmm0, %xmm0
; X86-AVX1-NEXT:    vpsubq %xmm3, %xmm0, %xmm0
; X86-AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
; X86-AVX1-NEXT:    vpermilps {{.*#+}} ymm0 = ymm0[0,0,0,0,4,4,4,4]
; X86-AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
; X86-AVX1-NEXT:    vpackssdw %xmm1, %xmm0, %xmm0
; X86-AVX1-NEXT:    vzeroupper
; X86-AVX1-NEXT:    retl
;
; X86-AVX2-LABEL: trunc_ashr_v4i64_demandedelts:
; X86-AVX2:       # %bb.0:
; X86-AVX2-NEXT:    vbroadcasti128 {{.*#+}} ymm1 = [63,0,0,0,63,0,0,0]
; X86-AVX2-NEXT:    # ymm1 = mem[0,1,0,1]
; X86-AVX2-NEXT:    vpsllvq %ymm1, %ymm0, %ymm0
; X86-AVX2-NEXT:    vmovdqa {{.*#+}} ymm2 = [0,2147483648,0,2147483648,0,2147483648,0,2147483648]
; X86-AVX2-NEXT:    vpsrlvq %ymm1, %ymm2, %ymm2
; X86-AVX2-NEXT:    vpsrlvq %ymm1, %ymm0, %ymm0
; X86-AVX2-NEXT:    vpxor %ymm2, %ymm0, %ymm0
; X86-AVX2-NEXT:    vpsubq %ymm2, %ymm0, %ymm0
; X86-AVX2-NEXT:    vpshufd {{.*#+}} ymm0 = ymm0[0,0,0,0,4,4,4,4]
; X86-AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
; X86-AVX2-NEXT:    vpackssdw %xmm1, %xmm0, %xmm0
; X86-AVX2-NEXT:    vzeroupper
; X86-AVX2-NEXT:    retl
;
; X64-SSE-LABEL: trunc_ashr_v4i64_demandedelts:
; X64-SSE:       # %bb.0:
; X64-SSE-NEXT:    psllq $63, %xmm1
; X64-SSE-NEXT:    psllq $63, %xmm0
; X64-SSE-NEXT:    psrlq $63, %xmm0
; X64-SSE-NEXT:    movdqa {{.*#+}} xmm2 = [1,9223372036854775808]
; X64-SSE-NEXT:    pxor %xmm2, %xmm0
; X64-SSE-NEXT:    psubq %xmm2, %xmm0
; X64-SSE-NEXT:    psrlq $63, %xmm1
; X64-SSE-NEXT:    pxor %xmm2, %xmm1
; X64-SSE-NEXT:    psubq %xmm2, %xmm1
; X64-SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,0,0,0]
; X64-SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
; X64-SSE-NEXT:    packssdw %xmm1, %xmm0
; X64-SSE-NEXT:    retq
;
; X64-AVX1-LABEL: trunc_ashr_v4i64_demandedelts:
; X64-AVX1:       # %bb.0:
; X64-AVX1-NEXT:    vpsllq $63, %xmm0, %xmm1
; X64-AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
; X64-AVX1-NEXT:    vpsllq $63, %xmm2, %xmm2
; X64-AVX1-NEXT:    vpsrlq $63, %xmm2, %xmm2
; X64-AVX1-NEXT:    vmovdqa {{.*#+}} xmm3 = [1,9223372036854775808]
; X64-AVX1-NEXT:    vpxor %xmm3, %xmm2, %xmm2
; X64-AVX1-NEXT:    vpsubq %xmm3, %xmm2, %xmm2
; X64-AVX1-NEXT:    vpsrlq $63, %xmm1, %xmm1
; X64-AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7]
; X64-AVX1-NEXT:    vpxor %xmm3, %xmm0, %xmm0
; X64-AVX1-NEXT:    vpsubq %xmm3, %xmm0, %xmm0
; X64-AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
; X64-AVX1-NEXT:    vpermilps {{.*#+}} ymm0 = ymm0[0,0,0,0,4,4,4,4]
; X64-AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
; X64-AVX1-NEXT:    vpackssdw %xmm1, %xmm0, %xmm0
; X64-AVX1-NEXT:    vzeroupper
; X64-AVX1-NEXT:    retq
;
; X64-AVX2-LABEL: trunc_ashr_v4i64_demandedelts:
; X64-AVX2:       # %bb.0:
; X64-AVX2-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [1,1,1,1]
; X64-AVX2-NEXT:    vpand %ymm1, %ymm0, %ymm0
; X64-AVX2-NEXT:    vbroadcasti128 {{.*#+}} ymm1 = [1,9223372036854775808,1,9223372036854775808]
; X64-AVX2-NEXT:    # ymm1 = mem[0,1,0,1]
; X64-AVX2-NEXT:    vpxor %ymm1, %ymm0, %ymm0
; X64-AVX2-NEXT:    vpsubq %ymm1, %ymm0, %ymm0
; X64-AVX2-NEXT:    vpshufd {{.*#+}} ymm0 = ymm0[0,0,0,0,4,4,4,4]
; X64-AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
; X64-AVX2-NEXT:    vpackssdw %xmm1, %xmm0, %xmm0
; X64-AVX2-NEXT:    vzeroupper
; X64-AVX2-NEXT:    retq
  %1 = shl <4 x i64> %a0, <i64 63, i64 0, i64 63, i64 0>
  %2 = ashr <4 x i64> %1, <i64 63, i64 0, i64 63, i64 0>
  %3 = bitcast <4 x i64> %2 to <8 x i32>
  %4 = shufflevector <8 x i32> %3, <8 x i32> undef, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 4, i32 4, i32 4, i32 4>
  %5 = trunc <8 x i32> %4 to <8 x i16>
  ret <8 x i16> %5
}
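
; icmp-eq-zero result sign-extended to i8 and concatenated with a zero upper half.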
define <16 x i8> @packsswb_icmp_zero_128(<8 x i16> %a0) {
; SSE-LABEL: packsswb_icmp_zero_128:
; SSE:       # %bb.0:
; SSE-NEXT:    pxor %xmm1, %xmm1
; SSE-NEXT:    pcmpeqw %xmm1, %xmm0
; SSE-NEXT:    packsswb %xmm1, %xmm0
; SSE-NEXT:    ret{{[l|q]}}
;
; AVX-LABEL: packsswb_icmp_zero_128:
; AVX:       # %bb.0:
; AVX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
; AVX-NEXT:    vpcmpeqw %xmm1, %xmm0, %xmm0
; AVX-NEXT:    vpacksswb %xmm1, %xmm0, %xmm0
; AVX-NEXT:    ret{{[l|q]}}
  %1 = icmp eq <8 x i16> %a0, zeroinitializer
  %2 = sext <8 x i1> %1 to <8 x i8>
  %3 = shufflevector <8 x i8> %2, <8 x i8> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
  ret <16 x i8> %3
}
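
; As above, but the zero upper half is concatenated as v16i16 and then truncated to v16i8.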
define <16 x i8> @packsswb_icmp_zero_trunc_128(<8 x i16> %a0) {
; SSE-LABEL: packsswb_icmp_zero_trunc_128:
; SSE:       # %bb.0:
; SSE-NEXT:    pxor %xmm1, %xmm1
; SSE-NEXT:    pcmpeqw %xmm1, %xmm0
; SSE-NEXT:    packsswb %xmm1, %xmm0
; SSE-NEXT:    ret{{[l|q]}}
;
; AVX-LABEL: packsswb_icmp_zero_trunc_128:
; AVX:       # %bb.0:
; AVX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
; AVX-NEXT:    vpcmpeqw %xmm1, %xmm0, %xmm0
; AVX-NEXT:    vpacksswb %xmm1, %xmm0, %xmm0
; AVX-NEXT:    ret{{[l|q]}}
  %1 = icmp eq <8 x i16> %a0, zeroinitializer
  %2 = sext <8 x i1> %1 to <8 x i16>
  %3 = shufflevector <8 x i16> %2, <8 x i16> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
  %4 = trunc <16 x i16> %3 to <16 x i8>
  ret <16 x i8> %4
}
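
; 256-bit variant: within each 128-bit lane, zeros are followed by the truncated compare results.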
define <32 x i8> @packsswb_icmp_zero_256(<16 x i16> %a0) {
; SSE-LABEL: packsswb_icmp_zero_256:
; SSE:       # %bb.0:
; SSE-NEXT:    pxor %xmm2, %xmm2
; SSE-NEXT:    pcmpeqw %xmm2, %xmm1
; SSE-NEXT:    pcmpeqw %xmm2, %xmm0
; SSE-NEXT:    pxor %xmm3, %xmm3
; SSE-NEXT:    packsswb %xmm0, %xmm3
; SSE-NEXT:    packsswb %xmm1, %xmm2
; SSE-NEXT:    movdqa %xmm3, %xmm0
; SSE-NEXT:    movdqa %xmm2, %xmm1
; SSE-NEXT:    ret{{[l|q]}}
;
; AVX1-LABEL: packsswb_icmp_zero_256:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vpxor %xmm1, %xmm1, %xmm1
; AVX1-NEXT:    vpcmpeqw %xmm1, %xmm0, %xmm2
; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT:    vpcmpeqw %xmm1, %xmm0, %xmm0
; AVX1-NEXT:    vpacksswb %xmm0, %xmm1, %xmm0
; AVX1-NEXT:    vpacksswb %xmm2, %xmm1, %xmm1
; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1-NEXT:    ret{{[l|q]}}
;
; AVX2-LABEL: packsswb_icmp_zero_256:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vpxor %xmm1, %xmm1, %xmm1
; AVX2-NEXT:    vpcmpeqw %ymm1, %ymm0, %ymm0
; AVX2-NEXT:    vpacksswb %ymm0, %ymm1, %ymm0
; AVX2-NEXT:    ret{{[l|q]}}
  %1 = icmp eq <16 x i16> %a0, zeroinitializer
  %2 = sext <16 x i1> %1 to <16 x i16>
  %3 = bitcast <16 x i16> %2 to <32 x i8>
  %4 = shufflevector <32 x i8> zeroinitializer, <32 x i8> %3, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 32, i32 34, i32 36, i32 38, i32 40, i32 42, i32 44, i32 46, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 48, i32 50, i32 52, i32 54, i32 56, i32 58, i32 60, i32 62>
  ret <32 x i8> %4
}
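
; Same layout as above, but built as a v32i16 concatenation of zeros and compare results that is then truncated to v32i8.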
define <32 x i8> @packsswb_icmp_zero_trunc_256(<16 x i16> %a0) {
; SSE-LABEL: packsswb_icmp_zero_trunc_256:
; SSE:       # %bb.0:
; SSE-NEXT:    pxor %xmm2, %xmm2
; SSE-NEXT:    pcmpeqw %xmm2, %xmm1
; SSE-NEXT:    pcmpeqw %xmm2, %xmm0
; SSE-NEXT:    pxor %xmm3, %xmm3
; SSE-NEXT:    packsswb %xmm0, %xmm3
; SSE-NEXT:    packsswb %xmm1, %xmm2
; SSE-NEXT:    movdqa %xmm3, %xmm0
; SSE-NEXT:    movdqa %xmm2, %xmm1
; SSE-NEXT:    ret{{[l|q]}}
;
; AVX1-LABEL: packsswb_icmp_zero_trunc_256:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vxorps %xmm1, %xmm1, %xmm1
; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT:    vpxor %xmm3, %xmm3, %xmm3
; AVX1-NEXT:    vpcmpeqw %xmm3, %xmm2, %xmm2
; AVX1-NEXT:    vpcmpeqw %xmm3, %xmm0, %xmm0
; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm2 = zero,zero,ymm0[0,1]
; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT:    vpacksswb %xmm1, %xmm0, %xmm0
; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm1
; AVX1-NEXT:    vpacksswb %xmm1, %xmm2, %xmm1
; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1-NEXT:    ret{{[l|q]}}
;
; AVX2-LABEL: packsswb_icmp_zero_trunc_256:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vpxor %xmm1, %xmm1, %xmm1
; AVX2-NEXT:    vpcmpeqw %ymm1, %ymm0, %ymm0
; AVX2-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm0[4,5,6,7]
; AVX2-NEXT:    vperm2i128 {{.*#+}} ymm0 = zero,zero,ymm0[0,1]
; AVX2-NEXT:    vpacksswb %ymm1, %ymm0, %ymm0
; AVX2-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
; AVX2-NEXT:    ret{{[l|q]}}
  %1 = icmp eq <16 x i16> %a0, zeroinitializer
  %2 = sext <16 x i1> %1 to <16 x i16>
  %3 = shufflevector <16 x i16> zeroinitializer, <16 x i16> %2, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
  %4 = trunc <32 x i16> %3 to <32 x i8>
  ret <32 x i8> %4
}