; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=SSE,SSE2,X86-SSE,X86-SSE2
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=SSE,SSE2,X64-SSE,X64-SSE2
; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+sse4.2 | FileCheck %s --check-prefixes=SSE,SSE4,X86-SSE,X86-SSE4
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.2 | FileCheck %s --check-prefixes=SSE,SSE4,X64-SSE,X64-SSE4
; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=AVX,AVX1,X86-AVX,X86-AVX1
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=AVX,AVX1,X64-AVX,X64-AVX1
; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX2,X86-AVX,X86-AVX2
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX2,X64-AVX,X64-AVX2

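; Shifting each i64 element right by 63 leaves only 0 or 1 per element, so the
; v4i64 -> v4i32 truncation can be lowered as a single pack of the dword halves
; (PACKSSDW on SSE2, PACKUSDW on SSE4.1 and later).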
define <4 x i32> @trunc_lshr_v4i64(<4 x i64> %a) nounwind {
; SSE2-LABEL: trunc_lshr_v4i64:
; SSE2:       # %bb.0:
; SSE2-NEXT:    psrlq $63, %xmm1
; SSE2-NEXT:    psrlq $63, %xmm0
; SSE2-NEXT:    packssdw %xmm1, %xmm0
; SSE2-NEXT:    ret{{[l|q]}}
;
; SSE4-LABEL: trunc_lshr_v4i64:
; SSE4:       # %bb.0:
; SSE4-NEXT:    psrlq $63, %xmm1
; SSE4-NEXT:    psrlq $63, %xmm0
; SSE4-NEXT:    packusdw %xmm1, %xmm0
; SSE4-NEXT:    ret{{[l|q]}}
;
; AVX1-LABEL: trunc_lshr_v4i64:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT:    vpsrlq $63, %xmm1, %xmm1
; AVX1-NEXT:    vpsrlq $63, %xmm0, %xmm0
; AVX1-NEXT:    vpackusdw %xmm1, %xmm0, %xmm0
; AVX1-NEXT:    vzeroupper
; AVX1-NEXT:    ret{{[l|q]}}
;
; AVX2-LABEL: trunc_lshr_v4i64:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vpsrlq $63, %ymm0, %ymm0
; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT:    vpackusdw %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    vzeroupper
; AVX2-NEXT:    ret{{[l|q]}}
  %1 = lshr <4 x i64> %a, <i64 63, i64 63, i64 63, i64 63>
  %2 = trunc <4 x i64> %1 to <4 x i32>
  ret <4 x i32> %2
}

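; A logical shift by 49 clears the upper 49 bits of each i64 element, so the
; values fit in 16 bits and the bitcast + v8i32 -> v8i16 truncation still
; needs only one pack.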
define <8 x i16> @trunc_lshr_v4i64_bitcast(<4 x i64> %a0) {
; SSE2-LABEL: trunc_lshr_v4i64_bitcast:
; SSE2:       # %bb.0:
; SSE2-NEXT:    psrlq $49, %xmm1
; SSE2-NEXT:    psrlq $49, %xmm0
; SSE2-NEXT:    packssdw %xmm1, %xmm0
; SSE2-NEXT:    ret{{[l|q]}}
;
; SSE4-LABEL: trunc_lshr_v4i64_bitcast:
; SSE4:       # %bb.0:
; SSE4-NEXT:    psrlq $49, %xmm1
; SSE4-NEXT:    psrlq $49, %xmm0
; SSE4-NEXT:    packusdw %xmm1, %xmm0
; SSE4-NEXT:    ret{{[l|q]}}
;
; AVX1-LABEL: trunc_lshr_v4i64_bitcast:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT:    vpsrlq $49, %xmm1, %xmm1
; AVX1-NEXT:    vpsrlq $49, %xmm0, %xmm0
; AVX1-NEXT:    vpackusdw %xmm1, %xmm0, %xmm0
; AVX1-NEXT:    vzeroupper
; AVX1-NEXT:    ret{{[l|q]}}
;
; AVX2-LABEL: trunc_lshr_v4i64_bitcast:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vpsrlq $49, %ymm0, %ymm0
; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT:    vpackusdw %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    vzeroupper
; AVX2-NEXT:    ret{{[l|q]}}
  %1 = lshr <4 x i64> %a0, <i64 49, i64 49, i64 49, i64 49>
  %2 = bitcast <4 x i64> %1 to <8 x i32>
  %3 = trunc <8 x i32> %2 to <8 x i16>
  ret <8 x i16> %3
}

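; Shifting each i32 element right by 31 leaves 0 or 1, so the v8i32 -> v8i16
; truncation packs directly.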
define <8 x i16> @trunc_lshr_v8i32(<8 x i32> %a) nounwind {
; SSE2-LABEL: trunc_lshr_v8i32:
; SSE2:       # %bb.0:
; SSE2-NEXT:    psrld $31, %xmm1
; SSE2-NEXT:    psrld $31, %xmm0
; SSE2-NEXT:    packssdw %xmm1, %xmm0
; SSE2-NEXT:    ret{{[l|q]}}
;
; SSE4-LABEL: trunc_lshr_v8i32:
; SSE4:       # %bb.0:
; SSE4-NEXT:    psrld $31, %xmm1
; SSE4-NEXT:    psrld $31, %xmm0
; SSE4-NEXT:    packusdw %xmm1, %xmm0
; SSE4-NEXT:    ret{{[l|q]}}
;
; AVX1-LABEL: trunc_lshr_v8i32:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT:    vpsrld $31, %xmm1, %xmm1
; AVX1-NEXT:    vpsrld $31, %xmm0, %xmm0
; AVX1-NEXT:    vpackusdw %xmm1, %xmm0, %xmm0
; AVX1-NEXT:    vzeroupper
; AVX1-NEXT:    ret{{[l|q]}}
;
; AVX2-LABEL: trunc_lshr_v8i32:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vpsrld $31, %ymm0, %ymm0
; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT:    vpackusdw %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    vzeroupper
; AVX2-NEXT:    ret{{[l|q]}}
  %1 = lshr <8 x i32> %a, <i32 31, i32 31, i32 31, i32 31, i32 31, i32 31, i32 31, i32 31>
  %2 = trunc <8 x i32> %1 to <8 x i16>
  ret <8 x i16> %2
}

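; The shuffle only demands elements 0 and 4, so demanded-elts simplification
; folds the shl/lshr pair into an AND with 1 ahead of the pack.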
define <8 x i16> @trunc_lshr_v4i64_demandedelts(<4 x i64> %a0) {
; X86-SSE2-LABEL: trunc_lshr_v4i64_demandedelts:
; X86-SSE2:       # %bb.0:
; X86-SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,0,0,0]
; X86-SSE2-NEXT:    movdqa {{.*#+}} xmm2 = [1,1,1,1]
; X86-SSE2-NEXT:    pand %xmm2, %xmm1
; X86-SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
; X86-SSE2-NEXT:    pand %xmm2, %xmm0
; X86-SSE2-NEXT:    packssdw %xmm1, %xmm0
; X86-SSE2-NEXT:    retl
;
; X64-SSE2-LABEL: trunc_lshr_v4i64_demandedelts:
; X64-SSE2:       # %bb.0:
; X64-SSE2-NEXT:    movdqa {{.*#+}} xmm2 = [1,18446744073709551615]
; X64-SSE2-NEXT:    pand %xmm2, %xmm0
; X64-SSE2-NEXT:    pand %xmm2, %xmm1
; X64-SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,0,0,0]
; X64-SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
; X64-SSE2-NEXT:    packssdw %xmm1, %xmm0
; X64-SSE2-NEXT:    retq
;
; X86-SSE4-LABEL: trunc_lshr_v4i64_demandedelts:
; X86-SSE4:       # %bb.0:
; X86-SSE4-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,0,0,0]
; X86-SSE4-NEXT:    movdqa {{.*#+}} xmm2 = [1,1,1,1]
; X86-SSE4-NEXT:    pand %xmm2, %xmm1
; X86-SSE4-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
; X86-SSE4-NEXT:    pand %xmm2, %xmm0
; X86-SSE4-NEXT:    packusdw %xmm1, %xmm0
; X86-SSE4-NEXT:    retl
;
; X64-SSE4-LABEL: trunc_lshr_v4i64_demandedelts:
; X64-SSE4:       # %bb.0:
; X64-SSE4-NEXT:    movdqa {{.*#+}} xmm2 = [1,18446744073709551615]
; X64-SSE4-NEXT:    pand %xmm2, %xmm0
; X64-SSE4-NEXT:    pand %xmm2, %xmm1
; X64-SSE4-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,0,0,0]
; X64-SSE4-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
; X64-SSE4-NEXT:    packusdw %xmm1, %xmm0
; X64-SSE4-NEXT:    retq
;
; X86-AVX1-LABEL: trunc_lshr_v4i64_demandedelts:
; X86-AVX1:       # %bb.0:
; X86-AVX1-NEXT:    vshufps {{.*#+}} ymm0 = ymm0[0,0,0,0,4,4,4,4]
; X86-AVX1-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0
; X86-AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
; X86-AVX1-NEXT:    vpackusdw %xmm1, %xmm0, %xmm0
; X86-AVX1-NEXT:    vzeroupper
; X86-AVX1-NEXT:    retl
;
; X64-AVX1-LABEL: trunc_lshr_v4i64_demandedelts:
; X64-AVX1:       # %bb.0:
; X64-AVX1-NEXT:    vshufps {{.*#+}} ymm0 = ymm0[0,0,0,0,4,4,4,4]
; X64-AVX1-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
; X64-AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
; X64-AVX1-NEXT:    vpackusdw %xmm1, %xmm0, %xmm0
; X64-AVX1-NEXT:    vzeroupper
; X64-AVX1-NEXT:    retq
;
; AVX2-LABEL: trunc_lshr_v4i64_demandedelts:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vpshufd {{.*#+}} ymm0 = ymm0[0,0,0,0,4,4,4,4]
; AVX2-NEXT:    vpbroadcastd {{.*#+}} ymm1 = [1,1,1,1,1,1,1,1]
; AVX2-NEXT:    vpand %ymm1, %ymm0, %ymm0
; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT:    vpackusdw %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    vzeroupper
; AVX2-NEXT:    ret{{[l|q]}}
  %1 = shl <4 x i64> %a0, <i64 63, i64 0, i64 63, i64 0>
  %2 = lshr <4 x i64> %1, <i64 63, i64 0, i64 63, i64 0>
  %3 = bitcast <4 x i64> %2 to <8 x i32>
  %4 = shufflevector <8 x i32> %3, <8 x i32> undef, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 4, i32 4, i32 4, i32 4>
  %5 = trunc <8 x i32> %4 to <8 x i16>
  ret <8 x i16> %5
}

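; Taking the even bytes of two 0/1-valued v8i16 vectors truncates each operand
; to bytes; the concatenation is matched to a single PACKUSWB.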
define <16 x i8> @shuffle_lshr_2v8i16(<8 x i16> %a0, <8 x i16> %a1) {
; SSE-LABEL: shuffle_lshr_2v8i16:
; SSE:       # %bb.0:
; SSE-NEXT:    psrlw $15, %xmm0
; SSE-NEXT:    psrlw $15, %xmm1
; SSE-NEXT:    packuswb %xmm1, %xmm0
; SSE-NEXT:    ret{{[l|q]}}
;
; AVX-LABEL: shuffle_lshr_2v8i16:
; AVX:       # %bb.0:
; AVX-NEXT:    vpsrlw $15, %xmm0, %xmm0
; AVX-NEXT:    vpsrlw $15, %xmm1, %xmm1
; AVX-NEXT:    vpackuswb %xmm1, %xmm0, %xmm0
; AVX-NEXT:    ret{{[l|q]}}
  %lshr0 = lshr <8 x i16> %a0, <i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15>
  %lshr1 = lshr <8 x i16> %a1, <i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15>
  %bc0 = bitcast <8 x i16> %lshr0 to <16 x i8>
  %bc1 = bitcast <8 x i16> %lshr1 to <16 x i8>
  %res = shufflevector <16 x i8> %bc0, <16 x i8> %bc1, <16 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 16, i32 18, i32 20, i32 22, i32 24, i32 26, i32 28, i32 30>
  ret <16 x i8> %res
}

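; The same shuffle pattern on words: the even words of two 0/1-valued v4i32
; vectors form a v8i16 truncation, packed with PACKSSDW (SSE2) or PACKUSDW.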
define <8 x i16> @shuffle_lshr_2v4i32(<4 x i32> %a0, <4 x i32> %a1) {
; SSE2-LABEL: shuffle_lshr_2v4i32:
; SSE2:       # %bb.0:
; SSE2-NEXT:    psrld $31, %xmm0
; SSE2-NEXT:    psrld $31, %xmm1
; SSE2-NEXT:    packssdw %xmm1, %xmm0
; SSE2-NEXT:    ret{{[l|q]}}
;
; SSE4-LABEL: shuffle_lshr_2v4i32:
; SSE4:       # %bb.0:
; SSE4-NEXT:    psrld $31, %xmm0
; SSE4-NEXT:    psrld $31, %xmm1
; SSE4-NEXT:    packusdw %xmm1, %xmm0
; SSE4-NEXT:    ret{{[l|q]}}
;
; AVX-LABEL: shuffle_lshr_2v4i32:
; AVX:       # %bb.0:
; AVX-NEXT:    vpsrld $31, %xmm0, %xmm0
; AVX-NEXT:    vpsrld $31, %xmm1, %xmm1
; AVX-NEXT:    vpackusdw %xmm1, %xmm0, %xmm0
; AVX-NEXT:    ret{{[l|q]}}
  %lshr0 = lshr <4 x i32> %a0, <i32 31, i32 31, i32 31, i32 31>
  %lshr1 = lshr <4 x i32> %a1, <i32 31, i32 31, i32 31, i32 31>
  %bc0 = bitcast <4 x i32> %lshr0 to <8 x i16>
  %bc1 = bitcast <4 x i32> %lshr1 to <8 x i16>
  %res = shufflevector <8 x i16> %bc0, <8 x i16> %bc1, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
  ret <8 x i16> %res
}

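; The same shuffle pattern on dwords: the even dwords of two 0/1-valued v2i64
; vectors form a v4i32 truncation, again lowered with a single pack.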
define <4 x i32> @shuffle_lshr_2v2i64(<2 x i64> %a0, <2 x i64> %a1) {
; SSE2-LABEL: shuffle_lshr_2v2i64:
; SSE2:       # %bb.0:
; SSE2-NEXT:    psrlq $63, %xmm0
; SSE2-NEXT:    psrlq $63, %xmm1
; SSE2-NEXT:    packssdw %xmm1, %xmm0
; SSE2-NEXT:    ret{{[l|q]}}
;
; SSE4-LABEL: shuffle_lshr_2v2i64:
; SSE4:       # %bb.0:
; SSE4-NEXT:    psrlq $63, %xmm0
; SSE4-NEXT:    psrlq $63, %xmm1
; SSE4-NEXT:    packusdw %xmm1, %xmm0
; SSE4-NEXT:    ret{{[l|q]}}
;
; AVX-LABEL: shuffle_lshr_2v2i64:
; AVX:       # %bb.0:
; AVX-NEXT:    vpsrlq $63, %xmm0, %xmm0
; AVX-NEXT:    vpsrlq $63, %xmm1, %xmm1
; AVX-NEXT:    vpackusdw %xmm1, %xmm0, %xmm0
; AVX-NEXT:    ret{{[l|q]}}
  %lshr0 = lshr <2 x i64> %a0, <i64 63, i64 63>
  %lshr1 = lshr <2 x i64> %a1, <i64 63, i64 63>
  %bc0 = bitcast <2 x i64> %lshr0 to <4 x i32>
  %bc1 = bitcast <2 x i64> %lshr1 to <4 x i32>
  %res = shufflevector <4 x i32> %bc0, <4 x i32> %bc1, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
  ret <4 x i32> %res
}

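; As above, but shuffled through a <4 x float> bitcast; the pack lowering is
; unaffected by the floating-point result type.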
define <4 x float> @shuffle_lshr_2v2i64_bitcast(<2 x i64> %a0, <2 x i64> %a1) {
; SSE2-LABEL: shuffle_lshr_2v2i64_bitcast:
; SSE2:       # %bb.0:
; SSE2-NEXT:    psrlq $63, %xmm0
; SSE2-NEXT:    psrlq $63, %xmm1
; SSE2-NEXT:    packssdw %xmm1, %xmm0
; SSE2-NEXT:    ret{{[l|q]}}
;
; SSE4-LABEL: shuffle_lshr_2v2i64_bitcast:
; SSE4:       # %bb.0:
; SSE4-NEXT:    psrlq $63, %xmm0
; SSE4-NEXT:    psrlq $63, %xmm1
; SSE4-NEXT:    packusdw %xmm1, %xmm0
; SSE4-NEXT:    ret{{[l|q]}}
;
; AVX-LABEL: shuffle_lshr_2v2i64_bitcast:
; AVX:       # %bb.0:
; AVX-NEXT:    vpsrlq $63, %xmm0, %xmm0
; AVX-NEXT:    vpsrlq $63, %xmm1, %xmm1
; AVX-NEXT:    vpackusdw %xmm1, %xmm0, %xmm0
; AVX-NEXT:    ret{{[l|q]}}
  %lshr0 = lshr <2 x i64> %a0, <i64 63, i64 63>
  %lshr1 = lshr <2 x i64> %a1, <i64 63, i64 63>
  %bc0 = bitcast <2 x i64> %lshr0 to <4 x float>
  %bc1 = bitcast <2 x i64> %lshr1 to <4 x float>
  %res = shufflevector <4 x float> %bc0, <4 x float> %bc1, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
  ret <4 x float> %res
}

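; The zero-extended compare mask concatenated with zeros: the mask is packed
; to bytes with PACKSSWB, masked to 0/1, and the upper half cleared with MOVQ.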
define <16 x i8> @packuswb_icmp_zero_128(<8 x i16> %a0) {
; X86-SSE-LABEL: packuswb_icmp_zero_128:
; X86-SSE:       # %bb.0:
; X86-SSE-NEXT:    pxor %xmm1, %xmm1
; X86-SSE-NEXT:    pcmpeqw %xmm0, %xmm1
; X86-SSE-NEXT:    packsswb %xmm1, %xmm1
; X86-SSE-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1
; X86-SSE-NEXT:    movq {{.*#+}} xmm0 = xmm1[0],zero
; X86-SSE-NEXT:    retl
;
; X64-SSE-LABEL: packuswb_icmp_zero_128:
; X64-SSE:       # %bb.0:
; X64-SSE-NEXT:    pxor %xmm1, %xmm1
; X64-SSE-NEXT:    pcmpeqw %xmm0, %xmm1
; X64-SSE-NEXT:    packsswb %xmm1, %xmm1
; X64-SSE-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
; X64-SSE-NEXT:    movq {{.*#+}} xmm0 = xmm1[0],zero
; X64-SSE-NEXT:    retq
;
; X86-AVX-LABEL: packuswb_icmp_zero_128:
; X86-AVX:       # %bb.0:
; X86-AVX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
; X86-AVX-NEXT:    vpcmpeqw %xmm1, %xmm0, %xmm0
; X86-AVX-NEXT:    vpacksswb %xmm0, %xmm0, %xmm0
; X86-AVX-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-AVX-NEXT:    vmovq {{.*#+}} xmm0 = xmm0[0],zero
; X86-AVX-NEXT:    retl
;
; X64-AVX-LABEL: packuswb_icmp_zero_128:
; X64-AVX:       # %bb.0:
; X64-AVX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
; X64-AVX-NEXT:    vpcmpeqw %xmm1, %xmm0, %xmm0
; X64-AVX-NEXT:    vpacksswb %xmm0, %xmm0, %xmm0
; X64-AVX-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; X64-AVX-NEXT:    vmovq {{.*#+}} xmm0 = xmm0[0],zero
; X64-AVX-NEXT:    retq
  %1 = icmp eq <8 x i16> %a0, zeroinitializer
  %2 = zext <8 x i1> %1 to <8 x i8>
  %3 = shufflevector <8 x i8> %2, <8 x i8> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
  ret <16 x i8> %3
}

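; With an explicit trunc of the widened compare result, the zero vector can be
; fed directly into PACKUSWB as the second operand.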
define <16 x i8> @packuswb_icmp_zero_trunc_128(<8 x i16> %a0) {
; SSE-LABEL: packuswb_icmp_zero_trunc_128:
; SSE:       # %bb.0:
; SSE-NEXT:    pxor %xmm1, %xmm1
; SSE-NEXT:    pcmpeqw %xmm1, %xmm0
; SSE-NEXT:    psrlw $15, %xmm0
; SSE-NEXT:    packuswb %xmm1, %xmm0
; SSE-NEXT:    ret{{[l|q]}}
;
; AVX-LABEL: packuswb_icmp_zero_trunc_128:
; AVX:       # %bb.0:
; AVX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
; AVX-NEXT:    vpcmpeqw %xmm1, %xmm0, %xmm0
; AVX-NEXT:    vpsrlw $15, %xmm0, %xmm0
; AVX-NEXT:    vpackuswb %xmm1, %xmm0, %xmm0
; AVX-NEXT:    ret{{[l|q]}}
  %1 = icmp eq <8 x i16> %a0, zeroinitializer
  %2 = zext <8 x i1> %1 to <8 x i16>
  %3 = shufflevector <8 x i16> %2, <8 x i16> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
  %4 = trunc <16 x i16> %3 to <16 x i8>
  ret <16 x i8> %4
}

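; 256-bit version: each 128-bit half packs zeros against the 0/1 compare mask,
; so AVX2 lowers the whole pattern to a single in-lane VPACKUSWB.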
define <32 x i8> @packuswb_icmp_zero_256(<16 x i16> %a0) {
; SSE-LABEL: packuswb_icmp_zero_256:
; SSE:       # %bb.0:
; SSE-NEXT:    pxor %xmm2, %xmm2
; SSE-NEXT:    pcmpeqw %xmm2, %xmm1
; SSE-NEXT:    psrlw $15, %xmm1
; SSE-NEXT:    pcmpeqw %xmm2, %xmm0
; SSE-NEXT:    psrlw $15, %xmm0
; SSE-NEXT:    pxor %xmm3, %xmm3
; SSE-NEXT:    packuswb %xmm0, %xmm3
; SSE-NEXT:    packuswb %xmm1, %xmm2
; SSE-NEXT:    movdqa %xmm3, %xmm0
; SSE-NEXT:    movdqa %xmm2, %xmm1
; SSE-NEXT:    ret{{[l|q]}}
;
; X86-AVX1-LABEL: packuswb_icmp_zero_256:
; X86-AVX1:       # %bb.0:
; X86-AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
; X86-AVX1-NEXT:    vpxor %xmm2, %xmm2, %xmm2
; X86-AVX1-NEXT:    vpcmpeqw %xmm2, %xmm1, %xmm1
; X86-AVX1-NEXT:    vpcmpeqw %xmm2, %xmm0, %xmm0
; X86-AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
; X86-AVX1-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0
; X86-AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
; X86-AVX1-NEXT:    vpackuswb %xmm1, %xmm2, %xmm1
; X86-AVX1-NEXT:    vpackuswb %xmm0, %xmm2, %xmm0
; X86-AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
; X86-AVX1-NEXT:    retl
;
; X64-AVX1-LABEL: packuswb_icmp_zero_256:
; X64-AVX1:       # %bb.0:
; X64-AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
; X64-AVX1-NEXT:    vpxor %xmm2, %xmm2, %xmm2
; X64-AVX1-NEXT:    vpcmpeqw %xmm2, %xmm1, %xmm1
; X64-AVX1-NEXT:    vpcmpeqw %xmm2, %xmm0, %xmm0
; X64-AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
; X64-AVX1-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
; X64-AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
; X64-AVX1-NEXT:    vpackuswb %xmm1, %xmm2, %xmm1
; X64-AVX1-NEXT:    vpackuswb %xmm0, %xmm2, %xmm0
; X64-AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
; X64-AVX1-NEXT:    retq
;
; AVX2-LABEL: packuswb_icmp_zero_256:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vpxor %xmm1, %xmm1, %xmm1
; AVX2-NEXT:    vpcmpeqw %ymm1, %ymm0, %ymm0
; AVX2-NEXT:    vpsrlw $15, %ymm0, %ymm0
; AVX2-NEXT:    vpackuswb %ymm0, %ymm1, %ymm0
; AVX2-NEXT:    ret{{[l|q]}}
  %1 = icmp eq <16 x i16> %a0, zeroinitializer
  %2 = zext <16 x i1> %1 to <16 x i16>
  %3 = bitcast <16 x i16> %2 to <32 x i8>
  %4 = shufflevector <32 x i8> zeroinitializer, <32 x i8> %3, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 32, i32 34, i32 36, i32 38, i32 40, i32 42, i32 44, i32 46, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 48, i32 50, i32 52, i32 54, i32 56, i32 58, i32 60, i32 62>
  ret <32 x i8> %4
}

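; 256-bit trunc version: AVX1 splits into two 128-bit packs and reassembles
; with VINSERTF128, while AVX2 stays in ymm registers throughout.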
define <32 x i8> @packuswb_icmp_zero_trunc_256(<16 x i16> %a0) {
; SSE-LABEL: packuswb_icmp_zero_trunc_256:
; SSE:       # %bb.0:
; SSE-NEXT:    pxor %xmm2, %xmm2
; SSE-NEXT:    pcmpeqw %xmm2, %xmm1
; SSE-NEXT:    psrlw $15, %xmm1
; SSE-NEXT:    pcmpeqw %xmm2, %xmm0
; SSE-NEXT:    psrlw $15, %xmm0
; SSE-NEXT:    pxor %xmm3, %xmm3
; SSE-NEXT:    packuswb %xmm0, %xmm3
; SSE-NEXT:    packuswb %xmm1, %xmm2
; SSE-NEXT:    movdqa %xmm3, %xmm0
; SSE-NEXT:    movdqa %xmm2, %xmm1
; SSE-NEXT:    ret{{[l|q]}}
;
; AVX1-LABEL: packuswb_icmp_zero_trunc_256:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vpxor %xmm1, %xmm1, %xmm1
; AVX1-NEXT:    vpcmpeqw %xmm1, %xmm0, %xmm2
; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT:    vpcmpeqw %xmm1, %xmm0, %xmm0
; AVX1-NEXT:    vpsrlw $15, %xmm0, %xmm0
; AVX1-NEXT:    vpackuswb %xmm0, %xmm1, %xmm0
; AVX1-NEXT:    vpsrlw $15, %xmm2, %xmm2
; AVX1-NEXT:    vpackuswb %xmm2, %xmm1, %xmm1
; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1-NEXT:    ret{{[l|q]}}
;
; AVX2-LABEL: packuswb_icmp_zero_trunc_256:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vpxor %xmm1, %xmm1, %xmm1
; AVX2-NEXT:    vpcmpeqw %ymm1, %ymm0, %ymm0
; AVX2-NEXT:    vpsrlw $15, %ymm0, %ymm0
; AVX2-NEXT:    vpackuswb %ymm0, %ymm1, %ymm0
; AVX2-NEXT:    ret{{[l|q]}}
  %1 = icmp eq <16 x i16> %a0, zeroinitializer
  %2 = zext <16 x i1> %1 to <16 x i16>
  %3 = shufflevector <16 x i16> zeroinitializer, <16 x i16> %2, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
  %4 = trunc <32 x i16> %3 to <32 x i8>
  ret <32 x i8> %4
}

;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line: