1 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefix=SSE2
3 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX2
4 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefixes=AVX,AVX512,AVX512F
5 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bw | FileCheck %s --check-prefixes=AVX,AVX512,AVX512BW
; test1: sext <8 x i32> to <8 x i64>, keep elements 0-3, trunc back to
; <4 x i32>. This round-trips the low half of %v, so the expected codegen is
; a no-op on the low 128 bits (AVX emits only a register "kill" annotation).
; NOTE(review): the ret/closing-brace lines appear elided in this excerpt —
; confirm against the full file before regenerating checks.
7 define <4 x i32> @test1(<8 x i32> %v) {
14 ; AVX-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
15 ; AVX-NEXT: vzeroupper
17 %x = sext <8 x i32> %v to <8 x i64>
18 %s = shufflevector <8 x i64> %x, <8 x i64> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
19 %t = trunc <4 x i64> %s to <4 x i32>
; test2: same sext/shuffle/trunc round-trip as test1 but selecting elements
; 4-7, i.e. the high half of %v. Expected codegen is a plain upper-half
; extract: SSE2 moves %xmm1 into %xmm0, AVX uses vextractf128 $1.
23 define <4 x i32> @test2(<8 x i32> %v) {
26 ; SSE2-NEXT: movaps %xmm1, %xmm0
31 ; AVX-NEXT: vextractf128 $1, %ymm0, %xmm0
32 ; AVX-NEXT: vzeroupper
34 %x = sext <8 x i32> %v to <8 x i64>
35 %s = shufflevector <8 x i64> %x, <8 x i64> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
36 %t = trunc <4 x i64> %s to <4 x i32>
; test3: sext to <8 x i64>, select elements 4 and 5, trunc to <2 x i32>.
; SSE2 builds the sign extension by hand (pcmpgtd against zero to produce the
; sign words, then punpckldq interleaves them); AVX2 extracts the upper lane
; and uses vpmovsxdq; AVX512 widens with vpmovsxdq %ymm->%zmm and extracts
; 128-bit lane 2 of the zmm result.
; NOTE(review): here the sext is still materialized rather than folded away —
; presumably because the <2 x i32> result is widened to 64-bit elements;
; confirm against current codegen when regenerating.
40 define <2 x i32> @test3(<8 x i32> %v) {
43 ; SSE2-NEXT: movdqa %xmm1, %xmm0
44 ; SSE2-NEXT: pxor %xmm1, %xmm1
45 ; SSE2-NEXT: pcmpgtd %xmm0, %xmm1
46 ; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
51 ; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm0
52 ; AVX2-NEXT: vpmovsxdq %xmm0, %xmm0
53 ; AVX2-NEXT: vzeroupper
56 ; AVX512-LABEL: test3:
58 ; AVX512-NEXT: vpmovsxdq %ymm0, %zmm0
59 ; AVX512-NEXT: vextracti32x4 $2, %zmm0, %xmm0
60 ; AVX512-NEXT: vzeroupper
62 %x = sext <8 x i32> %v to <8 x i64>
63 %s = shufflevector <8 x i64> %x, <8 x i64> undef, <2 x i32> <i32 4, i32 5>
64 %t = trunc <2 x i64> %s to <2 x i32>
; test4: as test3 but selecting elements 0 and 1 (the lowest two), so no lane
; extract is needed: SSE2 sign-extends in place, AVX2 uses a single
; vpmovsxdq on %xmm0, and AVX512 widens then just "kills" down to %xmm0.
68 define <2 x i32> @test4(<8 x i32> %v) {
71 ; SSE2-NEXT: pxor %xmm1, %xmm1
72 ; SSE2-NEXT: pcmpgtd %xmm0, %xmm1
73 ; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
78 ; AVX2-NEXT: vpmovsxdq %xmm0, %xmm0
79 ; AVX2-NEXT: vzeroupper
82 ; AVX512-LABEL: test4:
84 ; AVX512-NEXT: vpmovsxdq %ymm0, %zmm0
85 ; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
86 ; AVX512-NEXT: vzeroupper
88 %x = sext <8 x i32> %v to <8 x i64>
89 %s = shufflevector <8 x i64> %x, <8 x i64> undef, <2 x i32> <i32 0, i32 1>
90 %t = trunc <2 x i64> %s to <2 x i32>
; test5: selects elements 3 and 4, which straddle the two 128-bit (SSE2) /
; 256-bit (AVX) halves of the sign-extended vector, so both halves must be
; materialized and recombined: SSE2 sign-extends each half and joins them
; with shufps; AVX2/AVX512 extract the two relevant 128-bit lanes and stitch
; the middle pair together with vpalignr.
94 define <2 x i32> @test5(<8 x i32> %v) {
97 ; SSE2-NEXT: pxor %xmm2, %xmm2
98 ; SSE2-NEXT: pxor %xmm3, %xmm3
99 ; SSE2-NEXT: pcmpgtd %xmm1, %xmm3
100 ; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1]
101 ; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
102 ; SSE2-NEXT: pcmpgtd %xmm0, %xmm2
103 ; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
104 ; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,3],xmm1[0,1]
109 ; AVX2-NEXT: vpmovsxdq %xmm0, %ymm1
110 ; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm0
111 ; AVX2-NEXT: vpmovsxdq %xmm0, %xmm0
112 ; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm1
113 ; AVX2-NEXT: vpalignr {{.*#+}} xmm0 = xmm1[8,9,10,11,12,13,14,15],xmm0[0,1,2,3,4,5,6,7]
114 ; AVX2-NEXT: vzeroupper
117 ; AVX512-LABEL: test5:
119 ; AVX512-NEXT: vpmovsxdq %ymm0, %zmm0
120 ; AVX512-NEXT: vextracti32x4 $2, %zmm0, %xmm1
121 ; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm0
122 ; AVX512-NEXT: vpalignr {{.*#+}} xmm0 = xmm0[8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4,5,6,7]
123 ; AVX512-NEXT: vzeroupper
125 %x = sext <8 x i32> %v to <8 x i64>
126 %s = shufflevector <8 x i64> %x, <8 x i64> undef, <2 x i32> <i32 3, i32 4>
127 %t = trunc <2 x i64> %s to <2 x i32>
; test6: zext counterpart of test1 — zero-extend, keep elements 0-3, trunc
; back. Also a round-trip of the low half, so AVX again emits only the
; register "kill" annotation (no real work).
131 define <4 x i32> @test6(<8 x i32> %v) {
138 ; AVX-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
139 ; AVX-NEXT: vzeroupper
141 %x = zext <8 x i32> %v to <8 x i64>
142 %s = shufflevector <8 x i64> %x, <8 x i64> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
143 %t = trunc <4 x i64> %s to <4 x i32>
; test7: zext counterpart of test2 — selects elements 4-7, reducing to a
; plain upper-half extract (SSE2 movaps from %xmm1, AVX vextractf128 $1).
147 define <4 x i32> @test7(<8 x i32> %v) {
150 ; SSE2-NEXT: movaps %xmm1, %xmm0
155 ; AVX-NEXT: vextractf128 $1, %ymm0, %xmm0
156 ; AVX-NEXT: vzeroupper
158 %x = zext <8 x i32> %v to <8 x i64>
159 %s = shufflevector <8 x i64> %x, <8 x i64> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
160 %t = trunc <4 x i64> %s to <4 x i32>
; test8: zext counterpart of test3 — elements 4 and 5. SSE2 zero-extends by
; interleaving with a zeroed register (unpcklps with xorps'd %xmm1); AVX2
; extracts the upper lane and vpmovzxdq's it; AVX512 widens the whole ymm
; with vpmovzxdq and extracts 128-bit lane 2.
164 define <2 x i32> @test8(<8 x i32> %v) {
167 ; SSE2-NEXT: movaps %xmm1, %xmm0
168 ; SSE2-NEXT: xorps %xmm1, %xmm1
169 ; SSE2-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
174 ; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm0
175 ; AVX2-NEXT: vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
176 ; AVX2-NEXT: vzeroupper
179 ; AVX512-LABEL: test8:
181 ; AVX512-NEXT: vpmovzxdq {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero
182 ; AVX512-NEXT: vextracti32x4 $2, %zmm0, %xmm0
183 ; AVX512-NEXT: vzeroupper
185 %x = zext <8 x i32> %v to <8 x i64>
186 %s = shufflevector <8 x i64> %x, <8 x i64> undef, <2 x i32> <i32 4, i32 5>
187 %t = trunc <2 x i64> %s to <2 x i32>
; test9: zext counterpart of test4 — elements 0 and 1, so no lane extract:
; SSE2 interleaves with zero in place, AVX2 uses a single xmm vpmovzxdq, and
; AVX512 widens then "kills" down to %xmm0.
191 define <2 x i32> @test9(<8 x i32> %v) {
194 ; SSE2-NEXT: xorps %xmm1, %xmm1
195 ; SSE2-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
200 ; AVX2-NEXT: vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
201 ; AVX2-NEXT: vzeroupper
204 ; AVX512-LABEL: test9:
206 ; AVX512-NEXT: vpmovzxdq {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero
207 ; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
208 ; AVX512-NEXT: vzeroupper
210 %x = zext <8 x i32> %v to <8 x i64>
211 %s = shufflevector <8 x i64> %x, <8 x i64> undef, <2 x i32> <i32 0, i32 1>
212 %t = trunc <2 x i64> %s to <2 x i32>
; test10: zext counterpart of test5 — elements 3 and 4 straddle the vector's
; halves, so both halves are zero-extended and recombined (SSE2 via
; unpcklps/unpckhps with zero plus shufps; AVX2/AVX512 via two 128-bit lane
; extracts stitched together with vpalignr).
216 define <2 x i32> @test10(<8 x i32> %v) {
217 ; SSE2-LABEL: test10:
219 ; SSE2-NEXT: xorps %xmm2, %xmm2
220 ; SSE2-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
221 ; SSE2-NEXT: unpckhps {{.*#+}} xmm0 = xmm0[2],xmm2[2],xmm0[3],xmm2[3]
222 ; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,3],xmm1[0,1]
225 ; AVX2-LABEL: test10:
227 ; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
228 ; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm0
229 ; AVX2-NEXT: vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
230 ; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm1
231 ; AVX2-NEXT: vpalignr {{.*#+}} xmm0 = xmm1[8,9,10,11,12,13,14,15],xmm0[0,1,2,3,4,5,6,7]
232 ; AVX2-NEXT: vzeroupper
235 ; AVX512-LABEL: test10:
237 ; AVX512-NEXT: vpmovzxdq {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero
238 ; AVX512-NEXT: vextracti32x4 $2, %zmm0, %xmm1
239 ; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm0
240 ; AVX512-NEXT: vpalignr {{.*#+}} xmm0 = xmm0[8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4,5,6,7]
241 ; AVX512-NEXT: vzeroupper
243 %x = zext <8 x i32> %v to <8 x i64>
244 %s = shufflevector <8 x i64> %x, <8 x i64> undef, <2 x i32> <i32 3, i32 4>
245 %t = trunc <2 x i64> %s to <2 x i32>