; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefix=SSE --check-prefix=X86-SSE
; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=AVX --check-prefix=AVX1 --check-prefix=X86-AVX --check-prefix=X86-AVX1
; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=AVX --check-prefix=AVX2 --check-prefix=X86-AVX --check-prefix=X86-AVX2
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefix=SSE --check-prefix=X64-SSE
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=AVX --check-prefix=AVX1 --check-prefix=X64-AVX --check-prefix=X64-AVX1
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=AVX --check-prefix=AVX2 --check-prefix=X64-AVX --check-prefix=X64-AVX2
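
; An ashr by 63 leaves every i64 lane as all sign bits, so the trunc to
; <4 x i32> only needs the per-lane sign: the checks below expect 32-bit
; sign shifts (or a pcmpgtq against zero) feeding a single packssdw.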
define <4 x i32> @trunc_ashr_v4i64(<4 x i64> %a) nounwind {
; SSE-LABEL: trunc_ashr_v4i64:
; SSE:       # %bb.0:
; SSE-NEXT:    psrad $31, %xmm1
; SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
; SSE-NEXT:    psrad $31, %xmm0
; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
; SSE-NEXT:    packssdw %xmm1, %xmm0
; SSE-NEXT:    ret{{[l|q]}}
;
; AVX1-LABEL: trunc_ashr_v4i64:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT:    vpxor %xmm2, %xmm2, %xmm2
; AVX1-NEXT:    vpcmpgtq %xmm1, %xmm2, %xmm1
; AVX1-NEXT:    vpcmpgtq %xmm0, %xmm2, %xmm0
; AVX1-NEXT:    vpackssdw %xmm1, %xmm0, %xmm0
; AVX1-NEXT:    vzeroupper
; AVX1-NEXT:    ret{{[l|q]}}
;
; AVX2-LABEL: trunc_ashr_v4i64:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vpxor %xmm1, %xmm1, %xmm1
; AVX2-NEXT:    vpcmpgtq %ymm0, %ymm1, %ymm0
; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT:    vpackssdw %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    vzeroupper
; AVX2-NEXT:    ret{{[l|q]}}
  %1 = ashr <4 x i64> %a, <i64 63, i64 63, i64 63, i64 63>
  %2 = trunc <4 x i64> %1 to <4 x i32>
  ret <4 x i32> %2
}
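
; ashr by 49 (>= 32) can be done per 32-bit half: the low dword of each
; result lane is the high source dword shifted right by 49-32=17, and the
; high dword is pure sign, hence the psrad $17 / psrad $31 pairs below.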
define <8 x i16> @trunc_ashr_v4i64_bitcast(<4 x i64> %a0) {
; SSE-LABEL: trunc_ashr_v4i64_bitcast:
; SSE:       # %bb.0:
; SSE-NEXT:    movdqa %xmm1, %xmm2
; SSE-NEXT:    psrad $31, %xmm2
; SSE-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[1,3,2,3]
; SSE-NEXT:    psrad $17, %xmm1
; SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,3,2,3]
; SSE-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
; SSE-NEXT:    movdqa %xmm0, %xmm2
; SSE-NEXT:    psrad $31, %xmm2
; SSE-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[1,3,2,3]
; SSE-NEXT:    psrad $17, %xmm0
; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,3,2,3]
; SSE-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
; SSE-NEXT:    packssdw %xmm1, %xmm0
; SSE-NEXT:    ret{{[l|q]}}
;
; AVX1-LABEL: trunc_ashr_v4i64_bitcast:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT:    vpsrad $31, %xmm1, %xmm2
; AVX1-NEXT:    vpsrad $17, %xmm1, %xmm1
; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
; AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7]
; AVX1-NEXT:    vpsrad $31, %xmm0, %xmm2
; AVX1-NEXT:    vpsrad $17, %xmm0, %xmm0
; AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
; AVX1-NEXT:    vpackssdw %xmm1, %xmm0, %xmm0
; AVX1-NEXT:    vzeroupper
; AVX1-NEXT:    ret{{[l|q]}}
;
; AVX2-LABEL: trunc_ashr_v4i64_bitcast:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vpsrad $31, %ymm0, %ymm1
; AVX2-NEXT:    vpsrad $17, %ymm0, %ymm0
; AVX2-NEXT:    vpshufd {{.*#+}} ymm0 = ymm0[1,1,3,3,5,5,7,7]
; AVX2-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[3],ymm0[4],ymm1[5],ymm0[6],ymm1[7]
; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT:    vpackssdw %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    vzeroupper
; AVX2-NEXT:    ret{{[l|q]}}
  %1 = ashr <4 x i64> %a0, <i64 49, i64 49, i64 49, i64 49>
  %2 = bitcast <4 x i64> %1 to <8 x i32>
  %3 = trunc <8 x i32> %2 to <8 x i16>
  ret <8 x i16> %3
}
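
; ashr by 31 produces i32 sign masks, so the trunc to i16 is loss-free and
; should lower to a single packssdw per 128-bit pair.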
define <8 x i16> @trunc_ashr_v8i32(<8 x i32> %a) nounwind {
; SSE-LABEL: trunc_ashr_v8i32:
; SSE:       # %bb.0:
; SSE-NEXT:    psrad $31, %xmm1
; SSE-NEXT:    psrad $31, %xmm0
; SSE-NEXT:    packssdw %xmm1, %xmm0
; SSE-NEXT:    ret{{[l|q]}}
;
; AVX1-LABEL: trunc_ashr_v8i32:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT:    vpsrad $31, %xmm1, %xmm1
; AVX1-NEXT:    vpsrad $31, %xmm0, %xmm0
; AVX1-NEXT:    vpackssdw %xmm1, %xmm0, %xmm0
; AVX1-NEXT:    vzeroupper
; AVX1-NEXT:    ret{{[l|q]}}
;
; AVX2-LABEL: trunc_ashr_v8i32:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vpsrad $31, %ymm0, %ymm0
; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT:    vpackssdw %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    vzeroupper
; AVX2-NEXT:    ret{{[l|q]}}
  %1 = ashr <8 x i32> %a, <i32 31, i32 31, i32 31, i32 31, i32 31, i32 31, i32 31, i32 31>
  %2 = trunc <8 x i32> %1 to <8 x i16>
  ret <8 x i16> %2
}
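
; Both shuffle operands are sign-mask vectors (an ashr by 31 and a sext'd
; icmp), so the concatenate-and-trunc should collapse into one packssdw;
; the icmp constant is loaded from the constant pool (an absolute .LCPI
; address on x86-32, RIP-relative on x86-64).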
define <8 x i16> @trunc_ashr_v4i32_icmp_v4i32(<4 x i32> %a, <4 x i32> %b) nounwind {
; X86-SSE-LABEL: trunc_ashr_v4i32_icmp_v4i32:
; X86-SSE:       # %bb.0:
; X86-SSE-NEXT:    psrad $31, %xmm0
; X86-SSE-NEXT:    pcmpgtd {{\.LCPI.*}}, %xmm1
; X86-SSE-NEXT:    packssdw %xmm1, %xmm0
; X86-SSE-NEXT:    retl
;
; X86-AVX-LABEL: trunc_ashr_v4i32_icmp_v4i32:
; X86-AVX:       # %bb.0:
; X86-AVX-NEXT:    vpsrad $31, %xmm0, %xmm0
; X86-AVX-NEXT:    vpcmpgtd {{\.LCPI.*}}, %xmm1, %xmm1
; X86-AVX-NEXT:    vpackssdw %xmm1, %xmm0, %xmm0
; X86-AVX-NEXT:    retl
;
; X64-SSE-LABEL: trunc_ashr_v4i32_icmp_v4i32:
; X64-SSE:       # %bb.0:
; X64-SSE-NEXT:    psrad $31, %xmm0
; X64-SSE-NEXT:    pcmpgtd {{.*}}(%rip), %xmm1
; X64-SSE-NEXT:    packssdw %xmm1, %xmm0
; X64-SSE-NEXT:    retq
;
; X64-AVX-LABEL: trunc_ashr_v4i32_icmp_v4i32:
; X64-AVX:       # %bb.0:
; X64-AVX-NEXT:    vpsrad $31, %xmm0, %xmm0
; X64-AVX-NEXT:    vpcmpgtd {{.*}}(%rip), %xmm1, %xmm1
; X64-AVX-NEXT:    vpackssdw %xmm1, %xmm0, %xmm0
; X64-AVX-NEXT:    retq
  %1 = ashr <4 x i32> %a, <i32 31, i32 31, i32 31, i32 31>
  %2 = icmp sgt <4 x i32> %b, <i32 1, i32 16, i32 255, i32 65535>
  %3 = sext <4 x i1> %2 to <4 x i32>
  %4 = shufflevector <4 x i32> %1, <4 x i32> %3, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
  %5 = trunc <8 x i32> %4 to <8 x i16>
  ret <8 x i16> %5
}
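
; The shl/ashr pair sign-extends bit 0 of lanes 0 and 2 (lanes 1 and 3 are
; unshifted), and the shuffle only demands dword 0 of each 128-bit half,
; exercising demanded-elements simplification of the non-uniform shift.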
define <8 x i16> @trunc_ashr_v4i64_demandedelts(<4 x i64> %a0) {
; X86-SSE-LABEL: trunc_ashr_v4i64_demandedelts:
; X86-SSE:       # %bb.0:
; X86-SSE-NEXT:    psllq $63, %xmm1
; X86-SSE-NEXT:    psllq $63, %xmm0
; X86-SSE-NEXT:    psrlq $63, %xmm0
; X86-SSE-NEXT:    movdqa {{.*#+}} xmm2 = [4.9406564584124654E-324,-0.0E+0]
; X86-SSE-NEXT:    pxor %xmm2, %xmm0
; X86-SSE-NEXT:    psubq %xmm2, %xmm0
; X86-SSE-NEXT:    psrlq $63, %xmm1
; X86-SSE-NEXT:    pxor %xmm2, %xmm1
; X86-SSE-NEXT:    psubq %xmm2, %xmm1
; X86-SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,0,0,0]
; X86-SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
; X86-SSE-NEXT:    packssdw %xmm1, %xmm0
; X86-SSE-NEXT:    retl
;
; X86-AVX1-LABEL: trunc_ashr_v4i64_demandedelts:
; X86-AVX1:       # %bb.0:
; X86-AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
; X86-AVX1-NEXT:    vpsllq $63, %xmm1, %xmm2
; X86-AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm2[0,1,2,3],xmm1[4,5,6,7]
; X86-AVX1-NEXT:    vpsllq $63, %xmm0, %xmm2
; X86-AVX1-NEXT:    vpsrlq $63, %xmm1, %xmm1
; X86-AVX1-NEXT:    vmovdqa {{.*#+}} xmm3 = [1,0,0,2147483648]
; X86-AVX1-NEXT:    vpxor %xmm3, %xmm1, %xmm1
; X86-AVX1-NEXT:    vpsubq %xmm3, %xmm1, %xmm1
; X86-AVX1-NEXT:    vpsrlq $63, %xmm2, %xmm2
; X86-AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm2[0,1,2,3],xmm0[4,5,6,7]
; X86-AVX1-NEXT:    vpxor %xmm3, %xmm0, %xmm0
; X86-AVX1-NEXT:    vpsubq %xmm3, %xmm0, %xmm0
; X86-AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
; X86-AVX1-NEXT:    vpermilps {{.*#+}} ymm0 = ymm0[0,0,0,0,4,4,4,4]
; X86-AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
; X86-AVX1-NEXT:    vpackssdw %xmm1, %xmm0, %xmm0
; X86-AVX1-NEXT:    vzeroupper
; X86-AVX1-NEXT:    retl
;
; X86-AVX2-LABEL: trunc_ashr_v4i64_demandedelts:
; X86-AVX2:       # %bb.0:
; X86-AVX2-NEXT:    vbroadcasti128 {{.*#+}} ymm1 = [63,0,0,0,63,0,0,0]
; X86-AVX2-NEXT:    # ymm1 = mem[0,1,0,1]
; X86-AVX2-NEXT:    vpsllvq %ymm1, %ymm0, %ymm0
; X86-AVX2-NEXT:    vmovdqa {{.*#+}} ymm2 = [0,2147483648,0,2147483648,0,2147483648,0,2147483648]
; X86-AVX2-NEXT:    vpsrlvq %ymm1, %ymm2, %ymm2
; X86-AVX2-NEXT:    vpsrlvq %ymm1, %ymm0, %ymm0
; X86-AVX2-NEXT:    vpxor %ymm2, %ymm0, %ymm0
; X86-AVX2-NEXT:    vpsubq %ymm2, %ymm0, %ymm0
; X86-AVX2-NEXT:    vpshufd {{.*#+}} ymm0 = ymm0[0,0,0,0,4,4,4,4]
; X86-AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
; X86-AVX2-NEXT:    vpackssdw %xmm1, %xmm0, %xmm0
; X86-AVX2-NEXT:    vzeroupper
; X86-AVX2-NEXT:    retl
;
; X64-SSE-LABEL: trunc_ashr_v4i64_demandedelts:
; X64-SSE:       # %bb.0:
; X64-SSE-NEXT:    psllq $63, %xmm1
; X64-SSE-NEXT:    psllq $63, %xmm0
; X64-SSE-NEXT:    psrlq $63, %xmm0
; X64-SSE-NEXT:    movdqa {{.*#+}} xmm2 = [1,9223372036854775808]
; X64-SSE-NEXT:    pxor %xmm2, %xmm0
; X64-SSE-NEXT:    psubq %xmm2, %xmm0
; X64-SSE-NEXT:    psrlq $63, %xmm1
; X64-SSE-NEXT:    pxor %xmm2, %xmm1
; X64-SSE-NEXT:    psubq %xmm2, %xmm1
; X64-SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,0,0,0]
; X64-SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
; X64-SSE-NEXT:    packssdw %xmm1, %xmm0
; X64-SSE-NEXT:    retq
;
; X64-AVX1-LABEL: trunc_ashr_v4i64_demandedelts:
; X64-AVX1:       # %bb.0:
; X64-AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
; X64-AVX1-NEXT:    vpsllq $63, %xmm1, %xmm2
; X64-AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm2[0,1,2,3],xmm1[4,5,6,7]
; X64-AVX1-NEXT:    vpsllq $63, %xmm0, %xmm2
; X64-AVX1-NEXT:    vpsrlq $63, %xmm1, %xmm1
; X64-AVX1-NEXT:    vmovdqa {{.*#+}} xmm3 = [1,9223372036854775808]
; X64-AVX1-NEXT:    vpxor %xmm3, %xmm1, %xmm1
; X64-AVX1-NEXT:    vpsubq %xmm3, %xmm1, %xmm1
; X64-AVX1-NEXT:    vpsrlq $63, %xmm2, %xmm2
; X64-AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm2[0,1,2,3],xmm0[4,5,6,7]
; X64-AVX1-NEXT:    vpxor %xmm3, %xmm0, %xmm0
; X64-AVX1-NEXT:    vpsubq %xmm3, %xmm0, %xmm0
; X64-AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
; X64-AVX1-NEXT:    vpermilps {{.*#+}} ymm0 = ymm0[0,0,0,0,4,4,4,4]
; X64-AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
; X64-AVX1-NEXT:    vpackssdw %xmm1, %xmm0, %xmm0
; X64-AVX1-NEXT:    vzeroupper
; X64-AVX1-NEXT:    retq
;
; X64-AVX2-LABEL: trunc_ashr_v4i64_demandedelts:
; X64-AVX2:       # %bb.0:
; X64-AVX2-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [1,1,1,1]
; X64-AVX2-NEXT:    vpand %ymm1, %ymm0, %ymm0
; X64-AVX2-NEXT:    vbroadcasti128 {{.*#+}} ymm1 = [1,9223372036854775808,1,9223372036854775808]
; X64-AVX2-NEXT:    # ymm1 = mem[0,1,0,1]
; X64-AVX2-NEXT:    vpxor %ymm1, %ymm0, %ymm0
; X64-AVX2-NEXT:    vpsubq %ymm1, %ymm0, %ymm0
; X64-AVX2-NEXT:    vpshufd {{.*#+}} ymm0 = ymm0[0,0,0,0,4,4,4,4]
; X64-AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
; X64-AVX2-NEXT:    vpackssdw %xmm1, %xmm0, %xmm0
; X64-AVX2-NEXT:    vzeroupper
; X64-AVX2-NEXT:    retq
  %1 = shl <4 x i64> %a0, <i64 63, i64 0, i64 63, i64 0>
  %2 = ashr <4 x i64> %1, <i64 63, i64 0, i64 63, i64 0>
  %3 = bitcast <4 x i64> %2 to <8 x i32>
  %4 = shufflevector <8 x i32> %3, <8 x i32> undef, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 4, i32 4, i32 4, i32 4>
  %5 = trunc <8 x i32> %4 to <8 x i16>
  ret <8 x i16> %5
}