; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s --check-prefixes=SSE
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64-v3 | FileCheck %s --check-prefixes=AVX,AVX2
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64-v4 | FileCheck %s --check-prefixes=AVX,AVX512

; i32 saturate
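
; stest_f64i32: fptosi <2 x double> to <2 x i64>, clamped to the signed i32 range [-2147483648, 2147483647], then truncated to <2 x i32>.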
define <2 x i32> @stest_f64i32(<2 x double> %x) nounwind {
; SSE-LABEL: stest_f64i32:
; SSE: # %bb.0: # %entry
; SSE-NEXT: cvttsd2si %xmm0, %rax
; SSE-NEXT: movq %rax, %xmm1
; SSE-NEXT: unpckhpd {{.*#+}} xmm0 = xmm0[1,1]
; SSE-NEXT: cvttsd2si %xmm0, %rax
; SSE-NEXT: movq %rax, %xmm0
; SSE-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
; SSE-NEXT: movdqa {{.*#+}} xmm0 = [2147483648,2147483648]
; SSE-NEXT: movdqa %xmm1, %xmm2
; SSE-NEXT: pxor %xmm0, %xmm2
; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm2[1,1,3,3]
; SSE-NEXT: pxor %xmm4, %xmm4
; SSE-NEXT: pcmpeqd %xmm3, %xmm4
; SSE-NEXT: movdqa {{.*#+}} xmm3 = [4294967295,4294967295]
; SSE-NEXT: pcmpgtd %xmm2, %xmm3
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm3[0,0,2,2]
; SSE-NEXT: pand %xmm4, %xmm2
; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
; SSE-NEXT: por %xmm2, %xmm3
; SSE-NEXT: pand %xmm3, %xmm1
; SSE-NEXT: pandn {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3
; SSE-NEXT: por %xmm1, %xmm3
; SSE-NEXT: pxor %xmm3, %xmm0
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
; SSE-NEXT: pcmpeqd %xmm2, %xmm2
; SSE-NEXT: pcmpeqd %xmm1, %xmm2
; SSE-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
; SSE-NEXT: pand %xmm2, %xmm1
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
; SSE-NEXT: por %xmm1, %xmm0
; SSE-NEXT: pand %xmm0, %xmm3
; SSE-NEXT: pandn {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE-NEXT: por %xmm3, %xmm0
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SSE-NEXT: retq
;
; AVX2-LABEL: stest_f64i32:
; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vcvttsd2si %xmm0, %rax
; AVX2-NEXT: vshufpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX2-NEXT: vcvttsd2si %xmm0, %rcx
; AVX2-NEXT: vmovq %rax, %xmm0
; AVX2-NEXT: vmovq %rcx, %xmm1
; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; AVX2-NEXT: vpbroadcastq {{.*#+}} xmm1 = [2147483647,2147483647]
; AVX2-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm2
; AVX2-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
; AVX2-NEXT: vpbroadcastq {{.*#+}} xmm1 = [18446744071562067968,18446744071562067968]
; AVX2-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm2
; AVX2-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
; AVX2-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2,2,3]
; AVX2-NEXT: retq
;
; AVX512-LABEL: stest_f64i32:
; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vcvttpd2qq %xmm0, %xmm0
; AVX512-NEXT: vpmovsqd %xmm0, %xmm0
; AVX512-NEXT: retq
entry:
  %conv = fptosi <2 x double> %x to <2 x i64>
  %0 = icmp slt <2 x i64> %conv, <i64 2147483647, i64 2147483647>
  %spec.store.select = select <2 x i1> %0, <2 x i64> %conv, <2 x i64> <i64 2147483647, i64 2147483647>
  %1 = icmp sgt <2 x i64> %spec.store.select, <i64 -2147483648, i64 -2147483648>
  %spec.store.select7 = select <2 x i1> %1, <2 x i64> %spec.store.select, <2 x i64> <i64 -2147483648, i64 -2147483648>
  %conv6 = trunc <2 x i64> %spec.store.select7 to <2 x i32>
  ret <2 x i32> %conv6
}
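
; utest_f64i32: fptoui <2 x double> to <2 x i64>, clamped to UINT32_MAX (4294967295), then truncated to <2 x i32>.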
define <2 x i32> @utest_f64i32(<2 x double> %x) nounwind {
; SSE-LABEL: utest_f64i32:
; SSE: # %bb.0: # %entry
; SSE-NEXT: movsd {{.*#+}} xmm1 = [9.2233720368547758E+18,0.0E+0]
; SSE-NEXT: movapd %xmm0, %xmm2
; SSE-NEXT: subsd %xmm1, %xmm2
; SSE-NEXT: cvttsd2si %xmm2, %rax
; SSE-NEXT: cvttsd2si %xmm0, %rcx
; SSE-NEXT: movq %rcx, %rdx
; SSE-NEXT: sarq $63, %rdx
; SSE-NEXT: andq %rax, %rdx
; SSE-NEXT: orq %rcx, %rdx
; SSE-NEXT: movq %rdx, %xmm2
; SSE-NEXT: unpckhpd {{.*#+}} xmm0 = xmm0[1,1]
; SSE-NEXT: cvttsd2si %xmm0, %rax
; SSE-NEXT: subsd %xmm1, %xmm0
; SSE-NEXT: cvttsd2si %xmm0, %rcx
; SSE-NEXT: movq %rax, %rdx
; SSE-NEXT: sarq $63, %rdx
; SSE-NEXT: andq %rcx, %rdx
; SSE-NEXT: orq %rax, %rdx
; SSE-NEXT: movq %rdx, %xmm0
; SSE-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm0[0]
; SSE-NEXT: movdqa {{.*#+}} xmm0 = [9223372039002259456,9223372039002259456]
; SSE-NEXT: pxor %xmm2, %xmm0
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
; SSE-NEXT: pcmpeqd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
; SSE-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE-NEXT: pandn %xmm1, %xmm0
; SSE-NEXT: pcmpeqd %xmm1, %xmm1
; SSE-NEXT: pxor %xmm0, %xmm1
; SSE-NEXT: pand %xmm2, %xmm0
; SSE-NEXT: por %xmm1, %xmm0
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SSE-NEXT: retq
;
; AVX2-LABEL: utest_f64i32:
; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vmovsd {{.*#+}} xmm1 = [9.2233720368547758E+18,0.0E+0]
; AVX2-NEXT: vsubsd %xmm1, %xmm0, %xmm2
; AVX2-NEXT: vcvttsd2si %xmm2, %rax
; AVX2-NEXT: vcvttsd2si %xmm0, %rcx
; AVX2-NEXT: movq %rcx, %rdx
; AVX2-NEXT: sarq $63, %rdx
; AVX2-NEXT: andq %rax, %rdx
; AVX2-NEXT: orq %rcx, %rdx
; AVX2-NEXT: vshufpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX2-NEXT: vsubsd %xmm1, %xmm0, %xmm1
; AVX2-NEXT: vcvttsd2si %xmm1, %rax
; AVX2-NEXT: vmovq %rdx, %xmm1
; AVX2-NEXT: vcvttsd2si %xmm0, %rcx
; AVX2-NEXT: movq %rcx, %rdx
; AVX2-NEXT: sarq $63, %rdx
; AVX2-NEXT: andq %rax, %rdx
; AVX2-NEXT: orq %rcx, %rdx
; AVX2-NEXT: vmovq %rdx, %xmm0
; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
; AVX2-NEXT: vpxor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
; AVX2-NEXT: vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
; AVX2-NEXT: vblendvpd %xmm1, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; AVX2-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2,2,3]
; AVX2-NEXT: retq
;
; AVX512-LABEL: utest_f64i32:
; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vcvttpd2uqq %xmm0, %xmm0
; AVX512-NEXT: vpmovusqd %xmm0, %xmm0
; AVX512-NEXT: retq
entry:
  %conv = fptoui <2 x double> %x to <2 x i64>
  %0 = icmp ult <2 x i64> %conv, <i64 4294967295, i64 4294967295>
  %spec.store.select = select <2 x i1> %0, <2 x i64> %conv, <2 x i64> <i64 4294967295, i64 4294967295>
  %conv6 = trunc <2 x i64> %spec.store.select to <2 x i32>
  ret <2 x i32> %conv6
}
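
; ustest_f64i32: fptosi <2 x double> to <2 x i64>, clamped to [0, 4294967295], then truncated to <2 x i32>.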
define <2 x i32> @ustest_f64i32(<2 x double> %x) nounwind {
; SSE-LABEL: ustest_f64i32:
; SSE: # %bb.0: # %entry
; SSE-NEXT: cvttsd2si %xmm0, %rax
; SSE-NEXT: movq %rax, %xmm1
; SSE-NEXT: unpckhpd {{.*#+}} xmm0 = xmm0[1,1]
; SSE-NEXT: cvttsd2si %xmm0, %rax
; SSE-NEXT: movq %rax, %xmm0
; SSE-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
; SSE-NEXT: movdqa {{.*#+}} xmm0 = [2147483648,2147483648]
; SSE-NEXT: movdqa %xmm1, %xmm2
; SSE-NEXT: pxor %xmm0, %xmm2
; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm2[1,1,3,3]
; SSE-NEXT: pxor %xmm4, %xmm4
; SSE-NEXT: pcmpeqd %xmm3, %xmm4
; SSE-NEXT: movdqa {{.*#+}} xmm3 = [2147483647,2147483647]
; SSE-NEXT: pcmpgtd %xmm2, %xmm3
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm3[0,0,2,2]
; SSE-NEXT: pand %xmm4, %xmm2
; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
; SSE-NEXT: por %xmm2, %xmm3
; SSE-NEXT: pand %xmm3, %xmm1
; SSE-NEXT: pandn {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3
; SSE-NEXT: por %xmm1, %xmm3
; SSE-NEXT: movdqa %xmm3, %xmm1
; SSE-NEXT: pxor %xmm0, %xmm1
; SSE-NEXT: movdqa %xmm1, %xmm2
; SSE-NEXT: pcmpgtd %xmm0, %xmm2
; SSE-NEXT: pcmpeqd %xmm0, %xmm1
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3]
; SSE-NEXT: pand %xmm2, %xmm0
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm2[1,1,3,3]
; SSE-NEXT: por %xmm0, %xmm1
; SSE-NEXT: pand %xmm3, %xmm1
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[0,2,2,3]
; SSE-NEXT: retq
;
; AVX2-LABEL: ustest_f64i32:
; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vcvttsd2si %xmm0, %rax
; AVX2-NEXT: vmovq %rax, %xmm1
; AVX2-NEXT: vshufpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX2-NEXT: vcvttsd2si %xmm0, %rax
; AVX2-NEXT: vmovq %rax, %xmm0
; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
; AVX2-NEXT: vpmovsxbd {{.*#+}} xmm1 = [4294967295,0,4294967295,0]
; AVX2-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm2
; AVX2-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX2-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm1
; AVX2-NEXT: vpand %xmm0, %xmm1, %xmm0
; AVX2-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; AVX2-NEXT: retq
;
; AVX512-LABEL: ustest_f64i32:
; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vcvttpd2qq %xmm0, %xmm0
; AVX512-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX512-NEXT: vpmaxsq %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vpmovusqd %xmm0, %xmm0
; AVX512-NEXT: retq
entry:
  %conv = fptosi <2 x double> %x to <2 x i64>
  %0 = icmp slt <2 x i64> %conv, <i64 4294967295, i64 4294967295>
  %spec.store.select = select <2 x i1> %0, <2 x i64> %conv, <2 x i64> <i64 4294967295, i64 4294967295>
  %1 = icmp sgt <2 x i64> %spec.store.select, zeroinitializer
  %spec.store.select7 = select <2 x i1> %1, <2 x i64> %spec.store.select, <2 x i64> zeroinitializer
  %conv6 = trunc <2 x i64> %spec.store.select7 to <2 x i32>
  ret <2 x i32> %conv6
}
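
; stest_f32i32: fptosi <4 x float> to <4 x i64>, clamped to the signed i32 range, then truncated to <4 x i32>.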
define <4 x i32> @stest_f32i32(<4 x float> %x) nounwind {
; SSE-LABEL: stest_f32i32:
; SSE: # %bb.0: # %entry
; SSE-NEXT: movaps %xmm0, %xmm1
; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,3],xmm0[3,3]
; SSE-NEXT: cvttss2si %xmm1, %rax
; SSE-NEXT: movq %rax, %xmm1
; SSE-NEXT: movaps %xmm0, %xmm2
; SSE-NEXT: unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm0[1]
; SSE-NEXT: cvttss2si %xmm2, %rax
; SSE-NEXT: movq %rax, %xmm2
; SSE-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm1[0]
; SSE-NEXT: cvttss2si %xmm0, %rax
; SSE-NEXT: movq %rax, %xmm4
; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1,1,1]
; SSE-NEXT: cvttss2si %xmm0, %rax
; SSE-NEXT: movq %rax, %xmm0
; SSE-NEXT: punpcklqdq {{.*#+}} xmm4 = xmm4[0],xmm0[0]
; SSE-NEXT: movdqa {{.*#+}} xmm3 = [2147483647,2147483647]
; SSE-NEXT: movdqa {{.*#+}} xmm0 = [2147483648,2147483648]
; SSE-NEXT: movdqa %xmm4, %xmm1
; SSE-NEXT: pxor %xmm0, %xmm1
; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm1[1,1,3,3]
; SSE-NEXT: pxor %xmm6, %xmm6
; SSE-NEXT: pcmpeqd %xmm6, %xmm5
; SSE-NEXT: movdqa {{.*#+}} xmm7 = [4294967295,4294967295]
; SSE-NEXT: movdqa %xmm7, %xmm8
; SSE-NEXT: pcmpgtd %xmm1, %xmm8
; SSE-NEXT: pshufd {{.*#+}} xmm9 = xmm8[0,0,2,2]
; SSE-NEXT: pand %xmm5, %xmm9
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm8[1,1,3,3]
; SSE-NEXT: por %xmm9, %xmm1
; SSE-NEXT: pand %xmm1, %xmm4
; SSE-NEXT: pandn %xmm3, %xmm1
; SSE-NEXT: por %xmm4, %xmm1
; SSE-NEXT: movdqa %xmm2, %xmm4
; SSE-NEXT: pxor %xmm0, %xmm4
; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm4[1,1,3,3]
; SSE-NEXT: pcmpeqd %xmm6, %xmm5
; SSE-NEXT: pcmpgtd %xmm4, %xmm7
; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm7[0,0,2,2]
; SSE-NEXT: pand %xmm5, %xmm4
; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm7[1,1,3,3]
; SSE-NEXT: por %xmm4, %xmm5
; SSE-NEXT: pand %xmm5, %xmm2
; SSE-NEXT: pandn %xmm3, %xmm5
; SSE-NEXT: por %xmm2, %xmm5
; SSE-NEXT: movdqa {{.*#+}} xmm2 = [18446744071562067968,18446744071562067968]
; SSE-NEXT: movdqa %xmm5, %xmm3
; SSE-NEXT: pxor %xmm0, %xmm3
; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm3[1,1,3,3]
; SSE-NEXT: pcmpeqd %xmm6, %xmm6
; SSE-NEXT: pcmpeqd %xmm6, %xmm4
; SSE-NEXT: movdqa {{.*#+}} xmm7 = [18446744069414584320,18446744069414584320]
; SSE-NEXT: pcmpgtd %xmm7, %xmm3
; SSE-NEXT: pshufd {{.*#+}} xmm8 = xmm3[0,0,2,2]
; SSE-NEXT: pand %xmm4, %xmm8
; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
; SSE-NEXT: por %xmm8, %xmm3
; SSE-NEXT: pand %xmm3, %xmm5
; SSE-NEXT: pandn %xmm2, %xmm3
; SSE-NEXT: por %xmm5, %xmm3
; SSE-NEXT: pxor %xmm1, %xmm0
; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm0[1,1,3,3]
; SSE-NEXT: pcmpeqd %xmm6, %xmm4
; SSE-NEXT: pcmpgtd %xmm7, %xmm0
; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm0[0,0,2,2]
; SSE-NEXT: pand %xmm4, %xmm5
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
; SSE-NEXT: por %xmm5, %xmm0
; SSE-NEXT: pand %xmm0, %xmm1
; SSE-NEXT: pandn %xmm2, %xmm0
; SSE-NEXT: por %xmm1, %xmm0
; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm3[0,2]
; SSE-NEXT: retq
;
; AVX2-LABEL: stest_f32i32:
; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vshufps {{.*#+}} xmm1 = xmm0[3,3,3,3]
; AVX2-NEXT: vcvttss2si %xmm1, %rax
; AVX2-NEXT: vmovq %rax, %xmm1
; AVX2-NEXT: vshufpd {{.*#+}} xmm2 = xmm0[1,0]
; AVX2-NEXT: vcvttss2si %xmm2, %rax
; AVX2-NEXT: vmovq %rax, %xmm2
; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0]
; AVX2-NEXT: vcvttss2si %xmm0, %rax
; AVX2-NEXT: vmovq %rax, %xmm2
; AVX2-NEXT: vmovshdup {{.*#+}} xmm0 = xmm0[1,1,3,3]
; AVX2-NEXT: vcvttss2si %xmm0, %rax
; AVX2-NEXT: vmovq %rax, %xmm0
; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm2[0],xmm0[0]
; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
; AVX2-NEXT: vpbroadcastq {{.*#+}} ymm1 = [2147483647,2147483647,2147483647,2147483647]
; AVX2-NEXT: vpcmpgtq %ymm0, %ymm1, %ymm2
; AVX2-NEXT: vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
; AVX2-NEXT: vpbroadcastq {{.*#+}} ymm1 = [18446744071562067968,18446744071562067968,18446744071562067968,18446744071562067968]
; AVX2-NEXT: vpcmpgtq %ymm1, %ymm0, %ymm2
; AVX2-NEXT: vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
; AVX2-NEXT: vbroadcastf128 {{.*#+}} ymm1 = [0,2,4,6,0,2,4,6]
; AVX2-NEXT: # ymm1 = mem[0,1,0,1]
; AVX2-NEXT: vpermps %ymm0, %ymm1, %ymm0
; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512-LABEL: stest_f32i32:
; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vcvttps2qq %xmm0, %ymm0
; AVX512-NEXT: vpmovsqd %ymm0, %xmm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
entry:
  %conv = fptosi <4 x float> %x to <4 x i64>
  %0 = icmp slt <4 x i64> %conv, <i64 2147483647, i64 2147483647, i64 2147483647, i64 2147483647>
  %spec.store.select = select <4 x i1> %0, <4 x i64> %conv, <4 x i64> <i64 2147483647, i64 2147483647, i64 2147483647, i64 2147483647>
  %1 = icmp sgt <4 x i64> %spec.store.select, <i64 -2147483648, i64 -2147483648, i64 -2147483648, i64 -2147483648>
  %spec.store.select7 = select <4 x i1> %1, <4 x i64> %spec.store.select, <4 x i64> <i64 -2147483648, i64 -2147483648, i64 -2147483648, i64 -2147483648>
  %conv6 = trunc <4 x i64> %spec.store.select7 to <4 x i32>
  ret <4 x i32> %conv6
}
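
; utest_f32i32: fptoui <4 x float> to <4 x i64>, clamped to UINT32_MAX, then truncated to <4 x i32>.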
define <4 x i32> @utest_f32i32(<4 x float> %x) nounwind {
; SSE-LABEL: utest_f32i32:
; SSE: # %bb.0: # %entry
; SSE-NEXT: movss {{.*#+}} xmm2 = [9.22337203E+18,0.0E+0,0.0E+0,0.0E+0]
; SSE-NEXT: movaps %xmm0, %xmm1
; SSE-NEXT: subss %xmm2, %xmm1
; SSE-NEXT: cvttss2si %xmm1, %rax
; SSE-NEXT: cvttss2si %xmm0, %rcx
; SSE-NEXT: movq %rcx, %rdx
; SSE-NEXT: sarq $63, %rdx
; SSE-NEXT: andq %rax, %rdx
; SSE-NEXT: orq %rcx, %rdx
; SSE-NEXT: movq %rdx, %xmm1
; SSE-NEXT: movaps %xmm0, %xmm3
; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[1,1],xmm0[1,1]
; SSE-NEXT: cvttss2si %xmm3, %rax
; SSE-NEXT: subss %xmm2, %xmm3
; SSE-NEXT: cvttss2si %xmm3, %rcx
; SSE-NEXT: movq %rax, %rdx
; SSE-NEXT: sarq $63, %rdx
; SSE-NEXT: andq %rcx, %rdx
; SSE-NEXT: orq %rax, %rdx
; SSE-NEXT: movq %rdx, %xmm3
; SSE-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm3[0]
; SSE-NEXT: movaps %xmm0, %xmm3
; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[3,3],xmm0[3,3]
; SSE-NEXT: cvttss2si %xmm3, %rax
; SSE-NEXT: subss %xmm2, %xmm3
; SSE-NEXT: cvttss2si %xmm3, %rcx
; SSE-NEXT: movq %rax, %rdx
; SSE-NEXT: sarq $63, %rdx
; SSE-NEXT: andq %rcx, %rdx
; SSE-NEXT: orq %rax, %rdx
; SSE-NEXT: movq %rdx, %xmm3
; SSE-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
; SSE-NEXT: cvttss2si %xmm0, %rax
; SSE-NEXT: subss %xmm2, %xmm0
; SSE-NEXT: cvttss2si %xmm0, %rcx
; SSE-NEXT: movq %rax, %rdx
; SSE-NEXT: sarq $63, %rdx
; SSE-NEXT: andq %rcx, %rdx
; SSE-NEXT: orq %rax, %rdx
; SSE-NEXT: movq %rdx, %xmm0
; SSE-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm3[0]
; SSE-NEXT: movdqa {{.*#+}} xmm2 = [9223372039002259456,9223372039002259456]
; SSE-NEXT: movdqa %xmm0, %xmm3
; SSE-NEXT: pxor %xmm2, %xmm3
; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm3[0,0,2,2]
; SSE-NEXT: movdqa {{.*#+}} xmm5 = [2147483647,2147483647,2147483647,2147483647]
; SSE-NEXT: movdqa %xmm5, %xmm6
; SSE-NEXT: pcmpgtd %xmm4, %xmm6
; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
; SSE-NEXT: pcmpeqd %xmm2, %xmm3
; SSE-NEXT: pand %xmm6, %xmm3
; SSE-NEXT: pcmpeqd %xmm4, %xmm4
; SSE-NEXT: pand %xmm3, %xmm0
; SSE-NEXT: pxor %xmm4, %xmm3
; SSE-NEXT: por %xmm0, %xmm3
; SSE-NEXT: movdqa %xmm1, %xmm0
; SSE-NEXT: pxor %xmm2, %xmm0
; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm0[0,0,2,2]
; SSE-NEXT: pcmpgtd %xmm6, %xmm5
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
; SSE-NEXT: pcmpeqd %xmm2, %xmm0
; SSE-NEXT: pand %xmm5, %xmm0
; SSE-NEXT: pxor %xmm0, %xmm4
; SSE-NEXT: pand %xmm1, %xmm0
; SSE-NEXT: por %xmm4, %xmm0
; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm3[0,2]
; SSE-NEXT: retq
;
; AVX2-LABEL: utest_f32i32:
; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vshufps {{.*#+}} xmm2 = xmm0[3,3,3,3]
; AVX2-NEXT: vmovss {{.*#+}} xmm1 = [9.22337203E+18,0.0E+0,0.0E+0,0.0E+0]
; AVX2-NEXT: vsubss %xmm1, %xmm2, %xmm3
; AVX2-NEXT: vcvttss2si %xmm3, %rax
; AVX2-NEXT: vcvttss2si %xmm2, %rcx
; AVX2-NEXT: movq %rcx, %rdx
; AVX2-NEXT: sarq $63, %rdx
; AVX2-NEXT: andq %rax, %rdx
; AVX2-NEXT: orq %rcx, %rdx
; AVX2-NEXT: vshufpd {{.*#+}} xmm2 = xmm0[1,0]
; AVX2-NEXT: vsubss %xmm1, %xmm2, %xmm3
; AVX2-NEXT: vcvttss2si %xmm3, %rax
; AVX2-NEXT: vcvttss2si %xmm2, %rcx
; AVX2-NEXT: vmovq %rdx, %xmm2
; AVX2-NEXT: movq %rcx, %rdx
; AVX2-NEXT: sarq $63, %rdx
; AVX2-NEXT: andq %rax, %rdx
; AVX2-NEXT: orq %rcx, %rdx
; AVX2-NEXT: vmovq %rdx, %xmm3
; AVX2-NEXT: vsubss %xmm1, %xmm0, %xmm4
; AVX2-NEXT: vcvttss2si %xmm4, %rax
; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm3[0],xmm2[0]
; AVX2-NEXT: vcvttss2si %xmm0, %rcx
; AVX2-NEXT: movq %rcx, %rdx
; AVX2-NEXT: sarq $63, %rdx
; AVX2-NEXT: andq %rax, %rdx
; AVX2-NEXT: orq %rcx, %rdx
; AVX2-NEXT: vmovshdup {{.*#+}} xmm0 = xmm0[1,1,3,3]
; AVX2-NEXT: vsubss %xmm1, %xmm0, %xmm1
; AVX2-NEXT: vcvttss2si %xmm1, %rax
; AVX2-NEXT: vcvttss2si %xmm0, %rcx
; AVX2-NEXT: vmovq %rdx, %xmm0
; AVX2-NEXT: movq %rcx, %rdx
; AVX2-NEXT: sarq $63, %rdx
; AVX2-NEXT: andq %rax, %rdx
; AVX2-NEXT: orq %rcx, %rdx
; AVX2-NEXT: vmovq %rdx, %xmm1
; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; AVX2-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm0
; AVX2-NEXT: vbroadcastsd {{.*#+}} ymm1 = [4294967295,4294967295,4294967295,4294967295]
; AVX2-NEXT: vpbroadcastq {{.*#+}} ymm2 = [9223372036854775808,9223372036854775808,9223372036854775808,9223372036854775808]
; AVX2-NEXT: vpxor %ymm2, %ymm0, %ymm2
; AVX2-NEXT: vpbroadcastq {{.*#+}} ymm3 = [9223372041149743102,9223372041149743102,9223372041149743102,9223372041149743102]
; AVX2-NEXT: vpcmpgtq %ymm3, %ymm2, %ymm2
; AVX2-NEXT: vblendvpd %ymm2, %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vbroadcastf128 {{.*#+}} ymm1 = [0,2,4,6,0,2,4,6]
; AVX2-NEXT: # ymm1 = mem[0,1,0,1]
; AVX2-NEXT: vpermps %ymm0, %ymm1, %ymm0
; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512-LABEL: utest_f32i32:
; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vcvttps2uqq %xmm0, %ymm0
; AVX512-NEXT: vpmovusqd %ymm0, %xmm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
entry:
  %conv = fptoui <4 x float> %x to <4 x i64>
  %0 = icmp ult <4 x i64> %conv, <i64 4294967295, i64 4294967295, i64 4294967295, i64 4294967295>
  %spec.store.select = select <4 x i1> %0, <4 x i64> %conv, <4 x i64> <i64 4294967295, i64 4294967295, i64 4294967295, i64 4294967295>
  %conv6 = trunc <4 x i64> %spec.store.select to <4 x i32>
  ret <4 x i32> %conv6
}
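
; ustest_f32i32: fptosi <4 x float> to <4 x i64>, clamped to [0, 4294967295], then truncated to <4 x i32>.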
define <4 x i32> @ustest_f32i32(<4 x float> %x) nounwind {
; SSE-LABEL: ustest_f32i32:
; SSE: # %bb.0: # %entry
; SSE-NEXT: movaps %xmm0, %xmm1
; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,3],xmm0[3,3]
; SSE-NEXT: cvttss2si %xmm1, %rax
; SSE-NEXT: movq %rax, %xmm1
; SSE-NEXT: movaps %xmm0, %xmm2
; SSE-NEXT: unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm0[1]
; SSE-NEXT: cvttss2si %xmm2, %rax
; SSE-NEXT: movq %rax, %xmm2
; SSE-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm1[0]
; SSE-NEXT: cvttss2si %xmm0, %rax
; SSE-NEXT: movq %rax, %xmm4
; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1,1,1]
; SSE-NEXT: cvttss2si %xmm0, %rax
; SSE-NEXT: movq %rax, %xmm0
; SSE-NEXT: punpcklqdq {{.*#+}} xmm4 = xmm4[0],xmm0[0]
; SSE-NEXT: movdqa {{.*#+}} xmm3 = [4294967295,4294967295]
; SSE-NEXT: movdqa {{.*#+}} xmm0 = [2147483648,2147483648]
; SSE-NEXT: movdqa %xmm4, %xmm1
; SSE-NEXT: pxor %xmm0, %xmm1
; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm1[1,1,3,3]
; SSE-NEXT: pxor %xmm6, %xmm6
; SSE-NEXT: pcmpeqd %xmm6, %xmm5
; SSE-NEXT: movdqa {{.*#+}} xmm7 = [2147483647,2147483647]
; SSE-NEXT: movdqa %xmm7, %xmm8
; SSE-NEXT: pcmpgtd %xmm1, %xmm8
; SSE-NEXT: pshufd {{.*#+}} xmm9 = xmm8[0,0,2,2]
; SSE-NEXT: pand %xmm5, %xmm9
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm8[1,1,3,3]
; SSE-NEXT: por %xmm9, %xmm1
; SSE-NEXT: pand %xmm1, %xmm4
; SSE-NEXT: pandn %xmm3, %xmm1
; SSE-NEXT: por %xmm4, %xmm1
; SSE-NEXT: movdqa %xmm2, %xmm4
; SSE-NEXT: pxor %xmm0, %xmm4
; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm4[1,1,3,3]
; SSE-NEXT: pcmpeqd %xmm6, %xmm5
; SSE-NEXT: pcmpgtd %xmm4, %xmm7
; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm7[0,0,2,2]
; SSE-NEXT: pand %xmm5, %xmm4
; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm7[1,1,3,3]
; SSE-NEXT: por %xmm4, %xmm5
; SSE-NEXT: pand %xmm5, %xmm2
; SSE-NEXT: pandn %xmm3, %xmm5
; SSE-NEXT: por %xmm2, %xmm5
; SSE-NEXT: movdqa %xmm5, %xmm2
; SSE-NEXT: pxor %xmm0, %xmm2
; SSE-NEXT: movdqa %xmm2, %xmm3
; SSE-NEXT: pcmpgtd %xmm0, %xmm3
; SSE-NEXT: pcmpeqd %xmm0, %xmm2
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
; SSE-NEXT: pand %xmm3, %xmm2
; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
; SSE-NEXT: por %xmm2, %xmm3
; SSE-NEXT: pand %xmm5, %xmm3
; SSE-NEXT: movdqa %xmm1, %xmm2
; SSE-NEXT: pxor %xmm0, %xmm2
; SSE-NEXT: movdqa %xmm2, %xmm4
; SSE-NEXT: pcmpgtd %xmm0, %xmm4
; SSE-NEXT: pcmpeqd %xmm0, %xmm2
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
; SSE-NEXT: pand %xmm4, %xmm2
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm4[1,1,3,3]
; SSE-NEXT: por %xmm2, %xmm0
; SSE-NEXT: pand %xmm1, %xmm0
; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm3[0,2]
; SSE-NEXT: retq
;
; AVX2-LABEL: ustest_f32i32:
; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vshufps {{.*#+}} xmm1 = xmm0[3,3,3,3]
; AVX2-NEXT: vcvttss2si %xmm1, %rax
; AVX2-NEXT: vmovq %rax, %xmm1
; AVX2-NEXT: vshufpd {{.*#+}} xmm2 = xmm0[1,0]
; AVX2-NEXT: vcvttss2si %xmm2, %rax
; AVX2-NEXT: vmovq %rax, %xmm2
; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0]
; AVX2-NEXT: vcvttss2si %xmm0, %rax
; AVX2-NEXT: vmovq %rax, %xmm2
; AVX2-NEXT: vmovshdup {{.*#+}} xmm0 = xmm0[1,1,3,3]
; AVX2-NEXT: vcvttss2si %xmm0, %rax
; AVX2-NEXT: vmovq %rax, %xmm0
; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm2[0],xmm0[0]
; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
; AVX2-NEXT: vpbroadcastq {{.*#+}} ymm1 = [4294967295,4294967295,4294967295,4294967295]
; AVX2-NEXT: vpcmpgtq %ymm0, %ymm1, %ymm2
; AVX2-NEXT: vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX2-NEXT: vpcmpgtq %ymm1, %ymm0, %ymm1
; AVX2-NEXT: vpand %ymm0, %ymm1, %ymm0
; AVX2-NEXT: vpmovsxbd {{.*#+}} ymm1 = [0,2,4,6,0,0,0,0]
; AVX2-NEXT: vpermd %ymm0, %ymm1, %ymm0
; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512-LABEL: ustest_f32i32:
; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vcvttps2qq %xmm0, %ymm0
; AVX512-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX512-NEXT: vpmaxsq %ymm1, %ymm0, %ymm0
; AVX512-NEXT: vpmovusqd %ymm0, %xmm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
entry:
  %conv = fptosi <4 x float> %x to <4 x i64>
  %0 = icmp slt <4 x i64> %conv, <i64 4294967295, i64 4294967295, i64 4294967295, i64 4294967295>
  %spec.store.select = select <4 x i1> %0, <4 x i64> %conv, <4 x i64> <i64 4294967295, i64 4294967295, i64 4294967295, i64 4294967295>
  %1 = icmp sgt <4 x i64> %spec.store.select, zeroinitializer
  %spec.store.select7 = select <4 x i1> %1, <4 x i64> %spec.store.select, <4 x i64> zeroinitializer
  %conv6 = trunc <4 x i64> %spec.store.select7 to <4 x i32>
  ret <4 x i32> %conv6
}
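
; stest_f16i32: fptosi <4 x half> to <4 x i64>, clamped to the signed i32 range, then truncated to <4 x i32>. Without F16C/AVX512, each half element is extended via __extendhfsf2.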
define <4 x i32> @stest_f16i32(<4 x half> %x) nounwind {
; SSE-LABEL: stest_f16i32:
; SSE: # %bb.0: # %entry
; SSE-NEXT: subq $72, %rsp
; SSE-NEXT: movdqa %xmm0, (%rsp) # 16-byte Spill
; SSE-NEXT: movdqa %xmm0, %xmm1
; SSE-NEXT: psrld $16, %xmm1
; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movdqa %xmm0, %xmm1
; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1],xmm0[1,1]
; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: psrlq $48, %xmm0
; SSE-NEXT: callq __extendhfsf2@PLT
; SSE-NEXT: cvttss2si %xmm0, %rax
; SSE-NEXT: movq %rax, %xmm0
; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: callq __extendhfsf2@PLT
; SSE-NEXT: cvttss2si %xmm0, %rax
; SSE-NEXT: movq %rax, %xmm0
; SSE-NEXT: punpcklqdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; SSE-NEXT: # xmm0 = xmm0[0],mem[0]
; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload
; SSE-NEXT: callq __extendhfsf2@PLT
; SSE-NEXT: cvttss2si %xmm0, %rax
; SSE-NEXT: movq %rax, %xmm0
; SSE-NEXT: movdqa %xmm0, (%rsp) # 16-byte Spill
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: callq __extendhfsf2@PLT
; SSE-NEXT: cvttss2si %xmm0, %rax
; SSE-NEXT: movq %rax, %xmm0
; SSE-NEXT: movdqa (%rsp), %xmm3 # 16-byte Reload
; SSE-NEXT: punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm0[0]
; SSE-NEXT: movdqa {{.*#+}} xmm2 = [2147483647,2147483647]
; SSE-NEXT: movdqa {{.*#+}} xmm0 = [2147483648,2147483648]
; SSE-NEXT: movdqa %xmm3, %xmm1
; SSE-NEXT: movdqa %xmm3, %xmm8
; SSE-NEXT: pxor %xmm0, %xmm1
; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm1[1,1,3,3]
; SSE-NEXT: pxor %xmm4, %xmm4
; SSE-NEXT: pcmpeqd %xmm4, %xmm3
; SSE-NEXT: movdqa {{.*#+}} xmm5 = [4294967295,4294967295]
; SSE-NEXT: movdqa %xmm5, %xmm6
; SSE-NEXT: pcmpgtd %xmm1, %xmm6
; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm6[0,0,2,2]
; SSE-NEXT: pand %xmm3, %xmm7
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm6[1,1,3,3]
; SSE-NEXT: por %xmm7, %xmm1
; SSE-NEXT: pand %xmm1, %xmm8
; SSE-NEXT: pandn %xmm2, %xmm1
; SSE-NEXT: por %xmm8, %xmm1
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
; SSE-NEXT: movdqa %xmm7, %xmm3
; SSE-NEXT: pxor %xmm0, %xmm3
; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm3[1,1,3,3]
; SSE-NEXT: pcmpeqd %xmm4, %xmm6
; SSE-NEXT: pcmpgtd %xmm3, %xmm5
; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm5[0,0,2,2]
; SSE-NEXT: pand %xmm6, %xmm3
; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm5[1,1,3,3]
; SSE-NEXT: por %xmm3, %xmm4
; SSE-NEXT: movdqa %xmm7, %xmm3
; SSE-NEXT: pand %xmm4, %xmm3
; SSE-NEXT: pandn %xmm2, %xmm4
; SSE-NEXT: por %xmm3, %xmm4
; SSE-NEXT: movdqa {{.*#+}} xmm2 = [18446744071562067968,18446744071562067968]
; SSE-NEXT: movdqa %xmm4, %xmm3
; SSE-NEXT: pxor %xmm0, %xmm3
; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm3[1,1,3,3]
; SSE-NEXT: pcmpeqd %xmm6, %xmm6
; SSE-NEXT: pcmpeqd %xmm6, %xmm5
; SSE-NEXT: movdqa {{.*#+}} xmm7 = [18446744069414584320,18446744069414584320]
; SSE-NEXT: pcmpgtd %xmm7, %xmm3
; SSE-NEXT: pshufd {{.*#+}} xmm8 = xmm3[0,0,2,2]
; SSE-NEXT: pand %xmm5, %xmm8
; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
; SSE-NEXT: por %xmm8, %xmm3
; SSE-NEXT: pand %xmm3, %xmm4
; SSE-NEXT: pandn %xmm2, %xmm3
; SSE-NEXT: por %xmm4, %xmm3
; SSE-NEXT: pxor %xmm1, %xmm0
; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm0[1,1,3,3]
; SSE-NEXT: pcmpeqd %xmm6, %xmm4
; SSE-NEXT: pcmpgtd %xmm7, %xmm0
; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm0[0,0,2,2]
; SSE-NEXT: pand %xmm4, %xmm5
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
; SSE-NEXT: por %xmm5, %xmm0
; SSE-NEXT: pand %xmm0, %xmm1
; SSE-NEXT: pandn %xmm2, %xmm0
; SSE-NEXT: por %xmm1, %xmm0
; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm3[0,2]
; SSE-NEXT: addq $72, %rsp
; SSE-NEXT: retq
;
; AVX2-LABEL: stest_f16i32:
; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpshuflw {{.*#+}} xmm1 = xmm0[3,3,3,3,4,5,6,7]
; AVX2-NEXT: vcvtph2ps %xmm1, %xmm1
; AVX2-NEXT: vcvttss2si %xmm1, %rax
; AVX2-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX2-NEXT: vcvtph2ps %xmm1, %xmm1
; AVX2-NEXT: vcvttss2si %xmm1, %rcx
; AVX2-NEXT: vcvtph2ps %xmm0, %xmm1
; AVX2-NEXT: vmovq %rax, %xmm2
; AVX2-NEXT: vcvttss2si %xmm1, %rax
; AVX2-NEXT: vmovq %rcx, %xmm1
; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
; AVX2-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[1,1,1,1,4,5,6,7]
; AVX2-NEXT: vcvtph2ps %xmm0, %xmm0
; AVX2-NEXT: vmovq %rax, %xmm2
; AVX2-NEXT: vcvttss2si %xmm0, %rax
; AVX2-NEXT: vmovq %rax, %xmm0
; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm2[0],xmm0[0]
; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
; AVX2-NEXT: vpbroadcastq {{.*#+}} ymm1 = [2147483647,2147483647,2147483647,2147483647]
; AVX2-NEXT: vpcmpgtq %ymm0, %ymm1, %ymm2
; AVX2-NEXT: vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
; AVX2-NEXT: vpbroadcastq {{.*#+}} ymm1 = [18446744071562067968,18446744071562067968,18446744071562067968,18446744071562067968]
; AVX2-NEXT: vpcmpgtq %ymm1, %ymm0, %ymm2
; AVX2-NEXT: vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
; AVX2-NEXT: vbroadcastf128 {{.*#+}} ymm1 = [0,2,4,6,0,2,4,6]
; AVX2-NEXT: # ymm1 = mem[0,1,0,1]
; AVX2-NEXT: vpermps %ymm0, %ymm1, %ymm0
; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512-LABEL: stest_f16i32:
; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vcvtph2ps %xmm0, %ymm0
; AVX512-NEXT: vcvttps2qq %ymm0, %zmm0
; AVX512-NEXT: vpmovsqd %ymm0, %xmm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
entry:
  %conv = fptosi <4 x half> %x to <4 x i64>
  %0 = icmp slt <4 x i64> %conv, <i64 2147483647, i64 2147483647, i64 2147483647, i64 2147483647>
  %spec.store.select = select <4 x i1> %0, <4 x i64> %conv, <4 x i64> <i64 2147483647, i64 2147483647, i64 2147483647, i64 2147483647>
  %1 = icmp sgt <4 x i64> %spec.store.select, <i64 -2147483648, i64 -2147483648, i64 -2147483648, i64 -2147483648>
  %spec.store.select7 = select <4 x i1> %1, <4 x i64> %spec.store.select, <4 x i64> <i64 -2147483648, i64 -2147483648, i64 -2147483648, i64 -2147483648>
  %conv6 = trunc <4 x i64> %spec.store.select7 to <4 x i32>
  ret <4 x i32> %conv6
}
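
; utesth_f16i32: fptoui <4 x half> to <4 x i64>, clamped to UINT32_MAX, then truncated to <4 x i32>.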
define <4 x i32> @utesth_f16i32(<4 x half> %x) nounwind {
; SSE-LABEL: utesth_f16i32:
; SSE: # %bb.0: # %entry
; SSE-NEXT: subq $72, %rsp
; SSE-NEXT: movaps %xmm0, %xmm1
; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1],xmm0[1,1]
; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movaps %xmm0, %xmm1
; SSE-NEXT: psrlq $48, %xmm1
; SSE-NEXT: movdqa %xmm1, (%rsp) # 16-byte Spill
; SSE-NEXT: movaps %xmm0, %xmm1
; SSE-NEXT: psrld $16, %xmm1
; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: callq __extendhfsf2@PLT
; SSE-NEXT: cvttss2si %xmm0, %rax
; SSE-NEXT: subss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE-NEXT: cvttss2si %xmm0, %rcx
; SSE-NEXT: movq %rax, %rdx
; SSE-NEXT: sarq $63, %rdx
; SSE-NEXT: andq %rcx, %rdx
; SSE-NEXT: orq %rax, %rdx
; SSE-NEXT: movq %rdx, %xmm0
; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: callq __extendhfsf2@PLT
; SSE-NEXT: cvttss2si %xmm0, %rax
; SSE-NEXT: subss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE-NEXT: cvttss2si %xmm0, %rcx
; SSE-NEXT: movq %rax, %rdx
; SSE-NEXT: sarq $63, %rdx
; SSE-NEXT: andq %rcx, %rdx
; SSE-NEXT: orq %rax, %rdx
; SSE-NEXT: movq %rdx, %xmm0
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; SSE-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload
; SSE-NEXT: callq __extendhfsf2@PLT
; SSE-NEXT: cvttss2si %xmm0, %rax
; SSE-NEXT: subss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE-NEXT: cvttss2si %xmm0, %rcx
; SSE-NEXT: movq %rax, %rdx
; SSE-NEXT: sarq $63, %rdx
; SSE-NEXT: andq %rcx, %rdx
; SSE-NEXT: orq %rax, %rdx
; SSE-NEXT: movq %rdx, %xmm0
; SSE-NEXT: movdqa %xmm0, (%rsp) # 16-byte Spill
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: callq __extendhfsf2@PLT
; SSE-NEXT: cvttss2si %xmm0, %rax
; SSE-NEXT: subss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE-NEXT: cvttss2si %xmm0, %rcx
; SSE-NEXT: movq %rax, %rdx
; SSE-NEXT: sarq $63, %rdx
; SSE-NEXT: andq %rcx, %rdx
; SSE-NEXT: orq %rax, %rdx
; SSE-NEXT: movq %rdx, %xmm0
; SSE-NEXT: punpcklqdq (%rsp), %xmm0 # 16-byte Folded Reload
; SSE-NEXT: # xmm0 = xmm0[0],mem[0]
; SSE-NEXT: movdqa {{.*#+}} xmm1 = [9223372039002259456,9223372039002259456]
; SSE-NEXT: movdqa %xmm0, %xmm2
; SSE-NEXT: pxor %xmm1, %xmm2
; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm2[0,0,2,2]
; SSE-NEXT: movdqa {{.*#+}} xmm4 = [2147483647,2147483647,2147483647,2147483647]
; SSE-NEXT: movdqa %xmm4, %xmm5
; SSE-NEXT: pcmpgtd %xmm3, %xmm5
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
; SSE-NEXT: pcmpeqd %xmm1, %xmm2
; SSE-NEXT: pand %xmm5, %xmm2
; SSE-NEXT: pcmpeqd %xmm3, %xmm3
; SSE-NEXT: pand %xmm2, %xmm0
; SSE-NEXT: pxor %xmm3, %xmm2
; SSE-NEXT: por %xmm0, %xmm2
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
; SSE-NEXT: movdqa %xmm6, %xmm0
; SSE-NEXT: pxor %xmm1, %xmm0
; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm0[0,0,2,2]
; SSE-NEXT: pcmpgtd %xmm5, %xmm4
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
; SSE-NEXT: pcmpeqd %xmm1, %xmm0
; SSE-NEXT: pand %xmm4, %xmm0
; SSE-NEXT: pxor %xmm0, %xmm3
; SSE-NEXT: pand %xmm6, %xmm0
; SSE-NEXT: por %xmm3, %xmm0
; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm2[0,2]
; SSE-NEXT: addq $72, %rsp
; SSE-NEXT: retq
;
; AVX2-LABEL: utesth_f16i32:
; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpshuflw {{.*#+}} xmm1 = xmm0[3,3,3,3,4,5,6,7]
; AVX2-NEXT: vcvtph2ps %xmm1, %xmm2
; AVX2-NEXT: vmovss {{.*#+}} xmm1 = [9.22337203E+18,0.0E+0,0.0E+0,0.0E+0]
; AVX2-NEXT: vsubss %xmm1, %xmm2, %xmm3
; AVX2-NEXT: vcvttss2si %xmm3, %rax
; AVX2-NEXT: vcvttss2si %xmm2, %rcx
; AVX2-NEXT: movq %rcx, %rdx
; AVX2-NEXT: sarq $63, %rdx
; AVX2-NEXT: andq %rax, %rdx
; AVX2-NEXT: orq %rcx, %rdx
; AVX2-NEXT: vmovshdup {{.*#+}} xmm2 = xmm0[1,1,3,3]
; AVX2-NEXT: vcvtph2ps %xmm2, %xmm2
; AVX2-NEXT: vsubss %xmm1, %xmm2, %xmm3
; AVX2-NEXT: vcvttss2si %xmm3, %rax
; AVX2-NEXT: vcvttss2si %xmm2, %rcx
; AVX2-NEXT: vmovq %rdx, %xmm2
; AVX2-NEXT: movq %rcx, %rdx
; AVX2-NEXT: sarq $63, %rdx
; AVX2-NEXT: vcvtph2ps %xmm0, %xmm3
; AVX2-NEXT: andq %rax, %rdx
; AVX2-NEXT: vsubss %xmm1, %xmm3, %xmm4
; AVX2-NEXT: vcvttss2si %xmm4, %rax
; AVX2-NEXT: orq %rcx, %rdx
; AVX2-NEXT: vmovq %rdx, %xmm4
; AVX2-NEXT: vcvttss2si %xmm3, %rcx
; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm4[0],xmm2[0]
; AVX2-NEXT: movq %rcx, %rdx
; AVX2-NEXT: sarq $63, %rdx
; AVX2-NEXT: andq %rax, %rdx
; AVX2-NEXT: orq %rcx, %rdx
; AVX2-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[1,1,1,1,4,5,6,7]
; AVX2-NEXT: vcvtph2ps %xmm0, %xmm0
; AVX2-NEXT: vsubss %xmm1, %xmm0, %xmm1
; AVX2-NEXT: vcvttss2si %xmm1, %rax
; AVX2-NEXT: vcvttss2si %xmm0, %rcx
; AVX2-NEXT: vmovq %rdx, %xmm0
; AVX2-NEXT: movq %rcx, %rdx
; AVX2-NEXT: sarq $63, %rdx
; AVX2-NEXT: andq %rax, %rdx
; AVX2-NEXT: orq %rcx, %rdx
; AVX2-NEXT: vmovq %rdx, %xmm1
; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; AVX2-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm0
; AVX2-NEXT: vbroadcastsd {{.*#+}} ymm1 = [4294967295,4294967295,4294967295,4294967295]
; AVX2-NEXT: vpbroadcastq {{.*#+}} ymm2 = [9223372036854775808,9223372036854775808,9223372036854775808,9223372036854775808]
; AVX2-NEXT: vpxor %ymm2, %ymm0, %ymm2
; AVX2-NEXT: vpbroadcastq {{.*#+}} ymm3 = [9223372041149743102,9223372041149743102,9223372041149743102,9223372041149743102]
; AVX2-NEXT: vpcmpgtq %ymm3, %ymm2, %ymm2
; AVX2-NEXT: vblendvpd %ymm2, %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vbroadcastf128 {{.*#+}} ymm1 = [0,2,4,6,0,2,4,6]
; AVX2-NEXT: # ymm1 = mem[0,1,0,1]
; AVX2-NEXT: vpermps %ymm0, %ymm1, %ymm0
; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512-LABEL: utesth_f16i32:
; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vcvtph2ps %xmm0, %ymm0
; AVX512-NEXT: vcvttps2uqq %ymm0, %zmm0
; AVX512-NEXT: vpmovusqd %ymm0, %xmm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
entry:
  %conv = fptoui <4 x half> %x to <4 x i64>
  %0 = icmp ult <4 x i64> %conv, <i64 4294967295, i64 4294967295, i64 4294967295, i64 4294967295>
  %spec.store.select = select <4 x i1> %0, <4 x i64> %conv, <4 x i64> <i64 4294967295, i64 4294967295, i64 4294967295, i64 4294967295>
  %conv6 = trunc <4 x i64> %spec.store.select to <4 x i32>
  ret <4 x i32> %conv6
}
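
; ustest_f16i32: fptosi <4 x half> to <4 x i64>, clamped to [0, 4294967295], then truncated to <4 x i32>.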
define <4 x i32> @ustest_f16i32(<4 x half> %x) nounwind {
; SSE-LABEL: ustest_f16i32:
; SSE: # %bb.0: # %entry
; SSE-NEXT: subq $72, %rsp
; SSE-NEXT: movdqa %xmm0, (%rsp) # 16-byte Spill
; SSE-NEXT: movdqa %xmm0, %xmm1
; SSE-NEXT: psrld $16, %xmm1
; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movdqa %xmm0, %xmm1
; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1],xmm0[1,1]
; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: psrlq $48, %xmm0
; SSE-NEXT: callq __extendhfsf2@PLT
; SSE-NEXT: cvttss2si %xmm0, %rax
; SSE-NEXT: movq %rax, %xmm0
; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: callq __extendhfsf2@PLT
; SSE-NEXT: cvttss2si %xmm0, %rax
; SSE-NEXT: movq %rax, %xmm0
; SSE-NEXT: punpcklqdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; SSE-NEXT: # xmm0 = xmm0[0],mem[0]
; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload
; SSE-NEXT: callq __extendhfsf2@PLT
; SSE-NEXT: cvttss2si %xmm0, %rax
; SSE-NEXT: movq %rax, %xmm0
; SSE-NEXT: movdqa %xmm0, (%rsp) # 16-byte Spill
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: callq __extendhfsf2@PLT
; SSE-NEXT: cvttss2si %xmm0, %rax
; SSE-NEXT: movq %rax, %xmm0
; SSE-NEXT: movdqa (%rsp), %xmm3 # 16-byte Reload
; SSE-NEXT: punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm0[0]
; SSE-NEXT: movdqa {{.*#+}} xmm2 = [4294967295,4294967295]
; SSE-NEXT: movdqa {{.*#+}} xmm0 = [2147483648,2147483648]
; SSE-NEXT: movdqa %xmm3, %xmm1
; SSE-NEXT: movdqa %xmm3, %xmm8
; SSE-NEXT: pxor %xmm0, %xmm1
; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm1[1,1,3,3]
; SSE-NEXT: pxor %xmm4, %xmm4
; SSE-NEXT: pcmpeqd %xmm4, %xmm3
; SSE-NEXT: movdqa {{.*#+}} xmm5 = [2147483647,2147483647]
; SSE-NEXT: movdqa %xmm5, %xmm6
; SSE-NEXT: pcmpgtd %xmm1, %xmm6
; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm6[0,0,2,2]
; SSE-NEXT: pand %xmm3, %xmm7
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm6[1,1,3,3]
; SSE-NEXT: por %xmm7, %xmm1
; SSE-NEXT: pand %xmm1, %xmm8
; SSE-NEXT: pandn %xmm2, %xmm1
; SSE-NEXT: por %xmm8, %xmm1
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
; SSE-NEXT: movdqa %xmm7, %xmm3
; SSE-NEXT: pxor %xmm0, %xmm3
; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm3[1,1,3,3]
; SSE-NEXT: pcmpeqd %xmm4, %xmm6
; SSE-NEXT: pcmpgtd %xmm3, %xmm5
; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm5[0,0,2,2]
; SSE-NEXT: pand %xmm6, %xmm3
; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm5[1,1,3,3]
; SSE-NEXT: por %xmm3, %xmm4
; SSE-NEXT: movdqa %xmm7, %xmm3
; SSE-NEXT: pand %xmm4, %xmm3
; SSE-NEXT: pandn %xmm2, %xmm4
; SSE-NEXT: por %xmm3, %xmm4
; SSE-NEXT: movdqa %xmm4, %xmm2
; SSE-NEXT: pxor %xmm0, %xmm2
; SSE-NEXT: movdqa %xmm2, %xmm3
; SSE-NEXT: pcmpgtd %xmm0, %xmm3
; SSE-NEXT: pcmpeqd %xmm0, %xmm2
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
; SSE-NEXT: pand %xmm3, %xmm2
; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
; SSE-NEXT: por %xmm2, %xmm3
; SSE-NEXT: pand %xmm4, %xmm3
; SSE-NEXT: movdqa %xmm1, %xmm2
; SSE-NEXT: pxor %xmm0, %xmm2
; SSE-NEXT: movdqa %xmm2, %xmm4
; SSE-NEXT: pcmpgtd %xmm0, %xmm4
; SSE-NEXT: pcmpeqd %xmm0, %xmm2
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
; SSE-NEXT: pand %xmm4, %xmm2
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm4[1,1,3,3]
; SSE-NEXT: por %xmm2, %xmm0
; SSE-NEXT: pand %xmm1, %xmm0
; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm3[0,2]
; SSE-NEXT: addq $72, %rsp
; SSE-NEXT: retq
;
; AVX2-LABEL: ustest_f16i32:
; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpshuflw {{.*#+}} xmm1 = xmm0[3,3,3,3,4,5,6,7]
; AVX2-NEXT: vcvtph2ps %xmm1, %xmm1
; AVX2-NEXT: vcvttss2si %xmm1, %rax
; AVX2-NEXT: vmovq %rax, %xmm1
; AVX2-NEXT: vmovshdup {{.*#+}} xmm2 = xmm0[1,1,3,3]
; AVX2-NEXT: vcvtph2ps %xmm2, %xmm2
; AVX2-NEXT: vcvttss2si %xmm2, %rax
; AVX2-NEXT: vmovq %rax, %xmm2
; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0]
; AVX2-NEXT: vcvtph2ps %xmm0, %xmm2
; AVX2-NEXT: vcvttss2si %xmm2, %rax
; AVX2-NEXT: vmovq %rax, %xmm2
; AVX2-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[1,1,1,1,4,5,6,7]
; AVX2-NEXT: vcvtph2ps %xmm0, %xmm0
; AVX2-NEXT: vcvttss2si %xmm0, %rax
; AVX2-NEXT: vmovq %rax, %xmm0
; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm2[0],xmm0[0]
; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
; AVX2-NEXT: vpbroadcastq {{.*#+}} ymm1 = [4294967295,4294967295,4294967295,4294967295]
; AVX2-NEXT: vpcmpgtq %ymm0, %ymm1, %ymm2
; AVX2-NEXT: vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX2-NEXT: vpcmpgtq %ymm1, %ymm0, %ymm1
; AVX2-NEXT: vpand %ymm0, %ymm1, %ymm0
; AVX2-NEXT: vpmovsxbd {{.*#+}} ymm1 = [0,2,4,6,0,0,0,0]
; AVX2-NEXT: vpermd %ymm0, %ymm1, %ymm0
; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512-LABEL: ustest_f16i32:
; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vcvtph2ps %xmm0, %ymm0
; AVX512-NEXT: vcvttps2qq %ymm0, %zmm0
; AVX512-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX512-NEXT: vpmaxsq %ymm1, %ymm0, %ymm0
; AVX512-NEXT: vpmovusqd %ymm0, %xmm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
entry:
  %conv = fptosi <4 x half> %x to <4 x i64>
  %0 = icmp slt <4 x i64> %conv, <i64 4294967295, i64 4294967295, i64 4294967295, i64 4294967295>
  %spec.store.select = select <4 x i1> %0, <4 x i64> %conv, <4 x i64> <i64 4294967295, i64 4294967295, i64 4294967295, i64 4294967295>
  %1 = icmp sgt <4 x i64> %spec.store.select, zeroinitializer
  %spec.store.select7 = select <4 x i1> %1, <4 x i64> %spec.store.select, <4 x i64> zeroinitializer
  %conv6 = trunc <4 x i64> %spec.store.select7 to <4 x i32>
  ret <4 x i32> %conv6
}

; i16 saturate
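
; stest_f64i16: fptosi <2 x double> to <2 x i32>, clamped to the signed i16 range [-32768, 32767], then truncated to <2 x i16>.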
define <2 x i16> @stest_f64i16(<2 x double> %x) nounwind {
; SSE-LABEL: stest_f64i16:
; SSE: # %bb.0: # %entry
; SSE-NEXT: cvttpd2dq %xmm0, %xmm0
; SSE-NEXT: packssdw %xmm0, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: stest_f64i16:
; AVX: # %bb.0: # %entry
; AVX-NEXT: vcvttpd2dq %xmm0, %xmm0
; AVX-NEXT: vpackssdw %xmm0, %xmm0, %xmm0
; AVX-NEXT: retq
entry:
  %conv = fptosi <2 x double> %x to <2 x i32>
  %0 = icmp slt <2 x i32> %conv, <i32 32767, i32 32767>
  %spec.store.select = select <2 x i1> %0, <2 x i32> %conv, <2 x i32> <i32 32767, i32 32767>
  %1 = icmp sgt <2 x i32> %spec.store.select, <i32 -32768, i32 -32768>
  %spec.store.select7 = select <2 x i1> %1, <2 x i32> %spec.store.select, <2 x i32> <i32 -32768, i32 -32768>
  %conv6 = trunc <2 x i32> %spec.store.select7 to <2 x i16>
  ret <2 x i16> %conv6
}
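
; utest_f64i16: fptoui <2 x double> to <2 x i32>, clamped to UINT16_MAX (65535), then truncated to <2 x i16>.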
define <2 x i16> @utest_f64i16(<2 x double> %x) nounwind {
; SSE-LABEL: utest_f64i16:
; SSE: # %bb.0: # %entry
; SSE-NEXT: cvttpd2dq %xmm0, %xmm1
; SSE-NEXT: movapd %xmm1, %xmm2
; SSE-NEXT: psrad $31, %xmm2
; SSE-NEXT: addpd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE-NEXT: cvttpd2dq %xmm0, %xmm0
; SSE-NEXT: andpd %xmm2, %xmm0
; SSE-NEXT: orpd %xmm1, %xmm0
; SSE-NEXT: movapd {{.*#+}} xmm1 = [2147483648,2147483648,2147483648,2147483648]
; SSE-NEXT: xorpd %xmm0, %xmm1
; SSE-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
; SSE-NEXT: movdqa %xmm1, %xmm2
; SSE-NEXT: pandn %xmm0, %xmm2
; SSE-NEXT: psrld $16, %xmm1
; SSE-NEXT: por %xmm2, %xmm1
; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm1[0,2,2,3,4,5,6,7]
; SSE-NEXT: retq
;
; AVX2-LABEL: utest_f64i16:
; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vcvttpd2dq %xmm0, %xmm1
; AVX2-NEXT: vaddpd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; AVX2-NEXT: vpsrad $31, %xmm1, %xmm2
; AVX2-NEXT: vcvttpd2dq %xmm0, %xmm0
; AVX2-NEXT: vandpd %xmm2, %xmm0, %xmm0
; AVX2-NEXT: vorpd %xmm0, %xmm1, %xmm0
; AVX2-NEXT: vpbroadcastd {{.*#+}} xmm1 = [65535,65535,65535,65535]
; AVX2-NEXT: vpminud %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
; AVX2-NEXT: retq
;
; AVX512-LABEL: utest_f64i16:
; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vcvttpd2udq %xmm0, %xmm0
; AVX512-NEXT: vpminud {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm0, %xmm0
; AVX512-NEXT: vpmovdw %xmm0, %xmm0
; AVX512-NEXT: retq
entry:
  %conv = fptoui <2 x double> %x to <2 x i32>
  %0 = icmp ult <2 x i32> %conv, <i32 65535, i32 65535>
  %spec.store.select = select <2 x i1> %0, <2 x i32> %conv, <2 x i32> <i32 65535, i32 65535>
  %conv6 = trunc <2 x i32> %spec.store.select to <2 x i16>
  ret <2 x i16> %conv6
}
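
; ustest_f64i16: fptosi <2 x double> to <2 x i32>, clamped to [0, 65535], then truncated to <2 x i16>.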
define <2 x i16> @ustest_f64i16(<2 x double> %x) nounwind {
; SSE-LABEL: ustest_f64i16:
; SSE: # %bb.0: # %entry
; SSE-NEXT: cvttpd2dq %xmm0, %xmm0
; SSE-NEXT: movdqa {{.*#+}} xmm1 = [65535,65535,u,u]
; SSE-NEXT: movdqa %xmm1, %xmm2
; SSE-NEXT: pcmpgtd %xmm0, %xmm2
; SSE-NEXT: pand %xmm2, %xmm0
; SSE-NEXT: pandn %xmm1, %xmm2
; SSE-NEXT: por %xmm0, %xmm2
; SSE-NEXT: pxor %xmm0, %xmm0
; SSE-NEXT: movdqa %xmm2, %xmm1
; SSE-NEXT: pcmpgtd %xmm0, %xmm1
; SSE-NEXT: pand %xmm2, %xmm1
; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm1[0,2,2,3,4,5,6,7]
; SSE-NEXT: retq
;
; AVX-LABEL: ustest_f64i16:
; AVX: # %bb.0: # %entry
; AVX-NEXT: vcvttpd2dq %xmm0, %xmm0
; AVX-NEXT: vpackusdw %xmm0, %xmm0, %xmm0
; AVX-NEXT: retq
entry:
  %conv = fptosi <2 x double> %x to <2 x i32>
  %0 = icmp slt <2 x i32> %conv, <i32 65535, i32 65535>
  %spec.store.select = select <2 x i1> %0, <2 x i32> %conv, <2 x i32> <i32 65535, i32 65535>
  %1 = icmp sgt <2 x i32> %spec.store.select, zeroinitializer
  %spec.store.select7 = select <2 x i1> %1, <2 x i32> %spec.store.select, <2 x i32> zeroinitializer
  %conv6 = trunc <2 x i32> %spec.store.select7 to <2 x i16>
  ret <2 x i16> %conv6
}
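
; stest_f32i16: fptosi <4 x float> to <4 x i32>, clamped to the signed i16 range, then truncated to <4 x i16>.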
define <4 x i16> @stest_f32i16(<4 x float> %x) nounwind {
; SSE-LABEL: stest_f32i16:
; SSE: # %bb.0: # %entry
; SSE-NEXT: cvttps2dq %xmm0, %xmm0
; SSE-NEXT: packssdw %xmm0, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: stest_f32i16:
; AVX: # %bb.0: # %entry
; AVX-NEXT: vcvttps2dq %xmm0, %xmm0
; AVX-NEXT: vpackssdw %xmm0, %xmm0, %xmm0
; AVX-NEXT: retq
entry:
  %conv = fptosi <4 x float> %x to <4 x i32>
  %0 = icmp slt <4 x i32> %conv, <i32 32767, i32 32767, i32 32767, i32 32767>
  %spec.store.select = select <4 x i1> %0, <4 x i32> %conv, <4 x i32> <i32 32767, i32 32767, i32 32767, i32 32767>
  %1 = icmp sgt <4 x i32> %spec.store.select, <i32 -32768, i32 -32768, i32 -32768, i32 -32768>
  %spec.store.select7 = select <4 x i1> %1, <4 x i32> %spec.store.select, <4 x i32> <i32 -32768, i32 -32768, i32 -32768, i32 -32768>
  %conv6 = trunc <4 x i32> %spec.store.select7 to <4 x i16>
  ret <4 x i16> %conv6
}
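
; utest_f32i16: fptoui <4 x float> to <4 x i32>, clamped to UINT16_MAX, then truncated to <4 x i16>.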
define <4 x i16> @utest_f32i16(<4 x float> %x) nounwind {
; SSE-LABEL: utest_f32i16:
; SSE: # %bb.0: # %entry
; SSE-NEXT: cvttps2dq %xmm0, %xmm1
; SSE-NEXT: movdqa %xmm1, %xmm2
; SSE-NEXT: psrad $31, %xmm2
; SSE-NEXT: subps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE-NEXT: cvttps2dq %xmm0, %xmm0
; SSE-NEXT: pand %xmm2, %xmm0
; SSE-NEXT: por %xmm1, %xmm0
; SSE-NEXT: movdqa {{.*#+}} xmm1 = [2147483648,2147483648,2147483648,2147483648]
; SSE-NEXT: pxor %xmm0, %xmm1
; SSE-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
; SSE-NEXT: movdqa %xmm1, %xmm2
; SSE-NEXT: pandn %xmm0, %xmm2
; SSE-NEXT: psrld $16, %xmm1
; SSE-NEXT: por %xmm2, %xmm1
; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm1[0,2,2,3,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SSE-NEXT: retq
;
; AVX2-LABEL: utest_f32i16:
; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vbroadcastss {{.*#+}} xmm1 = [2.14748365E+9,2.14748365E+9,2.14748365E+9,2.14748365E+9]
; AVX2-NEXT: vsubps %xmm1, %xmm0, %xmm1
; AVX2-NEXT: vcvttps2dq %xmm1, %xmm1
; AVX2-NEXT: vcvttps2dq %xmm0, %xmm0
; AVX2-NEXT: vpsrad $31, %xmm0, %xmm2
; AVX2-NEXT: vpand %xmm2, %xmm1, %xmm1
; AVX2-NEXT: vpor %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpbroadcastd {{.*#+}} xmm1 = [65535,65535,65535,65535]
; AVX2-NEXT: vpminud %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpackusdw %xmm0, %xmm0, %xmm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: utest_f32i16:
; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vcvttps2udq %xmm0, %xmm0
; AVX512-NEXT: vpmovusdw %xmm0, %xmm0
; AVX512-NEXT: retq
entry:
  %conv = fptoui <4 x float> %x to <4 x i32>
  %0 = icmp ult <4 x i32> %conv, <i32 65535, i32 65535, i32 65535, i32 65535>
  %spec.store.select = select <4 x i1> %0, <4 x i32> %conv, <4 x i32> <i32 65535, i32 65535, i32 65535, i32 65535>
  %conv6 = trunc <4 x i32> %spec.store.select to <4 x i16>
  ret <4 x i16> %conv6
}
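
; ustest_f32i16: fptosi <4 x float> to <4 x i32>, clamped to [0, 65535], then truncated to <4 x i16>.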
define <4 x i16> @ustest_f32i16(<4 x float> %x) nounwind {
; SSE-LABEL: ustest_f32i16:
; SSE: # %bb.0: # %entry
; SSE-NEXT: cvttps2dq %xmm0, %xmm0
; SSE-NEXT: movdqa {{.*#+}} xmm1 = [65535,65535,65535,65535]
; SSE-NEXT: movdqa %xmm1, %xmm2
; SSE-NEXT: pcmpgtd %xmm0, %xmm2
; SSE-NEXT: pand %xmm2, %xmm0
; SSE-NEXT: pandn %xmm1, %xmm2
; SSE-NEXT: por %xmm0, %xmm2
; SSE-NEXT: pxor %xmm0, %xmm0
; SSE-NEXT: movdqa %xmm2, %xmm1
; SSE-NEXT: pcmpgtd %xmm0, %xmm1
; SSE-NEXT: pand %xmm2, %xmm1
; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm1[0,2,2,3,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SSE-NEXT: retq
;
; AVX-LABEL: ustest_f32i16:
; AVX: # %bb.0: # %entry
; AVX-NEXT: vcvttps2dq %xmm0, %xmm0
; AVX-NEXT: vpackusdw %xmm0, %xmm0, %xmm0
; AVX-NEXT: retq
entry:
  %conv = fptosi <4 x float> %x to <4 x i32>
  %0 = icmp slt <4 x i32> %conv, <i32 65535, i32 65535, i32 65535, i32 65535>
  %spec.store.select = select <4 x i1> %0, <4 x i32> %conv, <4 x i32> <i32 65535, i32 65535, i32 65535, i32 65535>
  %1 = icmp sgt <4 x i32> %spec.store.select, zeroinitializer
  %spec.store.select7 = select <4 x i1> %1, <4 x i32> %spec.store.select, <4 x i32> zeroinitializer
  %conv6 = trunc <4 x i32> %spec.store.select7 to <4 x i16>
  ret <4 x i16> %conv6
}
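
; stest_f16i16: fptosi <8 x half> to <8 x i32>, clamped to the signed i16 range, then truncated to <8 x i16>.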
define <8 x i16> @stest_f16i16(<8 x half> %x) nounwind {
; SSE-LABEL: stest_f16i16:
; SSE: # %bb.0: # %entry
; SSE-NEXT: subq $72, %rsp
; SSE-NEXT: movdqa %xmm0, (%rsp) # 16-byte Spill
; SSE-NEXT: psrldq {{.*#+}} xmm0 = xmm0[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; SSE-NEXT: callq __extendhfsf2@PLT
; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload
; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,3,3,3]
; SSE-NEXT: callq __extendhfsf2@PLT
; SSE-NEXT: unpcklps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; SSE-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
; SSE-NEXT: cvttps2dq %xmm0, %xmm0
; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movdqa (%rsp), %xmm0 # 16-byte Reload
; SSE-NEXT: psrldq {{.*#+}} xmm0 = xmm0[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; SSE-NEXT: callq __extendhfsf2@PLT
; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload
; SSE-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
; SSE-NEXT: callq __extendhfsf2@PLT
; SSE-NEXT: unpcklps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; SSE-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
; SSE-NEXT: cvttps2dq %xmm0, %xmm0
; SSE-NEXT: unpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; SSE-NEXT: # xmm0 = xmm0[0],mem[0]
; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movdqa (%rsp), %xmm0 # 16-byte Reload
; SSE-NEXT: psrlq $48, %xmm0
; SSE-NEXT: callq __extendhfsf2@PLT
; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload
; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1,1,1]
; SSE-NEXT: callq __extendhfsf2@PLT
; SSE-NEXT: unpcklps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; SSE-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
; SSE-NEXT: cvttps2dq %xmm0, %xmm0
; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload
; SSE-NEXT: callq __extendhfsf2@PLT
; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movdqa (%rsp), %xmm0 # 16-byte Reload
; SSE-NEXT: psrld $16, %xmm0
; SSE-NEXT: callq __extendhfsf2@PLT
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; SSE-NEXT: cvttps2dq %xmm1, %xmm0
; SSE-NEXT: punpcklqdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; SSE-NEXT: # xmm0 = xmm0[0],mem[0]
; SSE-NEXT: packssdw {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; SSE-NEXT: addq $72, %rsp
; SSE-NEXT: retq
;
; AVX2-LABEL: stest_f16i16:
; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vcvtph2ps %xmm0, %ymm0
; AVX2-NEXT: vcvttps2dq %ymm0, %ymm0
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vpackssdw %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512-LABEL: stest_f16i16:
; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vcvtph2ps %xmm0, %ymm0
; AVX512-NEXT: vcvttps2dq %ymm0, %ymm0
; AVX512-NEXT: vpmovsdw %ymm0, %xmm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
entry:
  %conv = fptosi <8 x half> %x to <8 x i32>
  %0 = icmp slt <8 x i32> %conv, <i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767>
  %spec.store.select = select <8 x i1> %0, <8 x i32> %conv, <8 x i32> <i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767>
  %1 = icmp sgt <8 x i32> %spec.store.select, <i32 -32768, i32 -32768, i32 -32768, i32 -32768, i32 -32768, i32 -32768, i32 -32768, i32 -32768>
  %spec.store.select7 = select <8 x i1> %1, <8 x i32> %spec.store.select, <8 x i32> <i32 -32768, i32 -32768, i32 -32768, i32 -32768, i32 -32768, i32 -32768, i32 -32768, i32 -32768>
  %conv6 = trunc <8 x i32> %spec.store.select7 to <8 x i16>
  ret <8 x i16> %conv6
}
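
; utesth_f16i16: fptoui <8 x half> to <8 x i32>, clamped to UINT16_MAX, then truncated to <8 x i16>.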
define <8 x i16> @utesth_f16i16(<8 x half> %x) nounwind {
; SSE-LABEL: utesth_f16i16:
; SSE: # %bb.0: # %entry
; SSE-NEXT: subq $72, %rsp
; SSE-NEXT: movdqa %xmm0, (%rsp) # 16-byte Spill
; SSE-NEXT: psrldq {{.*#+}} xmm0 = xmm0[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; SSE-NEXT: callq __extendhfsf2@PLT
; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload
; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,3,3,3]
; SSE-NEXT: callq __extendhfsf2@PLT
; SSE-NEXT: unpcklps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; SSE-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
; SSE-NEXT: cvttps2dq %xmm0, %xmm1
; SSE-NEXT: movdqa %xmm1, %xmm2
; SSE-NEXT: psrad $31, %xmm2
; SSE-NEXT: subps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE-NEXT: cvttps2dq %xmm0, %xmm0
; SSE-NEXT: pand %xmm2, %xmm0
; SSE-NEXT: por %xmm1, %xmm0
; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movdqa (%rsp), %xmm0 # 16-byte Reload
; SSE-NEXT: psrldq {{.*#+}} xmm0 = xmm0[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; SSE-NEXT: callq __extendhfsf2@PLT
; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload
; SSE-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
; SSE-NEXT: callq __extendhfsf2@PLT
; SSE-NEXT: unpcklps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; SSE-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
; SSE-NEXT: cvttps2dq %xmm0, %xmm1
; SSE-NEXT: movdqa %xmm1, %xmm2
; SSE-NEXT: psrad $31, %xmm2
; SSE-NEXT: subps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE-NEXT: cvttps2dq %xmm0, %xmm0
; SSE-NEXT: pand %xmm2, %xmm0
; SSE-NEXT: por %xmm1, %xmm0
; SSE-NEXT: punpcklqdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; SSE-NEXT: # xmm0 = xmm0[0],mem[0]
; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movdqa (%rsp), %xmm0 # 16-byte Reload
; SSE-NEXT: psrlq $48, %xmm0
; SSE-NEXT: callq __extendhfsf2@PLT
; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload
; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1,1,1]
; SSE-NEXT: callq __extendhfsf2@PLT
; SSE-NEXT: unpcklps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; SSE-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
; SSE-NEXT: cvttps2dq %xmm0, %xmm1
; SSE-NEXT: movdqa %xmm1, %xmm2
; SSE-NEXT: psrad $31, %xmm2
; SSE-NEXT: subps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE-NEXT: cvttps2dq %xmm0, %xmm0
; SSE-NEXT: pand %xmm2, %xmm0
; SSE-NEXT: por %xmm1, %xmm0
; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload
; SSE-NEXT: callq __extendhfsf2@PLT
; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movdqa (%rsp), %xmm0 # 16-byte Reload
; SSE-NEXT: psrld $16, %xmm0
; SSE-NEXT: callq __extendhfsf2@PLT
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
; SSE-NEXT: cvttps2dq %xmm2, %xmm0
; SSE-NEXT: movdqa %xmm0, %xmm1
; SSE-NEXT: psrad $31, %xmm1
; SSE-NEXT: subps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2
; SSE-NEXT: cvttps2dq %xmm2, %xmm2
; SSE-NEXT: pand %xmm1, %xmm2
; SSE-NEXT: por %xmm0, %xmm2
; SSE-NEXT: punpcklqdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Folded Reload
; SSE-NEXT: # xmm2 = xmm2[0],mem[0]
; SSE-NEXT: movdqa {{.*#+}} xmm1 = [2147483648,2147483648,2147483648,2147483648]
; SSE-NEXT: movdqa %xmm2, %xmm3
; SSE-NEXT: pxor %xmm1, %xmm3
; SSE-NEXT: movdqa {{.*#+}} xmm4 = [2147549183,2147549183,2147549183,2147549183]
; SSE-NEXT: movdqa %xmm4, %xmm0
; SSE-NEXT: pcmpgtd %xmm3, %xmm0
; SSE-NEXT: pand %xmm0, %xmm2
; SSE-NEXT: pcmpeqd %xmm3, %xmm3
; SSE-NEXT: pxor %xmm3, %xmm0
; SSE-NEXT: por %xmm2, %xmm0
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
; SSE-NEXT: pxor %xmm2, %xmm1
; SSE-NEXT: pcmpgtd %xmm1, %xmm4
; SSE-NEXT: pand %xmm4, %xmm2
; SSE-NEXT: pxor %xmm3, %xmm4
; SSE-NEXT: por %xmm2, %xmm4
; SSE-NEXT: pslld $16, %xmm4
; SSE-NEXT: psrad $16, %xmm4
; SSE-NEXT: pslld $16, %xmm0
; SSE-NEXT: psrad $16, %xmm0
; SSE-NEXT: packssdw %xmm4, %xmm0
; SSE-NEXT: addq $72, %rsp
; SSE-NEXT: retq
;
; AVX2-LABEL: utesth_f16i16:
; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vcvtph2ps %xmm0, %ymm0
; AVX2-NEXT: vbroadcastss {{.*#+}} ymm1 = [2.14748365E+9,2.14748365E+9,2.14748365E+9,2.14748365E+9,2.14748365E+9,2.14748365E+9,2.14748365E+9,2.14748365E+9]
; AVX2-NEXT: vsubps %ymm1, %ymm0, %ymm1
; AVX2-NEXT: vcvttps2dq %ymm1, %ymm1
; AVX2-NEXT: vcvttps2dq %ymm0, %ymm0
; AVX2-NEXT: vpsrad $31, %ymm0, %ymm2
; AVX2-NEXT: vpand %ymm2, %ymm1, %ymm1
; AVX2-NEXT: vpor %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpbroadcastd {{.*#+}} ymm1 = [65535,65535,65535,65535,65535,65535,65535,65535]
; AVX2-NEXT: vpminud %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vpackusdw %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512-LABEL: utesth_f16i16:
; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vcvtph2ps %xmm0, %ymm0
; AVX512-NEXT: vcvttps2udq %ymm0, %ymm0
; AVX512-NEXT: vpmovusdw %ymm0, %xmm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
entry:
  %conv = fptoui <8 x half> %x to <8 x i32>
  %0 = icmp ult <8 x i32> %conv, <i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535>
  %spec.store.select = select <8 x i1> %0, <8 x i32> %conv, <8 x i32> <i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535>
  %conv6 = trunc <8 x i32> %spec.store.select to <8 x i16>
  ret <8 x i16> %conv6
}
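
; utesth_f16i16 needs only the upper clamp, a single umin against 65535.
; SSE2 has no unsigned float-to-int conversion, so the lowering converts both
; x and x-2^31 and blends on the sign of the signed result; AVX512 gets
; vcvttps2udq plus the saturating truncate vpmovusdw directly.
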
define <8 x i16> @ustest_f16i16(<8 x half> %x) nounwind {
; SSE-LABEL: ustest_f16i16:
; SSE: # %bb.0: # %entry
; SSE-NEXT: subq $72, %rsp
; SSE-NEXT: movdqa %xmm0, (%rsp) # 16-byte Spill
; SSE-NEXT: psrlq $48, %xmm0
; SSE-NEXT: callq __extendhfsf2@PLT
; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload
; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1,1,1]
; SSE-NEXT: callq __extendhfsf2@PLT
; SSE-NEXT: unpcklps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; SSE-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
; SSE-NEXT: cvttps2dq %xmm0, %xmm0
; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload
; SSE-NEXT: callq __extendhfsf2@PLT
; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movdqa (%rsp), %xmm0 # 16-byte Reload
; SSE-NEXT: psrld $16, %xmm0
; SSE-NEXT: callq __extendhfsf2@PLT
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; SSE-NEXT: cvttps2dq %xmm1, %xmm0
; SSE-NEXT: unpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; SSE-NEXT: # xmm0 = xmm0[0],mem[0]
; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movdqa (%rsp), %xmm0 # 16-byte Reload
; SSE-NEXT: psrldq {{.*#+}} xmm0 = xmm0[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; SSE-NEXT: callq __extendhfsf2@PLT
; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload
; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,3,3,3]
; SSE-NEXT: callq __extendhfsf2@PLT
; SSE-NEXT: unpcklps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; SSE-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
; SSE-NEXT: cvttps2dq %xmm0, %xmm0
; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movdqa (%rsp), %xmm0 # 16-byte Reload
; SSE-NEXT: psrldq {{.*#+}} xmm0 = xmm0[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; SSE-NEXT: callq __extendhfsf2@PLT
; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload
; SSE-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
; SSE-NEXT: callq __extendhfsf2@PLT
; SSE-NEXT: unpcklps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; SSE-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
; SSE-NEXT: cvttps2dq %xmm0, %xmm0
; SSE-NEXT: punpcklqdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; SSE-NEXT: # xmm0 = xmm0[0],mem[0]
; SSE-NEXT: movdqa {{.*#+}} xmm1 = [65535,65535,65535,65535]
; SSE-NEXT: movdqa %xmm1, %xmm2
; SSE-NEXT: pcmpgtd %xmm0, %xmm2
; SSE-NEXT: pand %xmm2, %xmm0
; SSE-NEXT: pandn %xmm1, %xmm2
; SSE-NEXT: por %xmm0, %xmm2
; SSE-NEXT: movdqa %xmm1, %xmm3
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: pcmpgtd %xmm0, %xmm3
; SSE-NEXT: pand %xmm3, %xmm0
; SSE-NEXT: pandn %xmm1, %xmm3
; SSE-NEXT: por %xmm0, %xmm3
; SSE-NEXT: pxor %xmm1, %xmm1
; SSE-NEXT: movdqa %xmm3, %xmm0
; SSE-NEXT: pcmpgtd %xmm1, %xmm0
; SSE-NEXT: pand %xmm3, %xmm0
; SSE-NEXT: movdqa %xmm2, %xmm3
; SSE-NEXT: pcmpgtd %xmm1, %xmm3
; SSE-NEXT: pand %xmm2, %xmm3
; SSE-NEXT: pslld $16, %xmm3
; SSE-NEXT: psrad $16, %xmm3
; SSE-NEXT: pslld $16, %xmm0
; SSE-NEXT: psrad $16, %xmm0
; SSE-NEXT: packssdw %xmm3, %xmm0
; SSE-NEXT: addq $72, %rsp
; SSE-NEXT: retq
;
; AVX2-LABEL: ustest_f16i16:
; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vcvtph2ps %xmm0, %ymm0
; AVX2-NEXT: vcvttps2dq %ymm0, %ymm0
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vpackusdw %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512-LABEL: ustest_f16i16:
; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vcvtph2ps %xmm0, %ymm0
; AVX512-NEXT: vcvttps2dq %ymm0, %ymm0
; AVX512-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX512-NEXT: vpmaxsd %ymm1, %ymm0, %ymm0
; AVX512-NEXT: vpmovusdw %ymm0, %xmm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
entry:
  %conv = fptosi <8 x half> %x to <8 x i32>
  %0 = icmp slt <8 x i32> %conv, <i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535>
  %spec.store.select = select <8 x i1> %0, <8 x i32> %conv, <8 x i32> <i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535>
  %1 = icmp sgt <8 x i32> %spec.store.select, zeroinitializer
  %spec.store.select7 = select <8 x i1> %1, <8 x i32> %spec.store.select, <8 x i32> zeroinitializer
  %conv6 = trunc <8 x i32> %spec.store.select7 to <8 x i16>
  ret <8 x i16> %conv6
}
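
; ustest_f16i16 clamps a *signed* conversion to [0, 65535], so both bounds
; survive: smin against 65535, then smax against zero. AVX512 keeps only the
; vpmaxsd against zero and lets vpmovusdw do the unsigned-saturating narrow.
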
define <2 x i8> @stest_f64i8(<2 x double> %x) nounwind {
; SSE-LABEL: stest_f64i8:
; SSE: # %bb.0: # %entry
; SSE-NEXT: cvttpd2dq %xmm0, %xmm0
; SSE-NEXT: packssdw %xmm0, %xmm0
; SSE-NEXT: packsswb %xmm0, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: stest_f64i8:
; AVX: # %bb.0: # %entry
; AVX-NEXT: vcvttpd2dq %xmm0, %xmm0
; AVX-NEXT: vpackssdw %xmm0, %xmm0, %xmm0
; AVX-NEXT: vpacksswb %xmm0, %xmm0, %xmm0
; AVX-NEXT: retq
entry:
  %conv = fptosi <2 x double> %x to <2 x i32>
  %0 = icmp slt <2 x i32> %conv, <i32 127, i32 127>
  %spec.store.select = select <2 x i1> %0, <2 x i32> %conv, <2 x i32> <i32 127, i32 127>
  %1 = icmp sgt <2 x i32> %spec.store.select, <i32 -128, i32 -128>
  %spec.store.select7 = select <2 x i1> %1, <2 x i32> %spec.store.select, <2 x i32> <i32 -128, i32 -128>
  %conv6 = trunc <2 x i32> %spec.store.select7 to <2 x i8>
  ret <2 x i8> %conv6
}
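
; For i8 results the x86 pack instructions saturate by construction, so the
; signed clamp needs no explicit compares: cvttpd2dq followed by
; packssdw+packsswb already pins the value to [-128, 127].
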
define <2 x i8> @utest_f64i8(<2 x double> %x) nounwind {
; SSE-LABEL: utest_f64i8:
; SSE: # %bb.0: # %entry
; SSE-NEXT: cvttpd2dq %xmm0, %xmm1
; SSE-NEXT: movapd %xmm1, %xmm2
; SSE-NEXT: psrad $31, %xmm2
; SSE-NEXT: addpd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE-NEXT: cvttpd2dq %xmm0, %xmm3
; SSE-NEXT: andpd %xmm2, %xmm3
; SSE-NEXT: orpd %xmm1, %xmm3
; SSE-NEXT: movapd {{.*#+}} xmm0 = [2147483648,2147483648,2147483648,2147483648]
; SSE-NEXT: xorpd %xmm3, %xmm0
; SSE-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE-NEXT: movdqa %xmm0, %xmm1
; SSE-NEXT: pandn %xmm3, %xmm1
; SSE-NEXT: psrld $24, %xmm0
; SSE-NEXT: por %xmm1, %xmm0
; SSE-NEXT: packuswb %xmm0, %xmm0
; SSE-NEXT: packuswb %xmm0, %xmm0
; SSE-NEXT: retq
;
; AVX2-LABEL: utest_f64i8:
; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vcvttpd2dq %xmm0, %xmm1
; AVX2-NEXT: vpsrad $31, %xmm1, %xmm2
; AVX2-NEXT: vaddpd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; AVX2-NEXT: vcvttpd2dq %xmm0, %xmm0
; AVX2-NEXT: vandpd %xmm2, %xmm0, %xmm0
; AVX2-NEXT: vorpd %xmm0, %xmm1, %xmm0
; AVX2-NEXT: vpbroadcastd {{.*#+}} xmm1 = [255,255,255,255]
; AVX2-NEXT: vpminud %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,4,8,12,0,4,8,12,0,4,8,12,0,4,8,12]
; AVX2-NEXT: retq
;
; AVX512-LABEL: utest_f64i8:
; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vcvttpd2udq %xmm0, %xmm0
; AVX512-NEXT: vpminud {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm0, %xmm0
; AVX512-NEXT: vpmovdb %xmm0, %xmm0
; AVX512-NEXT: retq
entry:
  %conv = fptoui <2 x double> %x to <2 x i32>
  %0 = icmp ult <2 x i32> %conv, <i32 255, i32 255>
  %spec.store.select = select <2 x i1> %0, <2 x i32> %conv, <2 x i32> <i32 255, i32 255>
  %conv6 = trunc <2 x i32> %spec.store.select to <2 x i8>
  ret <2 x i8> %conv6
}

define <2 x i8> @ustest_f64i8(<2 x double> %x) nounwind {
; SSE-LABEL: ustest_f64i8:
; SSE: # %bb.0: # %entry
; SSE-NEXT: cvttpd2dq %xmm0, %xmm0
; SSE-NEXT: packssdw %xmm0, %xmm0
; SSE-NEXT: packuswb %xmm0, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: ustest_f64i8:
; AVX: # %bb.0: # %entry
; AVX-NEXT: vcvttpd2dq %xmm0, %xmm0
; AVX-NEXT: vpackssdw %xmm0, %xmm0, %xmm0
; AVX-NEXT: vpackuswb %xmm0, %xmm0, %xmm0
; AVX-NEXT: retq
entry:
  %conv = fptosi <2 x double> %x to <2 x i32>
  %0 = icmp slt <2 x i32> %conv, <i32 255, i32 255>
  %spec.store.select = select <2 x i1> %0, <2 x i32> %conv, <2 x i32> <i32 255, i32 255>
  %1 = icmp sgt <2 x i32> %spec.store.select, zeroinitializer
  %spec.store.select7 = select <2 x i1> %1, <2 x i32> %spec.store.select, <2 x i32> zeroinitializer
  %conv6 = trunc <2 x i32> %spec.store.select7 to <2 x i8>
  ret <2 x i8> %conv6
}

define <4 x i8> @stest_f32i8(<4 x float> %x) nounwind {
; SSE-LABEL: stest_f32i8:
; SSE: # %bb.0: # %entry
; SSE-NEXT: cvttps2dq %xmm0, %xmm0
; SSE-NEXT: packssdw %xmm0, %xmm0
; SSE-NEXT: packsswb %xmm0, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: stest_f32i8:
; AVX: # %bb.0: # %entry
; AVX-NEXT: vcvttps2dq %xmm0, %xmm0
; AVX-NEXT: vpackssdw %xmm0, %xmm0, %xmm0
; AVX-NEXT: vpacksswb %xmm0, %xmm0, %xmm0
; AVX-NEXT: retq
entry:
  %conv = fptosi <4 x float> %x to <4 x i32>
  %0 = icmp slt <4 x i32> %conv, <i32 127, i32 127, i32 127, i32 127>
  %spec.store.select = select <4 x i1> %0, <4 x i32> %conv, <4 x i32> <i32 127, i32 127, i32 127, i32 127>
  %1 = icmp sgt <4 x i32> %spec.store.select, <i32 -128, i32 -128, i32 -128, i32 -128>
  %spec.store.select7 = select <4 x i1> %1, <4 x i32> %spec.store.select, <4 x i32> <i32 -128, i32 -128, i32 -128, i32 -128>
  %conv6 = trunc <4 x i32> %spec.store.select7 to <4 x i8>
  ret <4 x i8> %conv6
}

define <4 x i8> @utest_f32i8(<4 x float> %x) nounwind {
; SSE-LABEL: utest_f32i8:
; SSE: # %bb.0: # %entry
; SSE-NEXT: cvttps2dq %xmm0, %xmm1
; SSE-NEXT: movdqa %xmm1, %xmm2
; SSE-NEXT: psrad $31, %xmm2
; SSE-NEXT: subps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE-NEXT: cvttps2dq %xmm0, %xmm3
; SSE-NEXT: pand %xmm2, %xmm3
; SSE-NEXT: por %xmm1, %xmm3
; SSE-NEXT: movdqa {{.*#+}} xmm0 = [2147483648,2147483648,2147483648,2147483648]
; SSE-NEXT: pxor %xmm3, %xmm0
; SSE-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE-NEXT: movdqa %xmm0, %xmm1
; SSE-NEXT: pandn %xmm3, %xmm1
; SSE-NEXT: psrld $24, %xmm0
; SSE-NEXT: por %xmm1, %xmm0
; SSE-NEXT: packuswb %xmm0, %xmm0
; SSE-NEXT: packuswb %xmm0, %xmm0
; SSE-NEXT: retq
;
; AVX2-LABEL: utest_f32i8:
; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vbroadcastss {{.*#+}} xmm1 = [2.14748365E+9,2.14748365E+9,2.14748365E+9,2.14748365E+9]
; AVX2-NEXT: vsubps %xmm1, %xmm0, %xmm1
; AVX2-NEXT: vcvttps2dq %xmm1, %xmm1
; AVX2-NEXT: vcvttps2dq %xmm0, %xmm0
; AVX2-NEXT: vpsrad $31, %xmm0, %xmm2
; AVX2-NEXT: vpand %xmm2, %xmm1, %xmm1
; AVX2-NEXT: vpor %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpbroadcastd {{.*#+}} xmm1 = [255,255,255,255]
; AVX2-NEXT: vpminud %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,4,8,12,0,4,8,12,0,4,8,12,0,4,8,12]
; AVX2-NEXT: retq
;
; AVX512-LABEL: utest_f32i8:
; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vcvttps2udq %xmm0, %xmm0
; AVX512-NEXT: vpmovusdb %xmm0, %xmm0
; AVX512-NEXT: retq
entry:
  %conv = fptoui <4 x float> %x to <4 x i32>
  %0 = icmp ult <4 x i32> %conv, <i32 255, i32 255, i32 255, i32 255>
  %spec.store.select = select <4 x i1> %0, <4 x i32> %conv, <4 x i32> <i32 255, i32 255, i32 255, i32 255>
  %conv6 = trunc <4 x i32> %spec.store.select to <4 x i8>
  ret <4 x i8> %conv6
}

define <4 x i8> @ustest_f32i8(<4 x float> %x) nounwind {
; SSE-LABEL: ustest_f32i8:
; SSE: # %bb.0: # %entry
; SSE-NEXT: cvttps2dq %xmm0, %xmm0
; SSE-NEXT: packssdw %xmm0, %xmm0
; SSE-NEXT: packuswb %xmm0, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: ustest_f32i8:
; AVX: # %bb.0: # %entry
; AVX-NEXT: vcvttps2dq %xmm0, %xmm0
; AVX-NEXT: vpackssdw %xmm0, %xmm0, %xmm0
; AVX-NEXT: vpackuswb %xmm0, %xmm0, %xmm0
; AVX-NEXT: retq
entry:
  %conv = fptosi <4 x float> %x to <4 x i32>
  %0 = icmp slt <4 x i32> %conv, <i32 255, i32 255, i32 255, i32 255>
  %spec.store.select = select <4 x i1> %0, <4 x i32> %conv, <4 x i32> <i32 255, i32 255, i32 255, i32 255>
  %1 = icmp sgt <4 x i32> %spec.store.select, zeroinitializer
  %spec.store.select7 = select <4 x i1> %1, <4 x i32> %spec.store.select, <4 x i32> zeroinitializer
  %conv6 = trunc <4 x i32> %spec.store.select7 to <4 x i8>
  ret <4 x i8> %conv6
}
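
; The unsigned i8 cases cannot lean on the packs alone: the value is clamped
; to [0, 255] as a 32-bit umin first (vpminud on AVX2, or the sign-flipped
; pcmpgtd sequence on SSE2, which lacks pminud), and only then narrowed.
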
define <2 x i64> @stest_f64i64(<2 x double> %x) nounwind {
; SSE-LABEL: stest_f64i64:
; SSE: # %bb.0: # %entry
; SSE-NEXT: pushq %r14
; SSE-NEXT: pushq %rbx
; SSE-NEXT: subq $24, %rsp
; SSE-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
; SSE-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
; SSE-NEXT: callq __fixdfti@PLT
; SSE-NEXT: movq %rax, %rbx
; SSE-NEXT: movq %rdx, %r14
; SSE-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload
; SSE-NEXT: callq __fixdfti@PLT
; SSE-NEXT: xorl %ecx, %ecx
; SSE-NEXT: movabsq $9223372036854775807, %rsi # imm = 0x7FFFFFFFFFFFFFFF
; SSE-NEXT: cmpq %rsi, %rax
; SSE-NEXT: movq %rdx, %rdi
; SSE-NEXT: sbbq $0, %rdi
; SSE-NEXT: cmovgeq %rcx, %rdx
; SSE-NEXT: cmovgeq %rsi, %rax
; SSE-NEXT: cmpq %rsi, %rbx
; SSE-NEXT: movq %r14, %rdi
; SSE-NEXT: sbbq $0, %rdi
; SSE-NEXT: cmovlq %r14, %rcx
; SSE-NEXT: cmovlq %rbx, %rsi
; SSE-NEXT: movabsq $-9223372036854775808, %rdi # imm = 0x8000000000000000
; SSE-NEXT: cmpq %rsi, %rdi
; SSE-NEXT: movq $-1, %r8
; SSE-NEXT: movq $-1, %r9
; SSE-NEXT: sbbq %rcx, %r9
; SSE-NEXT: cmovgeq %rdi, %rsi
; SSE-NEXT: cmpq %rax, %rdi
; SSE-NEXT: sbbq %rdx, %r8
; SSE-NEXT: cmovgeq %rdi, %rax
; SSE-NEXT: movq %rax, %xmm0
; SSE-NEXT: movq %rsi, %xmm1
; SSE-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSE-NEXT: addq $24, %rsp
; SSE-NEXT: popq %rbx
; SSE-NEXT: popq %r14
; SSE-NEXT: retq
;
; AVX2-LABEL: stest_f64i64:
; AVX2: # %bb.0: # %entry
; AVX2-NEXT: pushq %r14
; AVX2-NEXT: pushq %rbx
; AVX2-NEXT: subq $24, %rsp
; AVX2-NEXT: vmovapd %xmm0, (%rsp) # 16-byte Spill
; AVX2-NEXT: vshufpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX2-NEXT: callq __fixdfti@PLT
; AVX2-NEXT: movq %rax, %rbx
; AVX2-NEXT: movq %rdx, %r14
; AVX2-NEXT: vmovaps (%rsp), %xmm0 # 16-byte Reload
; AVX2-NEXT: callq __fixdfti@PLT
; AVX2-NEXT: xorl %ecx, %ecx
; AVX2-NEXT: movabsq $9223372036854775807, %rsi # imm = 0x7FFFFFFFFFFFFFFF
; AVX2-NEXT: cmpq %rsi, %rax
; AVX2-NEXT: movq %rdx, %rdi
; AVX2-NEXT: sbbq $0, %rdi
; AVX2-NEXT: cmovgeq %rcx, %rdx
; AVX2-NEXT: cmovgeq %rsi, %rax
; AVX2-NEXT: cmpq %rsi, %rbx
; AVX2-NEXT: movq %r14, %rdi
; AVX2-NEXT: sbbq $0, %rdi
; AVX2-NEXT: cmovlq %r14, %rcx
; AVX2-NEXT: cmovlq %rbx, %rsi
; AVX2-NEXT: movabsq $-9223372036854775808, %rdi # imm = 0x8000000000000000
; AVX2-NEXT: cmpq %rsi, %rdi
; AVX2-NEXT: movq $-1, %r8
; AVX2-NEXT: sbbq %rcx, %r8
; AVX2-NEXT: movq $-1, %rcx
; AVX2-NEXT: cmovgeq %rdi, %rsi
; AVX2-NEXT: cmpq %rax, %rdi
; AVX2-NEXT: sbbq %rdx, %rcx
; AVX2-NEXT: cmovgeq %rdi, %rax
; AVX2-NEXT: vmovq %rax, %xmm0
; AVX2-NEXT: vmovq %rsi, %xmm1
; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; AVX2-NEXT: addq $24, %rsp
; AVX2-NEXT: popq %rbx
; AVX2-NEXT: popq %r14
; AVX2-NEXT: retq
;
; AVX512-LABEL: stest_f64i64:
; AVX512: # %bb.0: # %entry
; AVX512-NEXT: pushq %r14
; AVX512-NEXT: pushq %rbx
; AVX512-NEXT: subq $24, %rsp
; AVX512-NEXT: vmovapd %xmm0, (%rsp) # 16-byte Spill
; AVX512-NEXT: vshufpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX512-NEXT: callq __fixdfti@PLT
; AVX512-NEXT: movq %rax, %rbx
; AVX512-NEXT: movq %rdx, %r14
; AVX512-NEXT: vmovaps (%rsp), %xmm0 # 16-byte Reload
; AVX512-NEXT: callq __fixdfti@PLT
; AVX512-NEXT: xorl %ecx, %ecx
; AVX512-NEXT: movabsq $9223372036854775807, %rsi # imm = 0x7FFFFFFFFFFFFFFF
; AVX512-NEXT: cmpq %rsi, %rax
; AVX512-NEXT: movq %rdx, %rdi
; AVX512-NEXT: sbbq $0, %rdi
; AVX512-NEXT: cmovgeq %rcx, %rdx
; AVX512-NEXT: cmovgeq %rsi, %rax
; AVX512-NEXT: cmpq %rsi, %rbx
; AVX512-NEXT: movq %r14, %rdi
; AVX512-NEXT: sbbq $0, %rdi
; AVX512-NEXT: cmovlq %r14, %rcx
; AVX512-NEXT: cmovlq %rbx, %rsi
; AVX512-NEXT: movabsq $-9223372036854775808, %rdi # imm = 0x8000000000000000
; AVX512-NEXT: cmpq %rsi, %rdi
; AVX512-NEXT: movq $-1, %r8
; AVX512-NEXT: movq $-1, %r9
; AVX512-NEXT: sbbq %rcx, %r9
; AVX512-NEXT: cmovgeq %rdi, %rsi
; AVX512-NEXT: cmpq %rax, %rdi
; AVX512-NEXT: sbbq %rdx, %r8
; AVX512-NEXT: cmovgeq %rdi, %rax
; AVX512-NEXT: vmovq %rax, %xmm0
; AVX512-NEXT: vmovq %rsi, %xmm1
; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; AVX512-NEXT: addq $24, %rsp
; AVX512-NEXT: popq %rbx
; AVX512-NEXT: popq %r14
; AVX512-NEXT: retq
entry:
  %conv = fptosi <2 x double> %x to <2 x i128>
  %0 = icmp slt <2 x i128> %conv, <i128 9223372036854775807, i128 9223372036854775807>
  %spec.store.select = select <2 x i1> %0, <2 x i128> %conv, <2 x i128> <i128 9223372036854775807, i128 9223372036854775807>
  %1 = icmp sgt <2 x i128> %spec.store.select, <i128 -9223372036854775808, i128 -9223372036854775808>
  %spec.store.select7 = select <2 x i1> %1, <2 x i128> %spec.store.select, <2 x i128> <i128 -9223372036854775808, i128 -9223372036854775808>
  %conv6 = trunc <2 x i128> %spec.store.select7 to <2 x i64>
  ret <2 x i64> %conv6
}
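
; No ISA support exists for f64 -> i128, so each lane goes through the
; __fixdfti libcall; the [INT64_MIN, INT64_MAX] clamp is then stitched from
; 128-bit compares (cmpq/sbbq) feeding cmov, before the two lanes are
; reassembled with punpcklqdq.
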
define <2 x i64> @utest_f64i64(<2 x double> %x) nounwind {
; SSE-LABEL: utest_f64i64:
; SSE: # %bb.0: # %entry
; SSE-NEXT: pushq %r14
; SSE-NEXT: pushq %rbx
; SSE-NEXT: subq $24, %rsp
; SSE-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
; SSE-NEXT: callq __fixunsdfti@PLT
; SSE-NEXT: movq %rax, %rbx
; SSE-NEXT: movq %rdx, %r14
; SSE-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload
; SSE-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
; SSE-NEXT: callq __fixunsdfti@PLT
; SSE-NEXT: xorl %ecx, %ecx
; SSE-NEXT: testq %rdx, %rdx
; SSE-NEXT: cmovneq %rcx, %rax
; SSE-NEXT: testq %r14, %r14
; SSE-NEXT: cmovneq %rcx, %rbx
; SSE-NEXT: movq %rbx, %xmm0
; SSE-NEXT: movq %rax, %xmm1
; SSE-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSE-NEXT: addq $24, %rsp
; SSE-NEXT: popq %rbx
; SSE-NEXT: popq %r14
; SSE-NEXT: retq
;
; AVX-LABEL: utest_f64i64:
; AVX: # %bb.0: # %entry
; AVX-NEXT: pushq %r14
; AVX-NEXT: pushq %rbx
; AVX-NEXT: subq $24, %rsp
; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX-NEXT: callq __fixunsdfti@PLT
; AVX-NEXT: movq %rax, %rbx
; AVX-NEXT: movq %rdx, %r14
; AVX-NEXT: vpermilpd $1, (%rsp), %xmm0 # 16-byte Folded Reload
; AVX-NEXT: # xmm0 = mem[1,0]
; AVX-NEXT: callq __fixunsdfti@PLT
; AVX-NEXT: xorl %ecx, %ecx
; AVX-NEXT: testq %rdx, %rdx
; AVX-NEXT: cmovneq %rcx, %rax
; AVX-NEXT: testq %r14, %r14
; AVX-NEXT: cmovneq %rcx, %rbx
; AVX-NEXT: vmovq %rbx, %xmm0
; AVX-NEXT: vmovq %rax, %xmm1
; AVX-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; AVX-NEXT: addq $24, %rsp
; AVX-NEXT: popq %rbx
; AVX-NEXT: popq %r14
; AVX-NEXT: retq
entry:
  %conv = fptoui <2 x double> %x to <2 x i128>
  %0 = icmp ult <2 x i128> %conv, <i128 18446744073709551616, i128 18446744073709551616>
  %spec.store.select = select <2 x i1> %0, <2 x i128> %conv, <2 x i128> <i128 18446744073709551616, i128 18446744073709551616>
  %conv6 = trunc <2 x i128> %spec.store.select to <2 x i64>
  ret <2 x i64> %conv6
}
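
; Note the unsigned bound here is 18446744073709551616 = 2^64, which
; truncates to 0: the lowering therefore just zeroes any lane whose high
; 64 bits are nonzero (testq %rdx plus cmovne) rather than saturating to -1.
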
define <2 x i64> @ustest_f64i64(<2 x double> %x) nounwind {
; SSE-LABEL: ustest_f64i64:
; SSE: # %bb.0: # %entry
; SSE-NEXT: pushq %r14
; SSE-NEXT: pushq %rbx
; SSE-NEXT: subq $24, %rsp
; SSE-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
; SSE-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
; SSE-NEXT: callq __fixdfti@PLT
; SSE-NEXT: movq %rax, %rbx
; SSE-NEXT: movq %rdx, %r14
; SSE-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload
; SSE-NEXT: callq __fixdfti@PLT
; SSE-NEXT: xorl %ecx, %ecx
; SSE-NEXT: testq %rdx, %rdx
; SSE-NEXT: movl $1, %esi
; SSE-NEXT: cmovgq %rsi, %rdx
; SSE-NEXT: cmovgq %rcx, %rax
; SSE-NEXT: testq %r14, %r14
; SSE-NEXT: cmovleq %r14, %rsi
; SSE-NEXT: cmovgq %rcx, %rbx
; SSE-NEXT: movq %rbx, %rdi
; SSE-NEXT: negq %rdi
; SSE-NEXT: movl $0, %edi
; SSE-NEXT: sbbq %rsi, %rdi
; SSE-NEXT: cmovgeq %rcx, %rbx
; SSE-NEXT: movq %rax, %rsi
; SSE-NEXT: negq %rsi
; SSE-NEXT: movl $0, %esi
; SSE-NEXT: sbbq %rdx, %rsi
; SSE-NEXT: cmovgeq %rcx, %rax
; SSE-NEXT: movq %rax, %xmm0
; SSE-NEXT: movq %rbx, %xmm1
; SSE-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSE-NEXT: addq $24, %rsp
; SSE-NEXT: popq %rbx
; SSE-NEXT: popq %r14
; SSE-NEXT: retq
;
; AVX-LABEL: ustest_f64i64:
; AVX: # %bb.0: # %entry
; AVX-NEXT: pushq %r14
; AVX-NEXT: pushq %rbx
; AVX-NEXT: subq $24, %rsp
; AVX-NEXT: vmovapd %xmm0, (%rsp) # 16-byte Spill
; AVX-NEXT: vshufpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX-NEXT: callq __fixdfti@PLT
; AVX-NEXT: movq %rax, %rbx
; AVX-NEXT: movq %rdx, %r14
; AVX-NEXT: vmovaps (%rsp), %xmm0 # 16-byte Reload
; AVX-NEXT: callq __fixdfti@PLT
; AVX-NEXT: xorl %ecx, %ecx
; AVX-NEXT: testq %rdx, %rdx
; AVX-NEXT: movl $1, %esi
; AVX-NEXT: cmovgq %rsi, %rdx
; AVX-NEXT: cmovgq %rcx, %rax
; AVX-NEXT: testq %r14, %r14
; AVX-NEXT: cmovleq %r14, %rsi
; AVX-NEXT: cmovgq %rcx, %rbx
; AVX-NEXT: movq %rbx, %rdi
; AVX-NEXT: negq %rdi
; AVX-NEXT: movl $0, %edi
; AVX-NEXT: sbbq %rsi, %rdi
; AVX-NEXT: cmovgeq %rcx, %rbx
; AVX-NEXT: movq %rax, %rsi
; AVX-NEXT: negq %rsi
; AVX-NEXT: movl $0, %esi
; AVX-NEXT: sbbq %rdx, %rsi
; AVX-NEXT: cmovgeq %rcx, %rax
; AVX-NEXT: vmovq %rax, %xmm0
; AVX-NEXT: vmovq %rbx, %xmm1
; AVX-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; AVX-NEXT: addq $24, %rsp
; AVX-NEXT: popq %rbx
; AVX-NEXT: popq %r14
; AVX-NEXT: retq
entry:
  %conv = fptosi <2 x double> %x to <2 x i128>
  %0 = icmp slt <2 x i128> %conv, <i128 18446744073709551616, i128 18446744073709551616>
  %spec.store.select = select <2 x i1> %0, <2 x i128> %conv, <2 x i128> <i128 18446744073709551616, i128 18446744073709551616>
  %1 = icmp sgt <2 x i128> %spec.store.select, zeroinitializer
  %spec.store.select7 = select <2 x i1> %1, <2 x i128> %spec.store.select, <2 x i128> zeroinitializer
  %conv6 = trunc <2 x i128> %spec.store.select7 to <2 x i64>
  ret <2 x i64> %conv6
}
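
; ustest_f64i64 clamps a signed i128 to [0, 2^64]: cmovg/cmovle apply the
; upper bound, and the lower bound compares the 128-bit value against zero
; with a negq (which materializes a carry from the low half) folded into an
; sbbq on the high half, feeding cmovge.
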
define <2 x i64> @stest_f32i64(<2 x float> %x) nounwind {
; SSE-LABEL: stest_f32i64:
; SSE: # %bb.0: # %entry
; SSE-NEXT: pushq %r14
; SSE-NEXT: pushq %rbx
; SSE-NEXT: subq $24, %rsp
; SSE-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1,1,1]
; SSE-NEXT: callq __fixsfti@PLT
; SSE-NEXT: movq %rax, %rbx
; SSE-NEXT: movq %rdx, %r14
; SSE-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload
; SSE-NEXT: callq __fixsfti@PLT
; SSE-NEXT: xorl %ecx, %ecx
; SSE-NEXT: movabsq $9223372036854775807, %rsi # imm = 0x7FFFFFFFFFFFFFFF
; SSE-NEXT: cmpq %rsi, %rax
; SSE-NEXT: movq %rdx, %rdi
; SSE-NEXT: sbbq $0, %rdi
; SSE-NEXT: cmovgeq %rcx, %rdx
; SSE-NEXT: cmovgeq %rsi, %rax
; SSE-NEXT: cmpq %rsi, %rbx
; SSE-NEXT: movq %r14, %rdi
; SSE-NEXT: sbbq $0, %rdi
; SSE-NEXT: cmovlq %r14, %rcx
; SSE-NEXT: cmovlq %rbx, %rsi
; SSE-NEXT: movabsq $-9223372036854775808, %rdi # imm = 0x8000000000000000
; SSE-NEXT: cmpq %rsi, %rdi
; SSE-NEXT: movq $-1, %r8
; SSE-NEXT: movq $-1, %r9
; SSE-NEXT: sbbq %rcx, %r9
; SSE-NEXT: cmovgeq %rdi, %rsi
; SSE-NEXT: cmpq %rax, %rdi
; SSE-NEXT: sbbq %rdx, %r8
; SSE-NEXT: cmovgeq %rdi, %rax
; SSE-NEXT: movq %rax, %xmm0
; SSE-NEXT: movq %rsi, %xmm1
; SSE-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSE-NEXT: addq $24, %rsp
; SSE-NEXT: popq %rbx
; SSE-NEXT: popq %r14
; SSE-NEXT: retq
;
; AVX2-LABEL: stest_f32i64:
; AVX2: # %bb.0: # %entry
; AVX2-NEXT: pushq %r14
; AVX2-NEXT: pushq %rbx
; AVX2-NEXT: subq $24, %rsp
; AVX2-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX2-NEXT: vmovshdup {{.*#+}} xmm0 = xmm0[1,1,3,3]
; AVX2-NEXT: callq __fixsfti@PLT
; AVX2-NEXT: movq %rax, %rbx
; AVX2-NEXT: movq %rdx, %r14
; AVX2-NEXT: vmovaps (%rsp), %xmm0 # 16-byte Reload
; AVX2-NEXT: callq __fixsfti@PLT
; AVX2-NEXT: xorl %ecx, %ecx
; AVX2-NEXT: movabsq $9223372036854775807, %rsi # imm = 0x7FFFFFFFFFFFFFFF
; AVX2-NEXT: cmpq %rsi, %rax
; AVX2-NEXT: movq %rdx, %rdi
; AVX2-NEXT: sbbq $0, %rdi
; AVX2-NEXT: cmovgeq %rcx, %rdx
; AVX2-NEXT: cmovgeq %rsi, %rax
; AVX2-NEXT: cmpq %rsi, %rbx
; AVX2-NEXT: movq %r14, %rdi
; AVX2-NEXT: sbbq $0, %rdi
; AVX2-NEXT: cmovlq %r14, %rcx
; AVX2-NEXT: cmovlq %rbx, %rsi
; AVX2-NEXT: movabsq $-9223372036854775808, %rdi # imm = 0x8000000000000000
; AVX2-NEXT: cmpq %rsi, %rdi
; AVX2-NEXT: movq $-1, %r8
; AVX2-NEXT: sbbq %rcx, %r8
; AVX2-NEXT: movq $-1, %rcx
; AVX2-NEXT: cmovgeq %rdi, %rsi
; AVX2-NEXT: cmpq %rax, %rdi
; AVX2-NEXT: sbbq %rdx, %rcx
; AVX2-NEXT: cmovgeq %rdi, %rax
; AVX2-NEXT: vmovq %rax, %xmm0
; AVX2-NEXT: vmovq %rsi, %xmm1
; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; AVX2-NEXT: addq $24, %rsp
; AVX2-NEXT: popq %rbx
; AVX2-NEXT: popq %r14
; AVX2-NEXT: retq
;
; AVX512-LABEL: stest_f32i64:
; AVX512: # %bb.0: # %entry
; AVX512-NEXT: pushq %r14
; AVX512-NEXT: pushq %rbx
; AVX512-NEXT: subq $24, %rsp
; AVX512-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX512-NEXT: vmovshdup {{.*#+}} xmm0 = xmm0[1,1,3,3]
; AVX512-NEXT: callq __fixsfti@PLT
; AVX512-NEXT: movq %rax, %rbx
; AVX512-NEXT: movq %rdx, %r14
; AVX512-NEXT: vmovaps (%rsp), %xmm0 # 16-byte Reload
; AVX512-NEXT: callq __fixsfti@PLT
; AVX512-NEXT: xorl %ecx, %ecx
; AVX512-NEXT: movabsq $9223372036854775807, %rsi # imm = 0x7FFFFFFFFFFFFFFF
; AVX512-NEXT: cmpq %rsi, %rax
; AVX512-NEXT: movq %rdx, %rdi
; AVX512-NEXT: sbbq $0, %rdi
; AVX512-NEXT: cmovgeq %rcx, %rdx
; AVX512-NEXT: cmovgeq %rsi, %rax
; AVX512-NEXT: cmpq %rsi, %rbx
; AVX512-NEXT: movq %r14, %rdi
; AVX512-NEXT: sbbq $0, %rdi
; AVX512-NEXT: cmovlq %r14, %rcx
; AVX512-NEXT: cmovlq %rbx, %rsi
; AVX512-NEXT: movabsq $-9223372036854775808, %rdi # imm = 0x8000000000000000
; AVX512-NEXT: cmpq %rsi, %rdi
; AVX512-NEXT: movq $-1, %r8
; AVX512-NEXT: movq $-1, %r9
; AVX512-NEXT: sbbq %rcx, %r9
; AVX512-NEXT: cmovgeq %rdi, %rsi
; AVX512-NEXT: cmpq %rax, %rdi
; AVX512-NEXT: sbbq %rdx, %r8
; AVX512-NEXT: cmovgeq %rdi, %rax
; AVX512-NEXT: vmovq %rax, %xmm0
; AVX512-NEXT: vmovq %rsi, %xmm1
; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; AVX512-NEXT: addq $24, %rsp
; AVX512-NEXT: popq %rbx
; AVX512-NEXT: popq %r14
; AVX512-NEXT: retq
entry:
  %conv = fptosi <2 x float> %x to <2 x i128>
  %0 = icmp slt <2 x i128> %conv, <i128 9223372036854775807, i128 9223372036854775807>
  %spec.store.select = select <2 x i1> %0, <2 x i128> %conv, <2 x i128> <i128 9223372036854775807, i128 9223372036854775807>
  %1 = icmp sgt <2 x i128> %spec.store.select, <i128 -9223372036854775808, i128 -9223372036854775808>
  %spec.store.select7 = select <2 x i1> %1, <2 x i128> %spec.store.select, <2 x i128> <i128 -9223372036854775808, i128 -9223372036854775808>
  %conv6 = trunc <2 x i128> %spec.store.select7 to <2 x i64>
  ret <2 x i64> %conv6
}

define <2 x i64> @utest_f32i64(<2 x float> %x) nounwind {
; SSE-LABEL: utest_f32i64:
; SSE: # %bb.0: # %entry
; SSE-NEXT: pushq %r14
; SSE-NEXT: pushq %rbx
; SSE-NEXT: subq $24, %rsp
; SSE-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
; SSE-NEXT: callq __fixunssfti@PLT
; SSE-NEXT: movq %rax, %rbx
; SSE-NEXT: movq %rdx, %r14
; SSE-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload
; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1,1,1]
; SSE-NEXT: callq __fixunssfti@PLT
; SSE-NEXT: xorl %ecx, %ecx
; SSE-NEXT: testq %rdx, %rdx
; SSE-NEXT: cmovneq %rcx, %rax
; SSE-NEXT: testq %r14, %r14
; SSE-NEXT: cmovneq %rcx, %rbx
; SSE-NEXT: movq %rbx, %xmm0
; SSE-NEXT: movq %rax, %xmm1
; SSE-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSE-NEXT: addq $24, %rsp
; SSE-NEXT: popq %rbx
; SSE-NEXT: popq %r14
; SSE-NEXT: retq
;
; AVX-LABEL: utest_f32i64:
; AVX: # %bb.0: # %entry
; AVX-NEXT: pushq %r14
; AVX-NEXT: pushq %rbx
; AVX-NEXT: subq $24, %rsp
; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX-NEXT: callq __fixunssfti@PLT
; AVX-NEXT: movq %rax, %rbx
; AVX-NEXT: movq %rdx, %r14
; AVX-NEXT: vmovshdup (%rsp), %xmm0 # 16-byte Folded Reload
; AVX-NEXT: # xmm0 = mem[1,1,3,3]
; AVX-NEXT: callq __fixunssfti@PLT
; AVX-NEXT: xorl %ecx, %ecx
; AVX-NEXT: testq %rdx, %rdx
; AVX-NEXT: cmovneq %rcx, %rax
; AVX-NEXT: testq %r14, %r14
; AVX-NEXT: cmovneq %rcx, %rbx
; AVX-NEXT: vmovq %rbx, %xmm0
; AVX-NEXT: vmovq %rax, %xmm1
; AVX-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; AVX-NEXT: addq $24, %rsp
; AVX-NEXT: popq %rbx
; AVX-NEXT: popq %r14
; AVX-NEXT: retq
entry:
  %conv = fptoui <2 x float> %x to <2 x i128>
  %0 = icmp ult <2 x i128> %conv, <i128 18446744073709551616, i128 18446744073709551616>
  %spec.store.select = select <2 x i1> %0, <2 x i128> %conv, <2 x i128> <i128 18446744073709551616, i128 18446744073709551616>
  %conv6 = trunc <2 x i128> %spec.store.select to <2 x i64>
  ret <2 x i64> %conv6
}

define <2 x i64> @ustest_f32i64(<2 x float> %x) nounwind {
; SSE-LABEL: ustest_f32i64:
; SSE: # %bb.0: # %entry
; SSE-NEXT: pushq %r14
; SSE-NEXT: pushq %rbx
; SSE-NEXT: subq $24, %rsp
; SSE-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1,1,1]
; SSE-NEXT: callq __fixsfti@PLT
; SSE-NEXT: movq %rax, %rbx
; SSE-NEXT: movq %rdx, %r14
; SSE-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload
; SSE-NEXT: callq __fixsfti@PLT
; SSE-NEXT: xorl %ecx, %ecx
; SSE-NEXT: testq %rdx, %rdx
; SSE-NEXT: movl $1, %esi
; SSE-NEXT: cmovgq %rsi, %rdx
; SSE-NEXT: cmovgq %rcx, %rax
; SSE-NEXT: testq %r14, %r14
; SSE-NEXT: cmovleq %r14, %rsi
; SSE-NEXT: cmovgq %rcx, %rbx
; SSE-NEXT: movq %rbx, %rdi
; SSE-NEXT: negq %rdi
; SSE-NEXT: movl $0, %edi
; SSE-NEXT: sbbq %rsi, %rdi
; SSE-NEXT: cmovgeq %rcx, %rbx
; SSE-NEXT: movq %rax, %rsi
; SSE-NEXT: negq %rsi
; SSE-NEXT: movl $0, %esi
; SSE-NEXT: sbbq %rdx, %rsi
; SSE-NEXT: cmovgeq %rcx, %rax
; SSE-NEXT: movq %rax, %xmm0
; SSE-NEXT: movq %rbx, %xmm1
; SSE-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSE-NEXT: addq $24, %rsp
; SSE-NEXT: popq %rbx
; SSE-NEXT: popq %r14
; SSE-NEXT: retq
;
; AVX-LABEL: ustest_f32i64:
; AVX: # %bb.0: # %entry
; AVX-NEXT: pushq %r14
; AVX-NEXT: pushq %rbx
; AVX-NEXT: subq $24, %rsp
; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX-NEXT: vmovshdup {{.*#+}} xmm0 = xmm0[1,1,3,3]
; AVX-NEXT: callq __fixsfti@PLT
; AVX-NEXT: movq %rax, %rbx
; AVX-NEXT: movq %rdx, %r14
; AVX-NEXT: vmovaps (%rsp), %xmm0 # 16-byte Reload
; AVX-NEXT: callq __fixsfti@PLT
; AVX-NEXT: xorl %ecx, %ecx
; AVX-NEXT: testq %rdx, %rdx
; AVX-NEXT: movl $1, %esi
; AVX-NEXT: cmovgq %rsi, %rdx
; AVX-NEXT: cmovgq %rcx, %rax
; AVX-NEXT: testq %r14, %r14
; AVX-NEXT: cmovleq %r14, %rsi
; AVX-NEXT: cmovgq %rcx, %rbx
; AVX-NEXT: movq %rbx, %rdi
; AVX-NEXT: negq %rdi
; AVX-NEXT: movl $0, %edi
; AVX-NEXT: sbbq %rsi, %rdi
; AVX-NEXT: cmovgeq %rcx, %rbx
; AVX-NEXT: movq %rax, %rsi
; AVX-NEXT: negq %rsi
; AVX-NEXT: movl $0, %esi
; AVX-NEXT: sbbq %rdx, %rsi
; AVX-NEXT: cmovgeq %rcx, %rax
; AVX-NEXT: vmovq %rax, %xmm0
; AVX-NEXT: vmovq %rbx, %xmm1
; AVX-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; AVX-NEXT: addq $24, %rsp
; AVX-NEXT: popq %rbx
; AVX-NEXT: popq %r14
; AVX-NEXT: retq
entry:
  %conv = fptosi <2 x float> %x to <2 x i128>
  %0 = icmp slt <2 x i128> %conv, <i128 18446744073709551616, i128 18446744073709551616>
  %spec.store.select = select <2 x i1> %0, <2 x i128> %conv, <2 x i128> <i128 18446744073709551616, i128 18446744073709551616>
  %1 = icmp sgt <2 x i128> %spec.store.select, zeroinitializer
  %spec.store.select7 = select <2 x i1> %1, <2 x i128> %spec.store.select, <2 x i128> zeroinitializer
  %conv6 = trunc <2 x i128> %spec.store.select7 to <2 x i64>
  ret <2 x i64> %conv6
}

define <2 x i64> @stest_f16i64(<2 x half> %x) nounwind {
; SSE-LABEL: stest_f16i64:
; SSE: # %bb.0: # %entry
; SSE-NEXT: pushq %r14
; SSE-NEXT: pushq %rbx
; SSE-NEXT: subq $24, %rsp
; SSE-NEXT: movdqa %xmm0, (%rsp) # 16-byte Spill
; SSE-NEXT: psrld $16, %xmm0
; SSE-NEXT: callq __fixhfti@PLT
; SSE-NEXT: movq %rax, %rbx
; SSE-NEXT: movq %rdx, %r14
; SSE-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload
; SSE-NEXT: callq __fixhfti@PLT
; SSE-NEXT: xorl %ecx, %ecx
; SSE-NEXT: movabsq $9223372036854775807, %rsi # imm = 0x7FFFFFFFFFFFFFFF
; SSE-NEXT: cmpq %rsi, %rax
; SSE-NEXT: movq %rdx, %rdi
; SSE-NEXT: sbbq $0, %rdi
; SSE-NEXT: cmovgeq %rcx, %rdx
; SSE-NEXT: cmovgeq %rsi, %rax
; SSE-NEXT: cmpq %rsi, %rbx
; SSE-NEXT: movq %r14, %rdi
; SSE-NEXT: sbbq $0, %rdi
; SSE-NEXT: cmovlq %r14, %rcx
; SSE-NEXT: cmovlq %rbx, %rsi
; SSE-NEXT: movabsq $-9223372036854775808, %rdi # imm = 0x8000000000000000
; SSE-NEXT: cmpq %rsi, %rdi
; SSE-NEXT: movq $-1, %r8
; SSE-NEXT: movq $-1, %r9
; SSE-NEXT: sbbq %rcx, %r9
; SSE-NEXT: cmovgeq %rdi, %rsi
; SSE-NEXT: cmpq %rax, %rdi
; SSE-NEXT: sbbq %rdx, %r8
; SSE-NEXT: cmovgeq %rdi, %rax
; SSE-NEXT: movq %rax, %xmm0
; SSE-NEXT: movq %rsi, %xmm1
; SSE-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSE-NEXT: addq $24, %rsp
; SSE-NEXT: popq %rbx
; SSE-NEXT: popq %r14
; SSE-NEXT: retq
;
; AVX2-LABEL: stest_f16i64:
; AVX2: # %bb.0: # %entry
; AVX2-NEXT: pushq %r14
; AVX2-NEXT: pushq %rbx
; AVX2-NEXT: subq $24, %rsp
; AVX2-NEXT: vmovdqa %xmm0, (%rsp) # 16-byte Spill
; AVX2-NEXT: vpsrld $16, %xmm0, %xmm0
; AVX2-NEXT: callq __fixhfti@PLT
; AVX2-NEXT: movq %rax, %rbx
; AVX2-NEXT: movq %rdx, %r14
; AVX2-NEXT: vmovaps (%rsp), %xmm0 # 16-byte Reload
; AVX2-NEXT: callq __fixhfti@PLT
; AVX2-NEXT: xorl %ecx, %ecx
; AVX2-NEXT: movabsq $9223372036854775807, %rsi # imm = 0x7FFFFFFFFFFFFFFF
; AVX2-NEXT: cmpq %rsi, %rax
; AVX2-NEXT: movq %rdx, %rdi
; AVX2-NEXT: sbbq $0, %rdi
; AVX2-NEXT: cmovgeq %rcx, %rdx
; AVX2-NEXT: cmovgeq %rsi, %rax
; AVX2-NEXT: cmpq %rsi, %rbx
; AVX2-NEXT: movq %r14, %rdi
; AVX2-NEXT: sbbq $0, %rdi
; AVX2-NEXT: cmovlq %r14, %rcx
; AVX2-NEXT: cmovlq %rbx, %rsi
; AVX2-NEXT: movabsq $-9223372036854775808, %rdi # imm = 0x8000000000000000
; AVX2-NEXT: cmpq %rsi, %rdi
; AVX2-NEXT: movq $-1, %r8
; AVX2-NEXT: sbbq %rcx, %r8
; AVX2-NEXT: movq $-1, %rcx
; AVX2-NEXT: cmovgeq %rdi, %rsi
; AVX2-NEXT: cmpq %rax, %rdi
; AVX2-NEXT: sbbq %rdx, %rcx
; AVX2-NEXT: cmovgeq %rdi, %rax
; AVX2-NEXT: vmovq %rax, %xmm0
; AVX2-NEXT: vmovq %rsi, %xmm1
; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; AVX2-NEXT: addq $24, %rsp
; AVX2-NEXT: popq %rbx
; AVX2-NEXT: popq %r14
; AVX2-NEXT: retq
;
; AVX512-LABEL: stest_f16i64:
; AVX512: # %bb.0: # %entry
; AVX512-NEXT: pushq %r14
; AVX512-NEXT: pushq %rbx
; AVX512-NEXT: subq $24, %rsp
; AVX512-NEXT: vmovdqa %xmm0, (%rsp) # 16-byte Spill
; AVX512-NEXT: vpsrld $16, %xmm0, %xmm0
; AVX512-NEXT: callq __fixhfti@PLT
; AVX512-NEXT: movq %rax, %rbx
; AVX512-NEXT: movq %rdx, %r14
; AVX512-NEXT: vmovaps (%rsp), %xmm0 # 16-byte Reload
; AVX512-NEXT: callq __fixhfti@PLT
; AVX512-NEXT: xorl %ecx, %ecx
; AVX512-NEXT: movabsq $9223372036854775807, %rsi # imm = 0x7FFFFFFFFFFFFFFF
; AVX512-NEXT: cmpq %rsi, %rax
; AVX512-NEXT: movq %rdx, %rdi
; AVX512-NEXT: sbbq $0, %rdi
; AVX512-NEXT: cmovgeq %rcx, %rdx
; AVX512-NEXT: cmovgeq %rsi, %rax
; AVX512-NEXT: cmpq %rsi, %rbx
; AVX512-NEXT: movq %r14, %rdi
; AVX512-NEXT: sbbq $0, %rdi
; AVX512-NEXT: cmovlq %r14, %rcx
; AVX512-NEXT: cmovlq %rbx, %rsi
; AVX512-NEXT: movabsq $-9223372036854775808, %rdi # imm = 0x8000000000000000
; AVX512-NEXT: cmpq %rsi, %rdi
; AVX512-NEXT: movq $-1, %r8
; AVX512-NEXT: movq $-1, %r9
; AVX512-NEXT: sbbq %rcx, %r9
; AVX512-NEXT: cmovgeq %rdi, %rsi
; AVX512-NEXT: cmpq %rax, %rdi
; AVX512-NEXT: sbbq %rdx, %r8
; AVX512-NEXT: cmovgeq %rdi, %rax
; AVX512-NEXT: vmovq %rax, %xmm0
; AVX512-NEXT: vmovq %rsi, %xmm1
; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; AVX512-NEXT: addq $24, %rsp
; AVX512-NEXT: popq %rbx
; AVX512-NEXT: popq %r14
; AVX512-NEXT: retq
entry:
  %conv = fptosi <2 x half> %x to <2 x i128>
  %0 = icmp slt <2 x i128> %conv, <i128 9223372036854775807, i128 9223372036854775807>
  %spec.store.select = select <2 x i1> %0, <2 x i128> %conv, <2 x i128> <i128 9223372036854775807, i128 9223372036854775807>
  %1 = icmp sgt <2 x i128> %spec.store.select, <i128 -9223372036854775808, i128 -9223372036854775808>
  %spec.store.select7 = select <2 x i1> %1, <2 x i128> %spec.store.select, <2 x i128> <i128 -9223372036854775808, i128 -9223372036854775808>
  %conv6 = trunc <2 x i128> %spec.store.select7 to <2 x i64>
  ret <2 x i64> %conv6
}
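
; The half cases reuse the same clamp code; only the conversion differs,
; going through __fixhfti (and __fixunshfti below) after a psrld $16 moves
; the second half element into lane 0.
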
define <2 x i64> @utesth_f16i64(<2 x half> %x) nounwind {
; SSE-LABEL: utesth_f16i64:
; SSE: # %bb.0: # %entry
; SSE-NEXT: pushq %r14
; SSE-NEXT: pushq %rbx
; SSE-NEXT: subq $24, %rsp
; SSE-NEXT: movdqa %xmm0, %xmm1
; SSE-NEXT: psrld $16, %xmm1
; SSE-NEXT: movdqa %xmm1, (%rsp) # 16-byte Spill
; SSE-NEXT: callq __fixunshfti@PLT
; SSE-NEXT: movq %rax, %rbx
; SSE-NEXT: movq %rdx, %r14
; SSE-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload
; SSE-NEXT: callq __fixunshfti@PLT
; SSE-NEXT: xorl %ecx, %ecx
; SSE-NEXT: testq %rdx, %rdx
; SSE-NEXT: cmovneq %rcx, %rax
; SSE-NEXT: testq %r14, %r14
; SSE-NEXT: cmovneq %rcx, %rbx
; SSE-NEXT: movq %rbx, %xmm0
; SSE-NEXT: movq %rax, %xmm1
; SSE-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSE-NEXT: addq $24, %rsp
; SSE-NEXT: popq %rbx
; SSE-NEXT: popq %r14
; SSE-NEXT: retq
;
; AVX2-LABEL: utesth_f16i64:
; AVX2: # %bb.0: # %entry
; AVX2-NEXT: pushq %r14
; AVX2-NEXT: pushq %rbx
; AVX2-NEXT: subq $24, %rsp
; AVX2-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX2-NEXT: callq __fixunshfti@PLT
; AVX2-NEXT: movq %rax, %rbx
; AVX2-NEXT: movq %rdx, %r14
; AVX2-NEXT: vmovdqa (%rsp), %xmm0 # 16-byte Reload
; AVX2-NEXT: vpsrld $16, %xmm0, %xmm0
; AVX2-NEXT: callq __fixunshfti@PLT
; AVX2-NEXT: xorl %ecx, %ecx
; AVX2-NEXT: testq %rdx, %rdx
; AVX2-NEXT: cmovneq %rcx, %rax
; AVX2-NEXT: testq %r14, %r14
; AVX2-NEXT: cmovneq %rcx, %rbx
; AVX2-NEXT: vmovq %rbx, %xmm0
; AVX2-NEXT: vmovq %rax, %xmm1
; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; AVX2-NEXT: addq $24, %rsp
; AVX2-NEXT: popq %rbx
; AVX2-NEXT: popq %r14
; AVX2-NEXT: retq
;
; AVX512-LABEL: utesth_f16i64:
; AVX512: # %bb.0: # %entry
; AVX512-NEXT: pushq %r14
; AVX512-NEXT: pushq %rbx
; AVX512-NEXT: subq $24, %rsp
; AVX512-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX512-NEXT: callq __fixunshfti@PLT
; AVX512-NEXT: movq %rax, %rbx
; AVX512-NEXT: movq %rdx, %r14
; AVX512-NEXT: vpsrld $16, (%rsp), %xmm0 # 16-byte Folded Reload
; AVX512-NEXT: callq __fixunshfti@PLT
; AVX512-NEXT: xorl %ecx, %ecx
; AVX512-NEXT: testq %rdx, %rdx
; AVX512-NEXT: cmovneq %rcx, %rax
; AVX512-NEXT: testq %r14, %r14
; AVX512-NEXT: cmovneq %rcx, %rbx
; AVX512-NEXT: vmovq %rbx, %xmm0
; AVX512-NEXT: vmovq %rax, %xmm1
; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; AVX512-NEXT: addq $24, %rsp
; AVX512-NEXT: popq %rbx
; AVX512-NEXT: popq %r14
; AVX512-NEXT: retq
entry:
  %conv = fptoui <2 x half> %x to <2 x i128>
  %0 = icmp ult <2 x i128> %conv, <i128 18446744073709551616, i128 18446744073709551616>
  %spec.store.select = select <2 x i1> %0, <2 x i128> %conv, <2 x i128> <i128 18446744073709551616, i128 18446744073709551616>
  %conv6 = trunc <2 x i128> %spec.store.select to <2 x i64>
  ret <2 x i64> %conv6
}

define <2 x i64> @ustest_f16i64(<2 x half> %x) nounwind {
; SSE-LABEL: ustest_f16i64:
; SSE: # %bb.0: # %entry
; SSE-NEXT: pushq %r14
; SSE-NEXT: pushq %rbx
; SSE-NEXT: subq $24, %rsp
; SSE-NEXT: movdqa %xmm0, (%rsp) # 16-byte Spill
; SSE-NEXT: psrld $16, %xmm0
; SSE-NEXT: callq __fixhfti@PLT
; SSE-NEXT: movq %rax, %rbx
; SSE-NEXT: movq %rdx, %r14
; SSE-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload
; SSE-NEXT: callq __fixhfti@PLT
; SSE-NEXT: xorl %ecx, %ecx
; SSE-NEXT: testq %rdx, %rdx
; SSE-NEXT: movl $1, %esi
; SSE-NEXT: cmovgq %rsi, %rdx
; SSE-NEXT: cmovgq %rcx, %rax
; SSE-NEXT: testq %r14, %r14
; SSE-NEXT: cmovleq %r14, %rsi
; SSE-NEXT: cmovgq %rcx, %rbx
; SSE-NEXT: movq %rbx, %rdi
; SSE-NEXT: negq %rdi
; SSE-NEXT: movl $0, %edi
; SSE-NEXT: sbbq %rsi, %rdi
; SSE-NEXT: cmovgeq %rcx, %rbx
; SSE-NEXT: movq %rax, %rsi
; SSE-NEXT: negq %rsi
; SSE-NEXT: movl $0, %esi
; SSE-NEXT: sbbq %rdx, %rsi
; SSE-NEXT: cmovgeq %rcx, %rax
; SSE-NEXT: movq %rax, %xmm0
; SSE-NEXT: movq %rbx, %xmm1
; SSE-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSE-NEXT: addq $24, %rsp
; SSE-NEXT: popq %rbx
; SSE-NEXT: popq %r14
; SSE-NEXT: retq
;
; AVX-LABEL: ustest_f16i64:
; AVX: # %bb.0: # %entry
; AVX-NEXT: pushq %r14
; AVX-NEXT: pushq %rbx
; AVX-NEXT: subq $24, %rsp
; AVX-NEXT: vmovdqa %xmm0, (%rsp) # 16-byte Spill
; AVX-NEXT: vpsrld $16, %xmm0, %xmm0
; AVX-NEXT: callq __fixhfti@PLT
; AVX-NEXT: movq %rax, %rbx
; AVX-NEXT: movq %rdx, %r14
; AVX-NEXT: vmovaps (%rsp), %xmm0 # 16-byte Reload
; AVX-NEXT: callq __fixhfti@PLT
; AVX-NEXT: xorl %ecx, %ecx
; AVX-NEXT: testq %rdx, %rdx
; AVX-NEXT: movl $1, %esi
; AVX-NEXT: cmovgq %rsi, %rdx
; AVX-NEXT: cmovgq %rcx, %rax
; AVX-NEXT: testq %r14, %r14
; AVX-NEXT: cmovleq %r14, %rsi
; AVX-NEXT: cmovgq %rcx, %rbx
; AVX-NEXT: movq %rbx, %rdi
; AVX-NEXT: negq %rdi
; AVX-NEXT: movl $0, %edi
; AVX-NEXT: sbbq %rsi, %rdi
; AVX-NEXT: cmovgeq %rcx, %rbx
; AVX-NEXT: movq %rax, %rsi
; AVX-NEXT: negq %rsi
; AVX-NEXT: movl $0, %esi
; AVX-NEXT: sbbq %rdx, %rsi
; AVX-NEXT: cmovgeq %rcx, %rax
; AVX-NEXT: vmovq %rax, %xmm0
; AVX-NEXT: vmovq %rbx, %xmm1
; AVX-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; AVX-NEXT: addq $24, %rsp
; AVX-NEXT: popq %rbx
; AVX-NEXT: popq %r14
; AVX-NEXT: retq
entry:
  %conv = fptosi <2 x half> %x to <2 x i128>
  %0 = icmp slt <2 x i128> %conv, <i128 18446744073709551616, i128 18446744073709551616>
  %spec.store.select = select <2 x i1> %0, <2 x i128> %conv, <2 x i128> <i128 18446744073709551616, i128 18446744073709551616>
  %1 = icmp sgt <2 x i128> %spec.store.select, zeroinitializer
  %spec.store.select7 = select <2 x i1> %1, <2 x i128> %spec.store.select, <2 x i128> zeroinitializer
  %conv6 = trunc <2 x i128> %spec.store.select7 to <2 x i64>
  ret <2 x i64> %conv6
}
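
; The *_mm variants below repeat every clamp with the llvm.smin, llvm.smax,
; and llvm.umin intrinsics instead of icmp+select pairs; codegen is expected
; to match the pattern-matched forms above.
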
define <2 x i32> @stest_f64i32_mm(<2 x double> %x) nounwind {
; SSE-LABEL: stest_f64i32_mm:
; SSE: # %bb.0: # %entry
; SSE-NEXT: cvttsd2si %xmm0, %rax
; SSE-NEXT: movq %rax, %xmm1
; SSE-NEXT: unpckhpd {{.*#+}} xmm0 = xmm0[1,1]
; SSE-NEXT: cvttsd2si %xmm0, %rax
; SSE-NEXT: movq %rax, %xmm0
; SSE-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
; SSE-NEXT: movdqa {{.*#+}} xmm0 = [2147483648,2147483648]
; SSE-NEXT: movdqa %xmm1, %xmm2
; SSE-NEXT: pxor %xmm0, %xmm2
; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm2[1,1,3,3]
; SSE-NEXT: pxor %xmm4, %xmm4
; SSE-NEXT: pcmpeqd %xmm3, %xmm4
; SSE-NEXT: movdqa {{.*#+}} xmm3 = [4294967295,4294967295]
; SSE-NEXT: pcmpgtd %xmm2, %xmm3
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm3[0,0,2,2]
; SSE-NEXT: pand %xmm4, %xmm2
; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
; SSE-NEXT: por %xmm2, %xmm3
; SSE-NEXT: pand %xmm3, %xmm1
; SSE-NEXT: pandn {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3
; SSE-NEXT: por %xmm1, %xmm3
; SSE-NEXT: pxor %xmm3, %xmm0
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
; SSE-NEXT: pcmpeqd %xmm2, %xmm2
; SSE-NEXT: pcmpeqd %xmm1, %xmm2
; SSE-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
; SSE-NEXT: pand %xmm2, %xmm1
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
; SSE-NEXT: por %xmm1, %xmm0
; SSE-NEXT: pand %xmm0, %xmm3
; SSE-NEXT: pandn {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE-NEXT: por %xmm3, %xmm0
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SSE-NEXT: retq
;
; AVX2-LABEL: stest_f64i32_mm:
; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vcvttsd2si %xmm0, %rax
; AVX2-NEXT: vshufpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX2-NEXT: vcvttsd2si %xmm0, %rcx
; AVX2-NEXT: vmovq %rax, %xmm0
; AVX2-NEXT: vmovq %rcx, %xmm1
; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; AVX2-NEXT: vpbroadcastq {{.*#+}} xmm1 = [2147483647,2147483647]
; AVX2-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm2
; AVX2-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
; AVX2-NEXT: vpbroadcastq {{.*#+}} xmm1 = [18446744071562067968,18446744071562067968]
; AVX2-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm2
; AVX2-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
; AVX2-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2,2,3]
; AVX2-NEXT: retq
;
; AVX512-LABEL: stest_f64i32_mm:
; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vcvttpd2qq %xmm0, %xmm0
; AVX512-NEXT: vpmovsqd %xmm0, %xmm0
; AVX512-NEXT: retq
entry:
  %conv = fptosi <2 x double> %x to <2 x i64>
  %spec.store.select = call <2 x i64> @llvm.smin.v2i64(<2 x i64> %conv, <2 x i64> <i64 2147483647, i64 2147483647>)
  %spec.store.select7 = call <2 x i64> @llvm.smax.v2i64(<2 x i64> %spec.store.select, <2 x i64> <i64 -2147483648, i64 -2147483648>)
  %conv6 = trunc <2 x i64> %spec.store.select7 to <2 x i32>
  ret <2 x i32> %conv6
}
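
; The min/max intrinsics used here carry the usual two-operand signatures;
; their declarations (not shown in this excerpt) have the shape:
;   declare <2 x i64> @llvm.smin.v2i64(<2 x i64>, <2 x i64>)
;   declare <2 x i64> @llvm.smax.v2i64(<2 x i64>, <2 x i64>)
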
define <2 x i32> @utest_f64i32_mm(<2 x double> %x) nounwind {
; SSE-LABEL: utest_f64i32_mm:
; SSE: # %bb.0: # %entry
; SSE-NEXT: movsd {{.*#+}} xmm1 = [9.2233720368547758E+18,0.0E+0]
; SSE-NEXT: movapd %xmm0, %xmm2
; SSE-NEXT: subsd %xmm1, %xmm2
; SSE-NEXT: cvttsd2si %xmm2, %rax
; SSE-NEXT: cvttsd2si %xmm0, %rcx
; SSE-NEXT: movq %rcx, %rdx
; SSE-NEXT: sarq $63, %rdx
; SSE-NEXT: andq %rax, %rdx
; SSE-NEXT: orq %rcx, %rdx
; SSE-NEXT: movq %rdx, %xmm2
; SSE-NEXT: unpckhpd {{.*#+}} xmm0 = xmm0[1,1]
; SSE-NEXT: cvttsd2si %xmm0, %rax
; SSE-NEXT: subsd %xmm1, %xmm0
; SSE-NEXT: cvttsd2si %xmm0, %rcx
; SSE-NEXT: movq %rax, %rdx
; SSE-NEXT: sarq $63, %rdx
; SSE-NEXT: andq %rcx, %rdx
; SSE-NEXT: orq %rax, %rdx
; SSE-NEXT: movq %rdx, %xmm0
; SSE-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm0[0]
; SSE-NEXT: movdqa {{.*#+}} xmm0 = [9223372039002259456,9223372039002259456]
; SSE-NEXT: pxor %xmm2, %xmm0
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
; SSE-NEXT: pcmpeqd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
; SSE-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE-NEXT: pandn %xmm1, %xmm0
; SSE-NEXT: pcmpeqd %xmm1, %xmm1
; SSE-NEXT: pxor %xmm0, %xmm1
; SSE-NEXT: pand %xmm2, %xmm0
; SSE-NEXT: por %xmm1, %xmm0
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SSE-NEXT: retq
;
; AVX2-LABEL: utest_f64i32_mm:
; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vmovsd {{.*#+}} xmm1 = [9.2233720368547758E+18,0.0E+0]
; AVX2-NEXT: vsubsd %xmm1, %xmm0, %xmm2
; AVX2-NEXT: vcvttsd2si %xmm2, %rax
; AVX2-NEXT: vcvttsd2si %xmm0, %rcx
; AVX2-NEXT: movq %rcx, %rdx
; AVX2-NEXT: sarq $63, %rdx
; AVX2-NEXT: andq %rax, %rdx
; AVX2-NEXT: orq %rcx, %rdx
; AVX2-NEXT: vshufpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX2-NEXT: vsubsd %xmm1, %xmm0, %xmm1
; AVX2-NEXT: vcvttsd2si %xmm1, %rax
; AVX2-NEXT: vmovq %rdx, %xmm1
; AVX2-NEXT: vcvttsd2si %xmm0, %rcx
; AVX2-NEXT: movq %rcx, %rdx
; AVX2-NEXT: sarq $63, %rdx
; AVX2-NEXT: andq %rax, %rdx
; AVX2-NEXT: orq %rcx, %rdx
; AVX2-NEXT: vmovq %rdx, %xmm0
; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
; AVX2-NEXT: vpxor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
; AVX2-NEXT: vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
; AVX2-NEXT: vblendvpd %xmm1, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; AVX2-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2,2,3]
; AVX2-NEXT: retq
;
; AVX512-LABEL: utest_f64i32_mm:
; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vcvttpd2uqq %xmm0, %xmm0
; AVX512-NEXT: vpmovusqd %xmm0, %xmm0
; AVX512-NEXT: retq
entry:
  %conv = fptoui <2 x double> %x to <2 x i64>
  %spec.store.select = call <2 x i64> @llvm.umin.v2i64(<2 x i64> %conv, <2 x i64> <i64 4294967295, i64 4294967295>)
  %conv6 = trunc <2 x i64> %spec.store.select to <2 x i32>
  ret <2 x i32> %conv6
}
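
; With AVX512, fptoui-then-umin collapses to two instructions, vcvttpd2uqq
; and vpmovusqd; SSE and AVX2 must emulate the unsigned conversion with the
; subsd/sarq/and/or sequence before the compare-based clamp.
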
define <2 x i32> @ustest_f64i32_mm(<2 x double> %x) nounwind {
; SSE-LABEL: ustest_f64i32_mm:
; SSE: # %bb.0: # %entry
; SSE-NEXT: cvttsd2si %xmm0, %rax
; SSE-NEXT: movq %rax, %xmm1
; SSE-NEXT: unpckhpd {{.*#+}} xmm0 = xmm0[1,1]
; SSE-NEXT: cvttsd2si %xmm0, %rax
; SSE-NEXT: movq %rax, %xmm0
; SSE-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
; SSE-NEXT: movdqa {{.*#+}} xmm0 = [2147483648,2147483648]
; SSE-NEXT: movdqa %xmm1, %xmm2
; SSE-NEXT: pxor %xmm0, %xmm2
; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm2[1,1,3,3]
; SSE-NEXT: pxor %xmm4, %xmm4
; SSE-NEXT: pcmpeqd %xmm3, %xmm4
; SSE-NEXT: movdqa {{.*#+}} xmm3 = [2147483647,2147483647]
; SSE-NEXT: pcmpgtd %xmm2, %xmm3
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm3[0,0,2,2]
; SSE-NEXT: pand %xmm4, %xmm2
; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
; SSE-NEXT: por %xmm2, %xmm3
; SSE-NEXT: pand %xmm3, %xmm1
; SSE-NEXT: pandn {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3
; SSE-NEXT: por %xmm1, %xmm3
; SSE-NEXT: movdqa %xmm3, %xmm1
; SSE-NEXT: pxor %xmm0, %xmm1
; SSE-NEXT: movdqa %xmm1, %xmm2
; SSE-NEXT: pcmpgtd %xmm0, %xmm2
; SSE-NEXT: pcmpeqd %xmm0, %xmm1
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3]
; SSE-NEXT: pand %xmm2, %xmm0
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm2[1,1,3,3]
; SSE-NEXT: por %xmm0, %xmm1
; SSE-NEXT: pand %xmm3, %xmm1
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[0,2,2,3]
; SSE-NEXT: retq
;
; AVX2-LABEL: ustest_f64i32_mm:
; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vcvttsd2si %xmm0, %rax
; AVX2-NEXT: vmovq %rax, %xmm1
; AVX2-NEXT: vshufpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX2-NEXT: vcvttsd2si %xmm0, %rax
; AVX2-NEXT: vmovq %rax, %xmm0
; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
; AVX2-NEXT: vpmovsxbd {{.*#+}} xmm1 = [4294967295,0,4294967295,0]
; AVX2-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm2
; AVX2-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX2-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm1
; AVX2-NEXT: vpand %xmm0, %xmm1, %xmm0
; AVX2-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; AVX2-NEXT: retq
;
; AVX512-LABEL: ustest_f64i32_mm:
; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vcvttpd2qq %xmm0, %xmm0
; AVX512-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX512-NEXT: vpmaxsq %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vpmovusqd %xmm0, %xmm0
; AVX512-NEXT: retq
entry:
  %conv = fptosi <2 x double> %x to <2 x i64>
  %spec.store.select = call <2 x i64> @llvm.smin.v2i64(<2 x i64> %conv, <2 x i64> <i64 4294967295, i64 4294967295>)
  %spec.store.select7 = call <2 x i64> @llvm.smax.v2i64(<2 x i64> %spec.store.select, <2 x i64> zeroinitializer)
  %conv6 = trunc <2 x i64> %spec.store.select7 to <2 x i32>
  ret <2 x i32> %conv6
}
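; Signed saturation: fptosi <4 x float> to <4 x i64>, smin/smax clamp to [INT32_MIN, INT32_MAX], then truncate.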
define <4 x i32> @stest_f32i32_mm(<4 x float> %x) nounwind {
; SSE-LABEL: stest_f32i32_mm:
; SSE: # %bb.0: # %entry
; SSE-NEXT: movaps %xmm0, %xmm1
; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,3],xmm0[3,3]
; SSE-NEXT: cvttss2si %xmm1, %rax
; SSE-NEXT: movq %rax, %xmm1
; SSE-NEXT: movaps %xmm0, %xmm2
; SSE-NEXT: unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm0[1]
; SSE-NEXT: cvttss2si %xmm2, %rax
; SSE-NEXT: movq %rax, %xmm2
; SSE-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm1[0]
; SSE-NEXT: cvttss2si %xmm0, %rax
; SSE-NEXT: movq %rax, %xmm3
; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1,1,1]
; SSE-NEXT: cvttss2si %xmm0, %rax
; SSE-NEXT: movq %rax, %xmm0
; SSE-NEXT: punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm0[0]
; SSE-NEXT: movdqa {{.*#+}} xmm0 = [2147483648,2147483648]
; SSE-NEXT: movdqa %xmm3, %xmm1
; SSE-NEXT: pxor %xmm0, %xmm1
; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm1[1,1,3,3]
; SSE-NEXT: pxor %xmm5, %xmm5
; SSE-NEXT: pcmpeqd %xmm5, %xmm4
; SSE-NEXT: movdqa {{.*#+}} xmm6 = [4294967295,4294967295]
; SSE-NEXT: movdqa %xmm6, %xmm7
; SSE-NEXT: pcmpgtd %xmm1, %xmm7
; SSE-NEXT: pshufd {{.*#+}} xmm8 = xmm7[0,0,2,2]
; SSE-NEXT: pand %xmm4, %xmm8
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm7[1,1,3,3]
; SSE-NEXT: por %xmm8, %xmm1
; SSE-NEXT: movdqa {{.*#+}} xmm4 = [2147483647,2147483647]
; SSE-NEXT: pand %xmm1, %xmm3
; SSE-NEXT: pandn %xmm4, %xmm1
; SSE-NEXT: por %xmm3, %xmm1
; SSE-NEXT: movdqa %xmm2, %xmm3
; SSE-NEXT: pxor %xmm0, %xmm3
; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm3[1,1,3,3]
; SSE-NEXT: pcmpeqd %xmm5, %xmm7
; SSE-NEXT: pcmpgtd %xmm3, %xmm6
; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm6[0,0,2,2]
; SSE-NEXT: pand %xmm7, %xmm3
; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm6[1,1,3,3]
; SSE-NEXT: por %xmm3, %xmm5
; SSE-NEXT: pand %xmm5, %xmm2
; SSE-NEXT: pandn %xmm4, %xmm5
; SSE-NEXT: por %xmm2, %xmm5
; SSE-NEXT: movdqa %xmm5, %xmm2
; SSE-NEXT: pxor %xmm0, %xmm2
; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm2[1,1,3,3]
; SSE-NEXT: pcmpeqd %xmm4, %xmm4
; SSE-NEXT: pcmpeqd %xmm4, %xmm3
; SSE-NEXT: movdqa {{.*#+}} xmm6 = [18446744069414584320,18446744069414584320]
; SSE-NEXT: pcmpgtd %xmm6, %xmm2
; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm2[0,0,2,2]
; SSE-NEXT: pand %xmm3, %xmm7
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
; SSE-NEXT: por %xmm7, %xmm2
; SSE-NEXT: movdqa {{.*#+}} xmm3 = [18446744071562067968,18446744071562067968]
; SSE-NEXT: pand %xmm2, %xmm5
; SSE-NEXT: pandn %xmm3, %xmm2
; SSE-NEXT: por %xmm5, %xmm2
; SSE-NEXT: pxor %xmm1, %xmm0
; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm0[1,1,3,3]
; SSE-NEXT: pcmpeqd %xmm4, %xmm5
; SSE-NEXT: pcmpgtd %xmm6, %xmm0
; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm0[0,0,2,2]
; SSE-NEXT: pand %xmm5, %xmm4
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
; SSE-NEXT: por %xmm4, %xmm0
; SSE-NEXT: pand %xmm0, %xmm1
; SSE-NEXT: pandn %xmm3, %xmm0
; SSE-NEXT: por %xmm1, %xmm0
; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm2[0,2]
; SSE-NEXT: retq
;
; AVX2-LABEL: stest_f32i32_mm:
; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vshufps {{.*#+}} xmm1 = xmm0[3,3,3,3]
; AVX2-NEXT: vcvttss2si %xmm1, %rax
; AVX2-NEXT: vmovq %rax, %xmm1
; AVX2-NEXT: vshufpd {{.*#+}} xmm2 = xmm0[1,0]
; AVX2-NEXT: vcvttss2si %xmm2, %rax
; AVX2-NEXT: vmovq %rax, %xmm2
; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0]
; AVX2-NEXT: vcvttss2si %xmm0, %rax
; AVX2-NEXT: vmovq %rax, %xmm2
; AVX2-NEXT: vmovshdup {{.*#+}} xmm0 = xmm0[1,1,3,3]
; AVX2-NEXT: vcvttss2si %xmm0, %rax
; AVX2-NEXT: vmovq %rax, %xmm0
; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm2[0],xmm0[0]
; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
; AVX2-NEXT: vpbroadcastq {{.*#+}} ymm1 = [2147483647,2147483647,2147483647,2147483647]
; AVX2-NEXT: vpcmpgtq %ymm0, %ymm1, %ymm2
; AVX2-NEXT: vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
; AVX2-NEXT: vpbroadcastq {{.*#+}} ymm1 = [18446744071562067968,18446744071562067968,18446744071562067968,18446744071562067968]
; AVX2-NEXT: vpcmpgtq %ymm1, %ymm0, %ymm2
; AVX2-NEXT: vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
; AVX2-NEXT: vbroadcastf128 {{.*#+}} ymm1 = [0,2,4,6,0,2,4,6]
; AVX2-NEXT: # ymm1 = mem[0,1,0,1]
; AVX2-NEXT: vpermps %ymm0, %ymm1, %ymm0
; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512-LABEL: stest_f32i32_mm:
; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vcvttps2qq %xmm0, %ymm0
; AVX512-NEXT: vpmovsqd %ymm0, %xmm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
entry:
  %conv = fptosi <4 x float> %x to <4 x i64>
  %spec.store.select = call <4 x i64> @llvm.smin.v4i64(<4 x i64> %conv, <4 x i64> <i64 2147483647, i64 2147483647, i64 2147483647, i64 2147483647>)
  %spec.store.select7 = call <4 x i64> @llvm.smax.v4i64(<4 x i64> %spec.store.select, <4 x i64> <i64 -2147483648, i64 -2147483648, i64 -2147483648, i64 -2147483648>)
  %conv6 = trunc <4 x i64> %spec.store.select7 to <4 x i32>
  ret <4 x i32> %conv6
}
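; Unsigned clamp of <4 x float> through <4 x i64>, umin against UINT32_MAX.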
define <4 x i32> @utest_f32i32_mm(<4 x float> %x) nounwind {
; SSE-LABEL: utest_f32i32_mm:
; SSE: # %bb.0: # %entry
; SSE-NEXT: movss {{.*#+}} xmm2 = [9.22337203E+18,0.0E+0,0.0E+0,0.0E+0]
; SSE-NEXT: movaps %xmm0, %xmm1
; SSE-NEXT: subss %xmm2, %xmm1
; SSE-NEXT: cvttss2si %xmm1, %rax
; SSE-NEXT: cvttss2si %xmm0, %rcx
; SSE-NEXT: movq %rcx, %rdx
; SSE-NEXT: sarq $63, %rdx
; SSE-NEXT: andq %rax, %rdx
; SSE-NEXT: orq %rcx, %rdx
; SSE-NEXT: movq %rdx, %xmm1
; SSE-NEXT: movaps %xmm0, %xmm3
; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[1,1],xmm0[1,1]
; SSE-NEXT: cvttss2si %xmm3, %rax
; SSE-NEXT: subss %xmm2, %xmm3
; SSE-NEXT: cvttss2si %xmm3, %rcx
; SSE-NEXT: movq %rax, %rdx
; SSE-NEXT: sarq $63, %rdx
; SSE-NEXT: andq %rcx, %rdx
; SSE-NEXT: orq %rax, %rdx
; SSE-NEXT: movq %rdx, %xmm3
; SSE-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm3[0]
; SSE-NEXT: movaps %xmm0, %xmm3
; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[3,3],xmm0[3,3]
; SSE-NEXT: cvttss2si %xmm3, %rax
; SSE-NEXT: subss %xmm2, %xmm3
; SSE-NEXT: cvttss2si %xmm3, %rcx
; SSE-NEXT: movq %rax, %rdx
; SSE-NEXT: sarq $63, %rdx
; SSE-NEXT: andq %rcx, %rdx
; SSE-NEXT: orq %rax, %rdx
; SSE-NEXT: movq %rdx, %xmm3
; SSE-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
; SSE-NEXT: cvttss2si %xmm0, %rax
; SSE-NEXT: subss %xmm2, %xmm0
; SSE-NEXT: cvttss2si %xmm0, %rcx
; SSE-NEXT: movq %rax, %rdx
; SSE-NEXT: sarq $63, %rdx
; SSE-NEXT: andq %rcx, %rdx
; SSE-NEXT: orq %rax, %rdx
; SSE-NEXT: movq %rdx, %xmm0
; SSE-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm3[0]
; SSE-NEXT: movdqa {{.*#+}} xmm2 = [9223372039002259456,9223372039002259456]
; SSE-NEXT: movdqa %xmm0, %xmm3
; SSE-NEXT: pxor %xmm2, %xmm3
; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm3[0,0,2,2]
; SSE-NEXT: movdqa {{.*#+}} xmm5 = [2147483647,2147483647,2147483647,2147483647]
; SSE-NEXT: movdqa %xmm5, %xmm6
; SSE-NEXT: pcmpgtd %xmm4, %xmm6
; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
; SSE-NEXT: pcmpeqd %xmm2, %xmm3
; SSE-NEXT: pand %xmm6, %xmm3
; SSE-NEXT: pcmpeqd %xmm4, %xmm4
; SSE-NEXT: pand %xmm3, %xmm0
; SSE-NEXT: pxor %xmm4, %xmm3
; SSE-NEXT: por %xmm0, %xmm3
; SSE-NEXT: movdqa %xmm1, %xmm0
; SSE-NEXT: pxor %xmm2, %xmm0
; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm0[0,0,2,2]
; SSE-NEXT: pcmpgtd %xmm6, %xmm5
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
; SSE-NEXT: pcmpeqd %xmm2, %xmm0
; SSE-NEXT: pand %xmm5, %xmm0
; SSE-NEXT: pxor %xmm0, %xmm4
; SSE-NEXT: pand %xmm1, %xmm0
; SSE-NEXT: por %xmm4, %xmm0
; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm3[0,2]
; SSE-NEXT: retq
;
; AVX2-LABEL: utest_f32i32_mm:
; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vshufps {{.*#+}} xmm2 = xmm0[3,3,3,3]
; AVX2-NEXT: vmovss {{.*#+}} xmm1 = [9.22337203E+18,0.0E+0,0.0E+0,0.0E+0]
; AVX2-NEXT: vsubss %xmm1, %xmm2, %xmm3
; AVX2-NEXT: vcvttss2si %xmm3, %rax
; AVX2-NEXT: vcvttss2si %xmm2, %rcx
; AVX2-NEXT: movq %rcx, %rdx
; AVX2-NEXT: sarq $63, %rdx
; AVX2-NEXT: andq %rax, %rdx
; AVX2-NEXT: orq %rcx, %rdx
; AVX2-NEXT: vshufpd {{.*#+}} xmm2 = xmm0[1,0]
; AVX2-NEXT: vsubss %xmm1, %xmm2, %xmm3
; AVX2-NEXT: vcvttss2si %xmm3, %rax
; AVX2-NEXT: vcvttss2si %xmm2, %rcx
; AVX2-NEXT: vmovq %rdx, %xmm2
; AVX2-NEXT: movq %rcx, %rdx
; AVX2-NEXT: sarq $63, %rdx
; AVX2-NEXT: andq %rax, %rdx
; AVX2-NEXT: orq %rcx, %rdx
; AVX2-NEXT: vmovq %rdx, %xmm3
; AVX2-NEXT: vsubss %xmm1, %xmm0, %xmm4
; AVX2-NEXT: vcvttss2si %xmm4, %rax
; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm3[0],xmm2[0]
; AVX2-NEXT: vcvttss2si %xmm0, %rcx
; AVX2-NEXT: movq %rcx, %rdx
; AVX2-NEXT: sarq $63, %rdx
; AVX2-NEXT: andq %rax, %rdx
; AVX2-NEXT: orq %rcx, %rdx
; AVX2-NEXT: vmovshdup {{.*#+}} xmm0 = xmm0[1,1,3,3]
; AVX2-NEXT: vsubss %xmm1, %xmm0, %xmm1
; AVX2-NEXT: vcvttss2si %xmm1, %rax
; AVX2-NEXT: vcvttss2si %xmm0, %rcx
; AVX2-NEXT: vmovq %rdx, %xmm0
; AVX2-NEXT: movq %rcx, %rdx
; AVX2-NEXT: sarq $63, %rdx
; AVX2-NEXT: andq %rax, %rdx
; AVX2-NEXT: orq %rcx, %rdx
; AVX2-NEXT: vmovq %rdx, %xmm1
; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; AVX2-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm0
; AVX2-NEXT: vpbroadcastq {{.*#+}} ymm1 = [9223372036854775808,9223372036854775808,9223372036854775808,9223372036854775808]
; AVX2-NEXT: vpxor %ymm1, %ymm0, %ymm1
; AVX2-NEXT: vpbroadcastq {{.*#+}} ymm2 = [9223372041149743102,9223372041149743102,9223372041149743102,9223372041149743102]
; AVX2-NEXT: vpcmpgtq %ymm2, %ymm1, %ymm1
; AVX2-NEXT: vbroadcastsd {{.*#+}} ymm2 = [4294967295,4294967295,4294967295,4294967295]
; AVX2-NEXT: vblendvpd %ymm1, %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vbroadcastf128 {{.*#+}} ymm1 = [0,2,4,6,0,2,4,6]
; AVX2-NEXT: # ymm1 = mem[0,1,0,1]
; AVX2-NEXT: vpermps %ymm0, %ymm1, %ymm0
; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512-LABEL: utest_f32i32_mm:
; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vcvttps2uqq %xmm0, %ymm0
; AVX512-NEXT: vpmovusqd %ymm0, %xmm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
entry:
  %conv = fptoui <4 x float> %x to <4 x i64>
  %spec.store.select = call <4 x i64> @llvm.umin.v4i64(<4 x i64> %conv, <4 x i64> <i64 4294967295, i64 4294967295, i64 4294967295, i64 4294967295>)
  %conv6 = trunc <4 x i64> %spec.store.select to <4 x i32>
  ret <4 x i32> %conv6
}
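; Signed conversion of <4 x float> clamped to [0, UINT32_MAX] before truncation to <4 x i32>.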
define <4 x i32> @ustest_f32i32_mm(<4 x float> %x) nounwind {
; SSE-LABEL: ustest_f32i32_mm:
; SSE: # %bb.0: # %entry
; SSE-NEXT: movaps %xmm0, %xmm1
; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,3],xmm0[3,3]
; SSE-NEXT: cvttss2si %xmm1, %rax
; SSE-NEXT: movq %rax, %xmm1
; SSE-NEXT: movaps %xmm0, %xmm2
; SSE-NEXT: unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm0[1]
; SSE-NEXT: cvttss2si %xmm2, %rax
; SSE-NEXT: movq %rax, %xmm2
; SSE-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm1[0]
; SSE-NEXT: cvttss2si %xmm0, %rax
; SSE-NEXT: movq %rax, %xmm3
; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1,1,1]
; SSE-NEXT: cvttss2si %xmm0, %rax
; SSE-NEXT: movq %rax, %xmm0
; SSE-NEXT: punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm0[0]
; SSE-NEXT: movdqa {{.*#+}} xmm0 = [2147483648,2147483648]
; SSE-NEXT: movdqa %xmm3, %xmm1
; SSE-NEXT: pxor %xmm0, %xmm1
; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm1[1,1,3,3]
; SSE-NEXT: pxor %xmm5, %xmm5
; SSE-NEXT: pcmpeqd %xmm5, %xmm4
; SSE-NEXT: movdqa {{.*#+}} xmm6 = [2147483647,2147483647]
; SSE-NEXT: movdqa %xmm6, %xmm7
; SSE-NEXT: pcmpgtd %xmm1, %xmm7
; SSE-NEXT: pshufd {{.*#+}} xmm8 = xmm7[0,0,2,2]
; SSE-NEXT: pand %xmm4, %xmm8
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm7[1,1,3,3]
; SSE-NEXT: por %xmm8, %xmm1
; SSE-NEXT: movdqa {{.*#+}} xmm4 = [4294967295,4294967295]
; SSE-NEXT: pand %xmm1, %xmm3
; SSE-NEXT: pandn %xmm4, %xmm1
; SSE-NEXT: por %xmm3, %xmm1
; SSE-NEXT: movdqa %xmm2, %xmm3
; SSE-NEXT: pxor %xmm0, %xmm3
; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm3[1,1,3,3]
; SSE-NEXT: pcmpeqd %xmm5, %xmm7
; SSE-NEXT: pcmpgtd %xmm3, %xmm6
; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm6[0,0,2,2]
; SSE-NEXT: pand %xmm7, %xmm3
; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm6[1,1,3,3]
; SSE-NEXT: por %xmm3, %xmm5
; SSE-NEXT: pand %xmm5, %xmm2
; SSE-NEXT: pandn %xmm4, %xmm5
; SSE-NEXT: por %xmm2, %xmm5
; SSE-NEXT: movdqa %xmm5, %xmm2
; SSE-NEXT: pxor %xmm0, %xmm2
; SSE-NEXT: movdqa %xmm2, %xmm3
; SSE-NEXT: pcmpgtd %xmm0, %xmm3
; SSE-NEXT: pcmpeqd %xmm0, %xmm2
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
; SSE-NEXT: pand %xmm3, %xmm2
; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
; SSE-NEXT: por %xmm2, %xmm3
; SSE-NEXT: pand %xmm5, %xmm3
; SSE-NEXT: movdqa %xmm1, %xmm2
; SSE-NEXT: pxor %xmm0, %xmm2
; SSE-NEXT: movdqa %xmm2, %xmm4
; SSE-NEXT: pcmpgtd %xmm0, %xmm4
; SSE-NEXT: pcmpeqd %xmm0, %xmm2
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
; SSE-NEXT: pand %xmm4, %xmm2
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm4[1,1,3,3]
; SSE-NEXT: por %xmm2, %xmm0
; SSE-NEXT: pand %xmm1, %xmm0
; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm3[0,2]
; SSE-NEXT: retq
;
; AVX2-LABEL: ustest_f32i32_mm:
; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vshufps {{.*#+}} xmm1 = xmm0[3,3,3,3]
; AVX2-NEXT: vcvttss2si %xmm1, %rax
; AVX2-NEXT: vmovq %rax, %xmm1
; AVX2-NEXT: vshufpd {{.*#+}} xmm2 = xmm0[1,0]
; AVX2-NEXT: vcvttss2si %xmm2, %rax
; AVX2-NEXT: vmovq %rax, %xmm2
; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0]
; AVX2-NEXT: vcvttss2si %xmm0, %rax
; AVX2-NEXT: vmovq %rax, %xmm2
; AVX2-NEXT: vmovshdup {{.*#+}} xmm0 = xmm0[1,1,3,3]
; AVX2-NEXT: vcvttss2si %xmm0, %rax
; AVX2-NEXT: vmovq %rax, %xmm0
; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm2[0],xmm0[0]
; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
; AVX2-NEXT: vpbroadcastq {{.*#+}} ymm1 = [4294967295,4294967295,4294967295,4294967295]
; AVX2-NEXT: vpcmpgtq %ymm0, %ymm1, %ymm2
; AVX2-NEXT: vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX2-NEXT: vpcmpgtq %ymm1, %ymm0, %ymm1
; AVX2-NEXT: vpand %ymm0, %ymm1, %ymm0
; AVX2-NEXT: vpmovsxbd {{.*#+}} ymm1 = [0,2,4,6,0,0,0,0]
; AVX2-NEXT: vpermd %ymm0, %ymm1, %ymm0
; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512-LABEL: ustest_f32i32_mm:
; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vcvttps2qq %xmm0, %ymm0
; AVX512-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX512-NEXT: vpmaxsq %ymm1, %ymm0, %ymm0
; AVX512-NEXT: vpmovusqd %ymm0, %xmm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
entry:
  %conv = fptosi <4 x float> %x to <4 x i64>
  %spec.store.select = call <4 x i64> @llvm.smin.v4i64(<4 x i64> %conv, <4 x i64> <i64 4294967295, i64 4294967295, i64 4294967295, i64 4294967295>)
  %spec.store.select7 = call <4 x i64> @llvm.smax.v4i64(<4 x i64> %spec.store.select, <4 x i64> zeroinitializer)
  %conv6 = trunc <4 x i64> %spec.store.select7 to <4 x i32>
  ret <4 x i32> %conv6
}
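; Same signed i32 clamp for half inputs; SSE extends each lane via __extendhfsf2, AVX uses vcvtph2ps.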
define <4 x i32> @stest_f16i32_mm(<4 x half> %x) nounwind {
; SSE-LABEL: stest_f16i32_mm:
; SSE: # %bb.0: # %entry
; SSE-NEXT: subq $72, %rsp
; SSE-NEXT: movdqa %xmm0, (%rsp) # 16-byte Spill
; SSE-NEXT: movdqa %xmm0, %xmm1
; SSE-NEXT: psrld $16, %xmm1
; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movdqa %xmm0, %xmm1
; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1],xmm0[1,1]
; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: psrlq $48, %xmm0
; SSE-NEXT: callq __extendhfsf2@PLT
; SSE-NEXT: cvttss2si %xmm0, %rax
; SSE-NEXT: movq %rax, %xmm0
; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: callq __extendhfsf2@PLT
; SSE-NEXT: cvttss2si %xmm0, %rax
; SSE-NEXT: movq %rax, %xmm0
; SSE-NEXT: punpcklqdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; SSE-NEXT: # xmm0 = xmm0[0],mem[0]
; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload
; SSE-NEXT: callq __extendhfsf2@PLT
; SSE-NEXT: cvttss2si %xmm0, %rax
; SSE-NEXT: movq %rax, %xmm0
; SSE-NEXT: movdqa %xmm0, (%rsp) # 16-byte Spill
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: callq __extendhfsf2@PLT
; SSE-NEXT: cvttss2si %xmm0, %rax
; SSE-NEXT: movq %rax, %xmm0
; SSE-NEXT: movdqa (%rsp), %xmm2 # 16-byte Reload
; SSE-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm0[0]
; SSE-NEXT: movdqa {{.*#+}} xmm0 = [2147483648,2147483648]
; SSE-NEXT: movdqa %xmm2, %xmm1
; SSE-NEXT: movdqa %xmm2, %xmm7
; SSE-NEXT: pxor %xmm0, %xmm1
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
; SSE-NEXT: pxor %xmm3, %xmm3
; SSE-NEXT: pcmpeqd %xmm3, %xmm2
; SSE-NEXT: movdqa {{.*#+}} xmm4 = [4294967295,4294967295]
; SSE-NEXT: movdqa %xmm4, %xmm5
; SSE-NEXT: pcmpgtd %xmm1, %xmm5
; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm5[0,0,2,2]
; SSE-NEXT: pand %xmm2, %xmm6
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm5[1,1,3,3]
; SSE-NEXT: por %xmm6, %xmm1
; SSE-NEXT: movdqa {{.*#+}} xmm2 = [2147483647,2147483647]
; SSE-NEXT: pand %xmm1, %xmm7
; SSE-NEXT: pandn %xmm2, %xmm1
; SSE-NEXT: por %xmm7, %xmm1
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
; SSE-NEXT: movdqa %xmm7, %xmm5
; SSE-NEXT: pxor %xmm0, %xmm5
; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm5[1,1,3,3]
; SSE-NEXT: pcmpeqd %xmm3, %xmm6
; SSE-NEXT: pcmpgtd %xmm5, %xmm4
; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm4[0,0,2,2]
; SSE-NEXT: pand %xmm6, %xmm3
; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm4[1,1,3,3]
; SSE-NEXT: por %xmm3, %xmm4
; SSE-NEXT: movdqa %xmm7, %xmm3
; SSE-NEXT: pand %xmm4, %xmm3
; SSE-NEXT: pandn %xmm2, %xmm4
; SSE-NEXT: por %xmm3, %xmm4
; SSE-NEXT: movdqa %xmm4, %xmm2
; SSE-NEXT: pxor %xmm0, %xmm2
; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm2[1,1,3,3]
; SSE-NEXT: pcmpeqd %xmm5, %xmm5
; SSE-NEXT: pcmpeqd %xmm5, %xmm3
; SSE-NEXT: movdqa {{.*#+}} xmm6 = [18446744069414584320,18446744069414584320]
; SSE-NEXT: pcmpgtd %xmm6, %xmm2
; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm2[0,0,2,2]
; SSE-NEXT: pand %xmm3, %xmm7
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
; SSE-NEXT: por %xmm7, %xmm2
; SSE-NEXT: movdqa {{.*#+}} xmm3 = [18446744071562067968,18446744071562067968]
; SSE-NEXT: pand %xmm2, %xmm4
; SSE-NEXT: pandn %xmm3, %xmm2
; SSE-NEXT: por %xmm4, %xmm2
; SSE-NEXT: pxor %xmm1, %xmm0
; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm0[1,1,3,3]
; SSE-NEXT: pcmpeqd %xmm5, %xmm4
; SSE-NEXT: pcmpgtd %xmm6, %xmm0
; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm0[0,0,2,2]
; SSE-NEXT: pand %xmm4, %xmm5
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
; SSE-NEXT: por %xmm5, %xmm0
; SSE-NEXT: pand %xmm0, %xmm1
; SSE-NEXT: pandn %xmm3, %xmm0
; SSE-NEXT: por %xmm1, %xmm0
; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm2[0,2]
; SSE-NEXT: addq $72, %rsp
; SSE-NEXT: retq
;
; AVX2-LABEL: stest_f16i32_mm:
; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpshuflw {{.*#+}} xmm1 = xmm0[3,3,3,3,4,5,6,7]
; AVX2-NEXT: vcvtph2ps %xmm1, %xmm1
; AVX2-NEXT: vcvttss2si %xmm1, %rax
; AVX2-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX2-NEXT: vcvtph2ps %xmm1, %xmm1
; AVX2-NEXT: vcvttss2si %xmm1, %rcx
; AVX2-NEXT: vcvtph2ps %xmm0, %xmm1
; AVX2-NEXT: vmovq %rax, %xmm2
; AVX2-NEXT: vcvttss2si %xmm1, %rax
; AVX2-NEXT: vmovq %rcx, %xmm1
; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
; AVX2-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[1,1,1,1,4,5,6,7]
; AVX2-NEXT: vcvtph2ps %xmm0, %xmm0
; AVX2-NEXT: vmovq %rax, %xmm2
; AVX2-NEXT: vcvttss2si %xmm0, %rax
; AVX2-NEXT: vmovq %rax, %xmm0
; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm2[0],xmm0[0]
; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
; AVX2-NEXT: vpbroadcastq {{.*#+}} ymm1 = [2147483647,2147483647,2147483647,2147483647]
; AVX2-NEXT: vpcmpgtq %ymm0, %ymm1, %ymm2
; AVX2-NEXT: vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
; AVX2-NEXT: vpbroadcastq {{.*#+}} ymm1 = [18446744071562067968,18446744071562067968,18446744071562067968,18446744071562067968]
; AVX2-NEXT: vpcmpgtq %ymm1, %ymm0, %ymm2
; AVX2-NEXT: vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
; AVX2-NEXT: vbroadcastf128 {{.*#+}} ymm1 = [0,2,4,6,0,2,4,6]
; AVX2-NEXT: # ymm1 = mem[0,1,0,1]
; AVX2-NEXT: vpermps %ymm0, %ymm1, %ymm0
; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512-LABEL: stest_f16i32_mm:
; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vcvtph2ps %xmm0, %ymm0
; AVX512-NEXT: vcvttps2qq %ymm0, %zmm0
; AVX512-NEXT: vpmovsqd %ymm0, %xmm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
entry:
  %conv = fptosi <4 x half> %x to <4 x i64>
  %spec.store.select = call <4 x i64> @llvm.smin.v4i64(<4 x i64> %conv, <4 x i64> <i64 2147483647, i64 2147483647, i64 2147483647, i64 2147483647>)
  %spec.store.select7 = call <4 x i64> @llvm.smax.v4i64(<4 x i64> %spec.store.select, <4 x i64> <i64 -2147483648, i64 -2147483648, i64 -2147483648, i64 -2147483648>)
  %conv6 = trunc <4 x i64> %spec.store.select7 to <4 x i32>
  ret <4 x i32> %conv6
}
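; Unsigned i32 clamp for half inputs.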
define <4 x i32> @utesth_f16i32_mm(<4 x half> %x) nounwind {
; SSE-LABEL: utesth_f16i32_mm:
; SSE: # %bb.0: # %entry
; SSE-NEXT: subq $72, %rsp
; SSE-NEXT: movaps %xmm0, %xmm1
; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1],xmm0[1,1]
; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movaps %xmm0, %xmm1
; SSE-NEXT: psrlq $48, %xmm1
; SSE-NEXT: movdqa %xmm1, (%rsp) # 16-byte Spill
; SSE-NEXT: movaps %xmm0, %xmm1
; SSE-NEXT: psrld $16, %xmm1
; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: callq __extendhfsf2@PLT
; SSE-NEXT: cvttss2si %xmm0, %rax
; SSE-NEXT: subss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE-NEXT: cvttss2si %xmm0, %rcx
; SSE-NEXT: movq %rax, %rdx
; SSE-NEXT: sarq $63, %rdx
; SSE-NEXT: andq %rcx, %rdx
; SSE-NEXT: orq %rax, %rdx
; SSE-NEXT: movq %rdx, %xmm0
; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: callq __extendhfsf2@PLT
; SSE-NEXT: cvttss2si %xmm0, %rax
; SSE-NEXT: subss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE-NEXT: cvttss2si %xmm0, %rcx
; SSE-NEXT: movq %rax, %rdx
; SSE-NEXT: sarq $63, %rdx
; SSE-NEXT: andq %rcx, %rdx
; SSE-NEXT: orq %rax, %rdx
; SSE-NEXT: movq %rdx, %xmm0
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; SSE-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload
; SSE-NEXT: callq __extendhfsf2@PLT
; SSE-NEXT: cvttss2si %xmm0, %rax
; SSE-NEXT: subss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE-NEXT: cvttss2si %xmm0, %rcx
; SSE-NEXT: movq %rax, %rdx
; SSE-NEXT: sarq $63, %rdx
; SSE-NEXT: andq %rcx, %rdx
; SSE-NEXT: orq %rax, %rdx
; SSE-NEXT: movq %rdx, %xmm0
; SSE-NEXT: movdqa %xmm0, (%rsp) # 16-byte Spill
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: callq __extendhfsf2@PLT
; SSE-NEXT: cvttss2si %xmm0, %rax
; SSE-NEXT: subss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE-NEXT: cvttss2si %xmm0, %rcx
; SSE-NEXT: movq %rax, %rdx
; SSE-NEXT: sarq $63, %rdx
; SSE-NEXT: andq %rcx, %rdx
; SSE-NEXT: orq %rax, %rdx
; SSE-NEXT: movq %rdx, %xmm0
; SSE-NEXT: punpcklqdq (%rsp), %xmm0 # 16-byte Folded Reload
; SSE-NEXT: # xmm0 = xmm0[0],mem[0]
; SSE-NEXT: movdqa {{.*#+}} xmm1 = [9223372039002259456,9223372039002259456]
; SSE-NEXT: movdqa %xmm0, %xmm2
; SSE-NEXT: pxor %xmm1, %xmm2
; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm2[0,0,2,2]
; SSE-NEXT: movdqa {{.*#+}} xmm4 = [2147483647,2147483647,2147483647,2147483647]
; SSE-NEXT: movdqa %xmm4, %xmm5
; SSE-NEXT: pcmpgtd %xmm3, %xmm5
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
; SSE-NEXT: pcmpeqd %xmm1, %xmm2
; SSE-NEXT: pand %xmm5, %xmm2
; SSE-NEXT: pcmpeqd %xmm3, %xmm3
; SSE-NEXT: pand %xmm2, %xmm0
; SSE-NEXT: pxor %xmm3, %xmm2
; SSE-NEXT: por %xmm0, %xmm2
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
; SSE-NEXT: movdqa %xmm6, %xmm0
; SSE-NEXT: pxor %xmm1, %xmm0
; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm0[0,0,2,2]
; SSE-NEXT: pcmpgtd %xmm5, %xmm4
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
; SSE-NEXT: pcmpeqd %xmm1, %xmm0
; SSE-NEXT: pand %xmm4, %xmm0
; SSE-NEXT: pxor %xmm0, %xmm3
; SSE-NEXT: pand %xmm6, %xmm0
; SSE-NEXT: por %xmm3, %xmm0
; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm2[0,2]
; SSE-NEXT: addq $72, %rsp
; SSE-NEXT: retq
;
; AVX2-LABEL: utesth_f16i32_mm:
; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpshuflw {{.*#+}} xmm1 = xmm0[3,3,3,3,4,5,6,7]
; AVX2-NEXT: vcvtph2ps %xmm1, %xmm2
; AVX2-NEXT: vmovss {{.*#+}} xmm1 = [9.22337203E+18,0.0E+0,0.0E+0,0.0E+0]
; AVX2-NEXT: vsubss %xmm1, %xmm2, %xmm3
; AVX2-NEXT: vcvttss2si %xmm3, %rax
; AVX2-NEXT: vcvttss2si %xmm2, %rcx
; AVX2-NEXT: movq %rcx, %rdx
; AVX2-NEXT: sarq $63, %rdx
; AVX2-NEXT: andq %rax, %rdx
; AVX2-NEXT: orq %rcx, %rdx
; AVX2-NEXT: vmovshdup {{.*#+}} xmm2 = xmm0[1,1,3,3]
; AVX2-NEXT: vcvtph2ps %xmm2, %xmm2
; AVX2-NEXT: vsubss %xmm1, %xmm2, %xmm3
; AVX2-NEXT: vcvttss2si %xmm3, %rax
; AVX2-NEXT: vcvttss2si %xmm2, %rcx
; AVX2-NEXT: vmovq %rdx, %xmm2
; AVX2-NEXT: movq %rcx, %rdx
; AVX2-NEXT: sarq $63, %rdx
; AVX2-NEXT: vcvtph2ps %xmm0, %xmm3
; AVX2-NEXT: andq %rax, %rdx
; AVX2-NEXT: vsubss %xmm1, %xmm3, %xmm4
; AVX2-NEXT: vcvttss2si %xmm4, %rax
; AVX2-NEXT: orq %rcx, %rdx
; AVX2-NEXT: vmovq %rdx, %xmm4
; AVX2-NEXT: vcvttss2si %xmm3, %rcx
; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm4[0],xmm2[0]
; AVX2-NEXT: movq %rcx, %rdx
; AVX2-NEXT: sarq $63, %rdx
; AVX2-NEXT: andq %rax, %rdx
; AVX2-NEXT: orq %rcx, %rdx
; AVX2-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[1,1,1,1,4,5,6,7]
; AVX2-NEXT: vcvtph2ps %xmm0, %xmm0
; AVX2-NEXT: vsubss %xmm1, %xmm0, %xmm1
; AVX2-NEXT: vcvttss2si %xmm1, %rax
; AVX2-NEXT: vcvttss2si %xmm0, %rcx
; AVX2-NEXT: vmovq %rdx, %xmm0
; AVX2-NEXT: movq %rcx, %rdx
; AVX2-NEXT: sarq $63, %rdx
; AVX2-NEXT: andq %rax, %rdx
; AVX2-NEXT: orq %rcx, %rdx
; AVX2-NEXT: vmovq %rdx, %xmm1
; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; AVX2-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm0
; AVX2-NEXT: vpbroadcastq {{.*#+}} ymm1 = [9223372036854775808,9223372036854775808,9223372036854775808,9223372036854775808]
; AVX2-NEXT: vpxor %ymm1, %ymm0, %ymm1
; AVX2-NEXT: vpbroadcastq {{.*#+}} ymm2 = [9223372041149743102,9223372041149743102,9223372041149743102,9223372041149743102]
; AVX2-NEXT: vpcmpgtq %ymm2, %ymm1, %ymm1
; AVX2-NEXT: vbroadcastsd {{.*#+}} ymm2 = [4294967295,4294967295,4294967295,4294967295]
; AVX2-NEXT: vblendvpd %ymm1, %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vbroadcastf128 {{.*#+}} ymm1 = [0,2,4,6,0,2,4,6]
; AVX2-NEXT: # ymm1 = mem[0,1,0,1]
; AVX2-NEXT: vpermps %ymm0, %ymm1, %ymm0
; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512-LABEL: utesth_f16i32_mm:
; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vcvtph2ps %xmm0, %ymm0
; AVX512-NEXT: vcvttps2uqq %ymm0, %zmm0
; AVX512-NEXT: vpmovusqd %ymm0, %xmm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
entry:
  %conv = fptoui <4 x half> %x to <4 x i64>
  %spec.store.select = call <4 x i64> @llvm.umin.v4i64(<4 x i64> %conv, <4 x i64> <i64 4294967295, i64 4294967295, i64 4294967295, i64 4294967295>)
  %conv6 = trunc <4 x i64> %spec.store.select to <4 x i32>
  ret <4 x i32> %conv6
}
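; Signed conversion of half inputs clamped to [0, UINT32_MAX].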
define <4 x i32> @ustest_f16i32_mm(<4 x half> %x) nounwind {
; SSE-LABEL: ustest_f16i32_mm:
; SSE: # %bb.0: # %entry
; SSE-NEXT: subq $72, %rsp
; SSE-NEXT: movdqa %xmm0, (%rsp) # 16-byte Spill
; SSE-NEXT: movdqa %xmm0, %xmm1
; SSE-NEXT: psrld $16, %xmm1
; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movdqa %xmm0, %xmm1
; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1],xmm0[1,1]
; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: psrlq $48, %xmm0
; SSE-NEXT: callq __extendhfsf2@PLT
; SSE-NEXT: cvttss2si %xmm0, %rax
; SSE-NEXT: movq %rax, %xmm0
; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: callq __extendhfsf2@PLT
; SSE-NEXT: cvttss2si %xmm0, %rax
; SSE-NEXT: movq %rax, %xmm0
; SSE-NEXT: punpcklqdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; SSE-NEXT: # xmm0 = xmm0[0],mem[0]
; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload
; SSE-NEXT: callq __extendhfsf2@PLT
; SSE-NEXT: cvttss2si %xmm0, %rax
; SSE-NEXT: movq %rax, %xmm0
; SSE-NEXT: movdqa %xmm0, (%rsp) # 16-byte Spill
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: callq __extendhfsf2@PLT
; SSE-NEXT: cvttss2si %xmm0, %rax
; SSE-NEXT: movq %rax, %xmm0
; SSE-NEXT: movdqa (%rsp), %xmm2 # 16-byte Reload
; SSE-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm0[0]
; SSE-NEXT: movdqa {{.*#+}} xmm0 = [2147483648,2147483648]
; SSE-NEXT: movdqa %xmm2, %xmm1
; SSE-NEXT: movdqa %xmm2, %xmm7
; SSE-NEXT: pxor %xmm0, %xmm1
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
; SSE-NEXT: pxor %xmm3, %xmm3
; SSE-NEXT: pcmpeqd %xmm3, %xmm2
; SSE-NEXT: movdqa {{.*#+}} xmm4 = [2147483647,2147483647]
; SSE-NEXT: movdqa %xmm4, %xmm5
; SSE-NEXT: pcmpgtd %xmm1, %xmm5
; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm5[0,0,2,2]
; SSE-NEXT: pand %xmm2, %xmm6
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm5[1,1,3,3]
; SSE-NEXT: por %xmm6, %xmm1
; SSE-NEXT: movdqa {{.*#+}} xmm2 = [4294967295,4294967295]
; SSE-NEXT: pand %xmm1, %xmm7
; SSE-NEXT: pandn %xmm2, %xmm1
; SSE-NEXT: por %xmm7, %xmm1
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
; SSE-NEXT: movdqa %xmm7, %xmm5
; SSE-NEXT: pxor %xmm0, %xmm5
; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm5[1,1,3,3]
; SSE-NEXT: pcmpeqd %xmm3, %xmm6
; SSE-NEXT: pcmpgtd %xmm5, %xmm4
; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm4[0,0,2,2]
; SSE-NEXT: pand %xmm6, %xmm3
; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm4[1,1,3,3]
; SSE-NEXT: por %xmm3, %xmm4
; SSE-NEXT: movdqa %xmm7, %xmm3
; SSE-NEXT: pand %xmm4, %xmm3
; SSE-NEXT: pandn %xmm2, %xmm4
; SSE-NEXT: por %xmm3, %xmm4
; SSE-NEXT: movdqa %xmm4, %xmm2
; SSE-NEXT: pxor %xmm0, %xmm2
; SSE-NEXT: movdqa %xmm2, %xmm3
; SSE-NEXT: pcmpgtd %xmm0, %xmm3
; SSE-NEXT: pcmpeqd %xmm0, %xmm2
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
; SSE-NEXT: pand %xmm3, %xmm2
; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
; SSE-NEXT: por %xmm2, %xmm3
; SSE-NEXT: pand %xmm4, %xmm3
; SSE-NEXT: movdqa %xmm1, %xmm2
; SSE-NEXT: pxor %xmm0, %xmm2
; SSE-NEXT: movdqa %xmm2, %xmm4
; SSE-NEXT: pcmpgtd %xmm0, %xmm4
; SSE-NEXT: pcmpeqd %xmm0, %xmm2
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
; SSE-NEXT: pand %xmm4, %xmm2
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm4[1,1,3,3]
; SSE-NEXT: por %xmm2, %xmm0
; SSE-NEXT: pand %xmm1, %xmm0
; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm3[0,2]
; SSE-NEXT: addq $72, %rsp
; SSE-NEXT: retq
;
; AVX2-LABEL: ustest_f16i32_mm:
; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpshuflw {{.*#+}} xmm1 = xmm0[3,3,3,3,4,5,6,7]
; AVX2-NEXT: vcvtph2ps %xmm1, %xmm1
; AVX2-NEXT: vcvttss2si %xmm1, %rax
; AVX2-NEXT: vmovq %rax, %xmm1
; AVX2-NEXT: vmovshdup {{.*#+}} xmm2 = xmm0[1,1,3,3]
; AVX2-NEXT: vcvtph2ps %xmm2, %xmm2
; AVX2-NEXT: vcvttss2si %xmm2, %rax
; AVX2-NEXT: vmovq %rax, %xmm2
; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0]
; AVX2-NEXT: vcvtph2ps %xmm0, %xmm2
; AVX2-NEXT: vcvttss2si %xmm2, %rax
; AVX2-NEXT: vmovq %rax, %xmm2
; AVX2-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[1,1,1,1,4,5,6,7]
; AVX2-NEXT: vcvtph2ps %xmm0, %xmm0
; AVX2-NEXT: vcvttss2si %xmm0, %rax
; AVX2-NEXT: vmovq %rax, %xmm0
; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm2[0],xmm0[0]
; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
; AVX2-NEXT: vpbroadcastq {{.*#+}} ymm1 = [4294967295,4294967295,4294967295,4294967295]
; AVX2-NEXT: vpcmpgtq %ymm0, %ymm1, %ymm2
; AVX2-NEXT: vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX2-NEXT: vpcmpgtq %ymm1, %ymm0, %ymm1
; AVX2-NEXT: vpand %ymm0, %ymm1, %ymm0
; AVX2-NEXT: vpmovsxbd {{.*#+}} ymm1 = [0,2,4,6,0,0,0,0]
; AVX2-NEXT: vpermd %ymm0, %ymm1, %ymm0
; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512-LABEL: ustest_f16i32_mm:
; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vcvtph2ps %xmm0, %ymm0
; AVX512-NEXT: vcvttps2qq %ymm0, %zmm0
; AVX512-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX512-NEXT: vpmaxsq %ymm1, %ymm0, %ymm0
; AVX512-NEXT: vpmovusqd %ymm0, %xmm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
entry:
  %conv = fptosi <4 x half> %x to <4 x i64>
  %spec.store.select = call <4 x i64> @llvm.smin.v4i64(<4 x i64> %conv, <4 x i64> <i64 4294967295, i64 4294967295, i64 4294967295, i64 4294967295>)
  %spec.store.select7 = call <4 x i64> @llvm.smax.v4i64(<4 x i64> %spec.store.select, <4 x i64> zeroinitializer)
  %conv6 = trunc <4 x i64> %spec.store.select7 to <4 x i32>
  ret <4 x i32> %conv6
}
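; i16 tests: the <2 x i32> intermediate lets packssdw perform the signed saturation.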
define <2 x i16> @stest_f64i16_mm(<2 x double> %x) nounwind {
; SSE-LABEL: stest_f64i16_mm:
; SSE: # %bb.0: # %entry
; SSE-NEXT: cvttpd2dq %xmm0, %xmm0
; SSE-NEXT: packssdw %xmm0, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: stest_f64i16_mm:
; AVX: # %bb.0: # %entry
; AVX-NEXT: vcvttpd2dq %xmm0, %xmm0
; AVX-NEXT: vpackssdw %xmm0, %xmm0, %xmm0
; AVX-NEXT: retq
entry:
  %conv = fptosi <2 x double> %x to <2 x i32>
  %spec.store.select = call <2 x i32> @llvm.smin.v2i32(<2 x i32> %conv, <2 x i32> <i32 32767, i32 32767>)
  %spec.store.select7 = call <2 x i32> @llvm.smax.v2i32(<2 x i32> %spec.store.select, <2 x i32> <i32 -32768, i32 -32768>)
  %conv6 = trunc <2 x i32> %spec.store.select7 to <2 x i16>
  ret <2 x i16> %conv6
}
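; Unsigned i16 clamp: umin against 65535 on the <2 x i32> intermediate.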
define <2 x i16> @utest_f64i16_mm(<2 x double> %x) nounwind {
; SSE-LABEL: utest_f64i16_mm:
; SSE: # %bb.0: # %entry
; SSE-NEXT: cvttpd2dq %xmm0, %xmm1
; SSE-NEXT: movapd %xmm1, %xmm2
; SSE-NEXT: psrad $31, %xmm2
; SSE-NEXT: addpd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE-NEXT: cvttpd2dq %xmm0, %xmm0
; SSE-NEXT: andpd %xmm2, %xmm0
; SSE-NEXT: orpd %xmm1, %xmm0
; SSE-NEXT: movapd {{.*#+}} xmm1 = [2147483648,2147483648,2147483648,2147483648]
; SSE-NEXT: xorpd %xmm0, %xmm1
; SSE-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
; SSE-NEXT: movdqa %xmm1, %xmm2
; SSE-NEXT: pandn %xmm0, %xmm2
; SSE-NEXT: psrld $16, %xmm1
; SSE-NEXT: por %xmm2, %xmm1
; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm1[0,2,2,3,4,5,6,7]
; SSE-NEXT: retq
;
; AVX2-LABEL: utest_f64i16_mm:
; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vcvttpd2dq %xmm0, %xmm1
; AVX2-NEXT: vaddpd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; AVX2-NEXT: vpsrad $31, %xmm1, %xmm2
; AVX2-NEXT: vcvttpd2dq %xmm0, %xmm0
; AVX2-NEXT: vandpd %xmm2, %xmm0, %xmm0
; AVX2-NEXT: vorpd %xmm0, %xmm1, %xmm0
; AVX2-NEXT: vpbroadcastd {{.*#+}} xmm1 = [65535,65535,65535,65535]
; AVX2-NEXT: vpminud %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
; AVX2-NEXT: retq
;
; AVX512-LABEL: utest_f64i16_mm:
; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vcvttpd2udq %xmm0, %xmm0
; AVX512-NEXT: vpminud {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm0, %xmm0
; AVX512-NEXT: vpmovdw %xmm0, %xmm0
; AVX512-NEXT: retq
entry:
  %conv = fptoui <2 x double> %x to <2 x i32>
  %spec.store.select = call <2 x i32> @llvm.umin.v2i32(<2 x i32> %conv, <2 x i32> <i32 65535, i32 65535>)
  %conv6 = trunc <2 x i32> %spec.store.select to <2 x i16>
  ret <2 x i16> %conv6
}
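; Signed conversion clamped to [0, 65535]; AVX lowers this to a single vpackusdw.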
define <2 x i16> @ustest_f64i16_mm(<2 x double> %x) nounwind {
; SSE-LABEL: ustest_f64i16_mm:
; SSE: # %bb.0: # %entry
; SSE-NEXT: cvttpd2dq %xmm0, %xmm0
; SSE-NEXT: movdqa {{.*#+}} xmm1 = [65535,65535,u,u]
; SSE-NEXT: movdqa %xmm1, %xmm2
; SSE-NEXT: pcmpgtd %xmm0, %xmm2
; SSE-NEXT: pand %xmm2, %xmm0
; SSE-NEXT: pandn %xmm1, %xmm2
; SSE-NEXT: por %xmm0, %xmm2
; SSE-NEXT: pxor %xmm0, %xmm0
; SSE-NEXT: movdqa %xmm2, %xmm1
; SSE-NEXT: pcmpgtd %xmm0, %xmm1
; SSE-NEXT: pand %xmm2, %xmm1
; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm1[0,2,2,3,4,5,6,7]
; SSE-NEXT: retq
;
; AVX-LABEL: ustest_f64i16_mm:
; AVX: # %bb.0: # %entry
; AVX-NEXT: vcvttpd2dq %xmm0, %xmm0
; AVX-NEXT: vpackusdw %xmm0, %xmm0, %xmm0
; AVX-NEXT: retq
entry:
  %conv = fptosi <2 x double> %x to <2 x i32>
  %spec.store.select = call <2 x i32> @llvm.smin.v2i32(<2 x i32> %conv, <2 x i32> <i32 65535, i32 65535>)
  %spec.store.select7 = call <2 x i32> @llvm.smax.v2i32(<2 x i32> %spec.store.select, <2 x i32> zeroinitializer)
  %conv6 = trunc <2 x i32> %spec.store.select7 to <2 x i16>
  ret <2 x i16> %conv6
}
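; Signed i16 saturation of <4 x float> via cvttps2dq + packssdw.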
define <4 x i16> @stest_f32i16_mm(<4 x float> %x) nounwind {
; SSE-LABEL: stest_f32i16_mm:
; SSE: # %bb.0: # %entry
; SSE-NEXT: cvttps2dq %xmm0, %xmm0
; SSE-NEXT: packssdw %xmm0, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: stest_f32i16_mm:
; AVX: # %bb.0: # %entry
; AVX-NEXT: vcvttps2dq %xmm0, %xmm0
; AVX-NEXT: vpackssdw %xmm0, %xmm0, %xmm0
; AVX-NEXT: retq
entry:
  %conv = fptosi <4 x float> %x to <4 x i32>
  %spec.store.select = call <4 x i32> @llvm.smin.v4i32(<4 x i32> %conv, <4 x i32> <i32 32767, i32 32767, i32 32767, i32 32767>)
  %spec.store.select7 = call <4 x i32> @llvm.smax.v4i32(<4 x i32> %spec.store.select, <4 x i32> <i32 -32768, i32 -32768, i32 -32768, i32 -32768>)
  %conv6 = trunc <4 x i32> %spec.store.select7 to <4 x i16>
  ret <4 x i16> %conv6
}
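; Unsigned i16 clamp of <4 x float>: umin against 65535 after conversion.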
define <4 x i16> @utest_f32i16_mm(<4 x float> %x) nounwind {
; SSE-LABEL: utest_f32i16_mm:
; SSE: # %bb.0: # %entry
; SSE-NEXT: cvttps2dq %xmm0, %xmm1
; SSE-NEXT: movdqa %xmm1, %xmm2
; SSE-NEXT: psrad $31, %xmm2
; SSE-NEXT: subps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE-NEXT: cvttps2dq %xmm0, %xmm0
; SSE-NEXT: pand %xmm2, %xmm0
; SSE-NEXT: por %xmm1, %xmm0
; SSE-NEXT: movdqa {{.*#+}} xmm1 = [2147483648,2147483648,2147483648,2147483648]
; SSE-NEXT: pxor %xmm0, %xmm1
; SSE-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
; SSE-NEXT: movdqa %xmm1, %xmm2
; SSE-NEXT: pandn %xmm0, %xmm2
; SSE-NEXT: psrld $16, %xmm1
; SSE-NEXT: por %xmm2, %xmm1
; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm1[0,2,2,3,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SSE-NEXT: retq
;
; AVX2-LABEL: utest_f32i16_mm:
; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vbroadcastss {{.*#+}} xmm1 = [2.14748365E+9,2.14748365E+9,2.14748365E+9,2.14748365E+9]
; AVX2-NEXT: vsubps %xmm1, %xmm0, %xmm1
; AVX2-NEXT: vcvttps2dq %xmm1, %xmm1
; AVX2-NEXT: vcvttps2dq %xmm0, %xmm0
; AVX2-NEXT: vpsrad $31, %xmm0, %xmm2
; AVX2-NEXT: vpand %xmm2, %xmm1, %xmm1
; AVX2-NEXT: vpor %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpbroadcastd {{.*#+}} xmm1 = [65535,65535,65535,65535]
; AVX2-NEXT: vpminud %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpackusdw %xmm0, %xmm0, %xmm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: utest_f32i16_mm:
; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vcvttps2udq %xmm0, %xmm0
; AVX512-NEXT: vpmovusdw %xmm0, %xmm0
; AVX512-NEXT: retq
entry:
  %conv = fptoui <4 x float> %x to <4 x i32>
  %spec.store.select = call <4 x i32> @llvm.umin.v4i32(<4 x i32> %conv, <4 x i32> <i32 65535, i32 65535, i32 65535, i32 65535>)
  %conv6 = trunc <4 x i32> %spec.store.select to <4 x i16>
  ret <4 x i16> %conv6
}
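; Signed conversion of <4 x float> clamped to [0, 65535].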
define <4 x i16> @ustest_f32i16_mm(<4 x float> %x) nounwind {
; SSE-LABEL: ustest_f32i16_mm:
; SSE: # %bb.0: # %entry
; SSE-NEXT: cvttps2dq %xmm0, %xmm0
; SSE-NEXT: movdqa {{.*#+}} xmm1 = [65535,65535,65535,65535]
; SSE-NEXT: movdqa %xmm1, %xmm2
; SSE-NEXT: pcmpgtd %xmm0, %xmm2
; SSE-NEXT: pand %xmm2, %xmm0
; SSE-NEXT: pandn %xmm1, %xmm2
; SSE-NEXT: por %xmm0, %xmm2
; SSE-NEXT: pxor %xmm0, %xmm0
; SSE-NEXT: movdqa %xmm2, %xmm1
; SSE-NEXT: pcmpgtd %xmm0, %xmm1
; SSE-NEXT: pand %xmm2, %xmm1
; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm1[0,2,2,3,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SSE-NEXT: retq
;
; AVX-LABEL: ustest_f32i16_mm:
; AVX: # %bb.0: # %entry
; AVX-NEXT: vcvttps2dq %xmm0, %xmm0
; AVX-NEXT: vpackusdw %xmm0, %xmm0, %xmm0
; AVX-NEXT: retq
entry:
  %conv = fptosi <4 x float> %x to <4 x i32>
  %spec.store.select = call <4 x i32> @llvm.smin.v4i32(<4 x i32> %conv, <4 x i32> <i32 65535, i32 65535, i32 65535, i32 65535>)
  %spec.store.select7 = call <4 x i32> @llvm.smax.v4i32(<4 x i32> %spec.store.select, <4 x i32> zeroinitializer)
  %conv6 = trunc <4 x i32> %spec.store.select7 to <4 x i16>
  ret <4 x i16> %conv6
}
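; <8 x half> signed i16 saturation; the SSE lowering round-trips every lane through __extendhfsf2.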
define <8 x i16> @stest_f16i16_mm(<8 x half> %x) nounwind {
; SSE-LABEL: stest_f16i16_mm:
; SSE: # %bb.0: # %entry
; SSE-NEXT: subq $72, %rsp
; SSE-NEXT: movdqa %xmm0, (%rsp) # 16-byte Spill
; SSE-NEXT: psrldq {{.*#+}} xmm0 = xmm0[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; SSE-NEXT: callq __extendhfsf2@PLT
; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload
; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,3,3,3]
; SSE-NEXT: callq __extendhfsf2@PLT
; SSE-NEXT: unpcklps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; SSE-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
; SSE-NEXT: cvttps2dq %xmm0, %xmm0
; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movdqa (%rsp), %xmm0 # 16-byte Reload
; SSE-NEXT: psrldq {{.*#+}} xmm0 = xmm0[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; SSE-NEXT: callq __extendhfsf2@PLT
; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload
; SSE-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
; SSE-NEXT: callq __extendhfsf2@PLT
; SSE-NEXT: unpcklps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; SSE-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
; SSE-NEXT: cvttps2dq %xmm0, %xmm0
; SSE-NEXT: unpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; SSE-NEXT: # xmm0 = xmm0[0],mem[0]
; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movdqa (%rsp), %xmm0 # 16-byte Reload
; SSE-NEXT: psrlq $48, %xmm0
; SSE-NEXT: callq __extendhfsf2@PLT
; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload
; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1,1,1]
; SSE-NEXT: callq __extendhfsf2@PLT
; SSE-NEXT: unpcklps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; SSE-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
; SSE-NEXT: cvttps2dq %xmm0, %xmm0
; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload
; SSE-NEXT: callq __extendhfsf2@PLT
; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movdqa (%rsp), %xmm0 # 16-byte Reload
; SSE-NEXT: psrld $16, %xmm0
; SSE-NEXT: callq __extendhfsf2@PLT
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; SSE-NEXT: cvttps2dq %xmm1, %xmm0
; SSE-NEXT: punpcklqdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; SSE-NEXT: # xmm0 = xmm0[0],mem[0]
; SSE-NEXT: packssdw {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; SSE-NEXT: addq $72, %rsp
; SSE-NEXT: retq
;
; AVX2-LABEL: stest_f16i16_mm:
; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vcvtph2ps %xmm0, %ymm0
; AVX2-NEXT: vcvttps2dq %ymm0, %ymm0
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vpackssdw %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512-LABEL: stest_f16i16_mm:
; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vcvtph2ps %xmm0, %ymm0
; AVX512-NEXT: vcvttps2dq %ymm0, %ymm0
; AVX512-NEXT: vpmovsdw %ymm0, %xmm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
entry:
  %conv = fptosi <8 x half> %x to <8 x i32>
  %spec.store.select = call <8 x i32> @llvm.smin.v8i32(<8 x i32> %conv, <8 x i32> <i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767>)
  %spec.store.select7 = call <8 x i32> @llvm.smax.v8i32(<8 x i32> %spec.store.select, <8 x i32> <i32 -32768, i32 -32768, i32 -32768, i32 -32768, i32 -32768, i32 -32768, i32 -32768, i32 -32768>)
  %conv6 = trunc <8 x i32> %spec.store.select7 to <8 x i16>
  ret <8 x i16> %conv6
}
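; <8 x half> unsigned i16 clamp with umin against 65535.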
define <8 x i16> @utesth_f16i16_mm(<8 x half> %x) nounwind {
; SSE-LABEL: utesth_f16i16_mm:
; SSE: # %bb.0: # %entry
; SSE-NEXT: subq $72, %rsp
; SSE-NEXT: movdqa %xmm0, (%rsp) # 16-byte Spill
; SSE-NEXT: psrldq {{.*#+}} xmm0 = xmm0[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; SSE-NEXT: callq __extendhfsf2@PLT
; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload
; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,3,3,3]
; SSE-NEXT: callq __extendhfsf2@PLT
; SSE-NEXT: unpcklps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; SSE-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
; SSE-NEXT: cvttps2dq %xmm0, %xmm1
; SSE-NEXT: movdqa %xmm1, %xmm2
; SSE-NEXT: psrad $31, %xmm2
; SSE-NEXT: subps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE-NEXT: cvttps2dq %xmm0, %xmm0
; SSE-NEXT: pand %xmm2, %xmm0
; SSE-NEXT: por %xmm1, %xmm0
; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movdqa (%rsp), %xmm0 # 16-byte Reload
; SSE-NEXT: psrldq {{.*#+}} xmm0 = xmm0[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; SSE-NEXT: callq __extendhfsf2@PLT
; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload
; SSE-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
; SSE-NEXT: callq __extendhfsf2@PLT
; SSE-NEXT: unpcklps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; SSE-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
; SSE-NEXT: cvttps2dq %xmm0, %xmm1
; SSE-NEXT: movdqa %xmm1, %xmm2
; SSE-NEXT: psrad $31, %xmm2
; SSE-NEXT: subps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE-NEXT: cvttps2dq %xmm0, %xmm0
; SSE-NEXT: pand %xmm2, %xmm0
; SSE-NEXT: por %xmm1, %xmm0
; SSE-NEXT: punpcklqdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; SSE-NEXT: # xmm0 = xmm0[0],mem[0]
; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movdqa (%rsp), %xmm0 # 16-byte Reload
; SSE-NEXT: psrlq $48, %xmm0
; SSE-NEXT: callq __extendhfsf2@PLT
; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload
; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1,1,1]
; SSE-NEXT: callq __extendhfsf2@PLT
; SSE-NEXT: unpcklps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; SSE-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
; SSE-NEXT: cvttps2dq %xmm0, %xmm1
; SSE-NEXT: movdqa %xmm1, %xmm2
; SSE-NEXT: psrad $31, %xmm2
; SSE-NEXT: subps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE-NEXT: cvttps2dq %xmm0, %xmm0
; SSE-NEXT: pand %xmm2, %xmm0
; SSE-NEXT: por %xmm1, %xmm0
; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload
; SSE-NEXT: callq __extendhfsf2@PLT
; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movdqa (%rsp), %xmm0 # 16-byte Reload
; SSE-NEXT: psrld $16, %xmm0
; SSE-NEXT: callq __extendhfsf2@PLT
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
; SSE-NEXT: cvttps2dq %xmm2, %xmm0
; SSE-NEXT: movdqa %xmm0, %xmm1
; SSE-NEXT: psrad $31, %xmm1
; SSE-NEXT: subps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2
; SSE-NEXT: cvttps2dq %xmm2, %xmm2
; SSE-NEXT: pand %xmm1, %xmm2
; SSE-NEXT: por %xmm0, %xmm2
; SSE-NEXT: punpcklqdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Folded Reload
; SSE-NEXT: # xmm2 = xmm2[0],mem[0]
; SSE-NEXT: movdqa {{.*#+}} xmm1 = [2147483648,2147483648,2147483648,2147483648]
; SSE-NEXT: movdqa %xmm2, %xmm3
; SSE-NEXT: pxor %xmm1, %xmm3
; SSE-NEXT: movdqa {{.*#+}} xmm4 = [2147549183,2147549183,2147549183,2147549183]
; SSE-NEXT: movdqa %xmm4, %xmm0
; SSE-NEXT: pcmpgtd %xmm3, %xmm0
; SSE-NEXT: pand %xmm0, %xmm2
; SSE-NEXT: pcmpeqd %xmm3, %xmm3
; SSE-NEXT: pxor %xmm3, %xmm0
; SSE-NEXT: por %xmm2, %xmm0
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
; SSE-NEXT: pxor %xmm2, %xmm1
; SSE-NEXT: pcmpgtd %xmm1, %xmm4
; SSE-NEXT: pand %xmm4, %xmm2
; SSE-NEXT: pxor %xmm3, %xmm4
; SSE-NEXT: por %xmm2, %xmm4
; SSE-NEXT: pslld $16, %xmm4
; SSE-NEXT: psrad $16, %xmm4
; SSE-NEXT: pslld $16, %xmm0
; SSE-NEXT: psrad $16, %xmm0
; SSE-NEXT: packssdw %xmm4, %xmm0
; SSE-NEXT: addq $72, %rsp
; SSE-NEXT: retq
;
; AVX2-LABEL: utesth_f16i16_mm:
; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vcvtph2ps %xmm0, %ymm0
; AVX2-NEXT: vbroadcastss {{.*#+}} ymm1 = [2.14748365E+9,2.14748365E+9,2.14748365E+9,2.14748365E+9,2.14748365E+9,2.14748365E+9,2.14748365E+9,2.14748365E+9]
; AVX2-NEXT: vsubps %ymm1, %ymm0, %ymm1
; AVX2-NEXT: vcvttps2dq %ymm1, %ymm1
; AVX2-NEXT: vcvttps2dq %ymm0, %ymm0
; AVX2-NEXT: vpsrad $31, %ymm0, %ymm2
; AVX2-NEXT: vpand %ymm2, %ymm1, %ymm1
; AVX2-NEXT: vpor %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpbroadcastd {{.*#+}} ymm1 = [65535,65535,65535,65535,65535,65535,65535,65535]
; AVX2-NEXT: vpminud %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vpackusdw %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512-LABEL: utesth_f16i16_mm:
; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vcvtph2ps %xmm0, %ymm0
; AVX512-NEXT: vcvttps2udq %ymm0, %ymm0
; AVX512-NEXT: vpmovusdw %ymm0, %xmm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
entry:
  %conv = fptoui <8 x half> %x to <8 x i32>
  %spec.store.select = call <8 x i32> @llvm.umin.v8i32(<8 x i32> %conv, <8 x i32> <i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535>)
  %conv6 = trunc <8 x i32> %spec.store.select to <8 x i16>
  ret <8 x i16> %conv6
}
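; <8 x half> signed conversion clamped to [0, 65535].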
define <8 x i16> @ustest_f16i16_mm(<8 x half> %x) nounwind {
; SSE-LABEL: ustest_f16i16_mm:
; SSE: # %bb.0: # %entry
; SSE-NEXT: subq $72, %rsp
; SSE-NEXT: movdqa %xmm0, (%rsp) # 16-byte Spill
; SSE-NEXT: psrlq $48, %xmm0
; SSE-NEXT: callq __extendhfsf2@PLT
; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload
; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1,1,1]
; SSE-NEXT: callq __extendhfsf2@PLT
; SSE-NEXT: unpcklps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; SSE-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
; SSE-NEXT: cvttps2dq %xmm0, %xmm0
; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload
; SSE-NEXT: callq __extendhfsf2@PLT
; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movdqa (%rsp), %xmm0 # 16-byte Reload
; SSE-NEXT: psrld $16, %xmm0
; SSE-NEXT: callq __extendhfsf2@PLT
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; SSE-NEXT: cvttps2dq %xmm1, %xmm0
; SSE-NEXT: unpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; SSE-NEXT: # xmm0 = xmm0[0],mem[0]
; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movdqa (%rsp), %xmm0 # 16-byte Reload
; SSE-NEXT: psrldq {{.*#+}} xmm0 = xmm0[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; SSE-NEXT: callq __extendhfsf2@PLT
; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload
; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,3,3,3]
; SSE-NEXT: callq __extendhfsf2@PLT
; SSE-NEXT: unpcklps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; SSE-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
; SSE-NEXT: cvttps2dq %xmm0, %xmm0
; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movdqa (%rsp), %xmm0 # 16-byte Reload
; SSE-NEXT: psrldq {{.*#+}} xmm0 = xmm0[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; SSE-NEXT: callq __extendhfsf2@PLT
; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload
; SSE-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
; SSE-NEXT: callq __extendhfsf2@PLT
; SSE-NEXT: unpcklps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; SSE-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
; SSE-NEXT: cvttps2dq %xmm0, %xmm0
; SSE-NEXT: punpcklqdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; SSE-NEXT: # xmm0 = xmm0[0],mem[0]
; SSE-NEXT: movdqa {{.*#+}} xmm1 = [65535,65535,65535,65535]
; SSE-NEXT: movdqa %xmm1, %xmm2
; SSE-NEXT: pcmpgtd %xmm0, %xmm2
; SSE-NEXT: pand %xmm2, %xmm0
; SSE-NEXT: pandn %xmm1, %xmm2
; SSE-NEXT: por %xmm0, %xmm2
; SSE-NEXT: movdqa %xmm1, %xmm3
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: pcmpgtd %xmm0, %xmm3
; SSE-NEXT: pand %xmm3, %xmm0
; SSE-NEXT: pandn %xmm1, %xmm3
; SSE-NEXT: por %xmm0, %xmm3
; SSE-NEXT: pxor %xmm1, %xmm1
; SSE-NEXT: movdqa %xmm3, %xmm0
; SSE-NEXT: pcmpgtd %xmm1, %xmm0
; SSE-NEXT: pand %xmm3, %xmm0
; SSE-NEXT: movdqa %xmm2, %xmm3
; SSE-NEXT: pcmpgtd %xmm1, %xmm3
; SSE-NEXT: pand %xmm2, %xmm3
; SSE-NEXT: pslld $16, %xmm3
; SSE-NEXT: psrad $16, %xmm3
; SSE-NEXT: pslld $16, %xmm0
; SSE-NEXT: psrad $16, %xmm0
; SSE-NEXT: packssdw %xmm3, %xmm0
; SSE-NEXT: addq $72, %rsp
; SSE-NEXT: retq
;
; AVX2-LABEL: ustest_f16i16_mm:
; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vcvtph2ps %xmm0, %ymm0
; AVX2-NEXT: vcvttps2dq %ymm0, %ymm0
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vpackusdw %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512-LABEL: ustest_f16i16_mm:
; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vcvtph2ps %xmm0, %ymm0
; AVX512-NEXT: vcvttps2dq %ymm0, %ymm0
; AVX512-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX512-NEXT: vpmaxsd %ymm1, %ymm0, %ymm0
; AVX512-NEXT: vpmovusdw %ymm0, %xmm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
entry:
  %conv = fptosi <8 x half> %x to <8 x i32>
  %spec.store.select = call <8 x i32> @llvm.smin.v8i32(<8 x i32> %conv, <8 x i32> <i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535>)
  %spec.store.select7 = call <8 x i32> @llvm.smax.v8i32(<8 x i32> %spec.store.select, <8 x i32> zeroinitializer)
  %conv6 = trunc <8 x i32> %spec.store.select7 to <8 x i16>
  ret <8 x i16> %conv6
}

; i64 saturate

define <2 x i64> @stest_f64i64_mm(<2 x double> %x) nounwind {
; SSE-LABEL: stest_f64i64_mm:
; SSE: # %bb.0: # %entry
; SSE-NEXT: pushq %r14
; SSE-NEXT: pushq %rbx
; SSE-NEXT: subq $24, %rsp
; SSE-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
; SSE-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
; SSE-NEXT: callq __fixdfti@PLT
; SSE-NEXT: movq %rax, %rbx
; SSE-NEXT: movq %rdx, %r14
; SSE-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload
; SSE-NEXT: callq __fixdfti@PLT
; SSE-NEXT: xorl %ecx, %ecx
; SSE-NEXT: movabsq $9223372036854775807, %rsi # imm = 0x7FFFFFFFFFFFFFFF
; SSE-NEXT: cmpq %rsi, %rax
; SSE-NEXT: movq %rdx, %rdi
; SSE-NEXT: sbbq $0, %rdi
; SSE-NEXT: cmovgeq %rcx, %rdx
; SSE-NEXT: cmovgeq %rsi, %rax
; SSE-NEXT: cmpq %rsi, %rbx
; SSE-NEXT: movq %r14, %rdi
; SSE-NEXT: sbbq $0, %rdi
; SSE-NEXT: cmovlq %r14, %rcx
; SSE-NEXT: cmovlq %rbx, %rsi
; SSE-NEXT: movabsq $-9223372036854775808, %rdi # imm = 0x8000000000000000
; SSE-NEXT: cmpq %rsi, %rdi
; SSE-NEXT: movq $-1, %r8
; SSE-NEXT: movq $-1, %r9
; SSE-NEXT: sbbq %rcx, %r9
; SSE-NEXT: cmovgeq %rdi, %rsi
; SSE-NEXT: cmpq %rax, %rdi
; SSE-NEXT: sbbq %rdx, %r8
; SSE-NEXT: cmovgeq %rdi, %rax
; SSE-NEXT: movq %rax, %xmm0
; SSE-NEXT: movq %rsi, %xmm1
; SSE-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSE-NEXT: addq $24, %rsp
; SSE-NEXT: popq %rbx
; SSE-NEXT: popq %r14
; SSE-NEXT: retq
;
; AVX2-LABEL: stest_f64i64_mm:
; AVX2: # %bb.0: # %entry
; AVX2-NEXT: pushq %r14
; AVX2-NEXT: pushq %rbx
; AVX2-NEXT: subq $24, %rsp
; AVX2-NEXT: vmovapd %xmm0, (%rsp) # 16-byte Spill
; AVX2-NEXT: vshufpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX2-NEXT: callq __fixdfti@PLT
; AVX2-NEXT: movq %rax, %rbx
; AVX2-NEXT: movq %rdx, %r14
; AVX2-NEXT: vmovaps (%rsp), %xmm0 # 16-byte Reload
; AVX2-NEXT: callq __fixdfti@PLT
; AVX2-NEXT: xorl %ecx, %ecx
; AVX2-NEXT: movabsq $9223372036854775807, %rsi # imm = 0x7FFFFFFFFFFFFFFF
; AVX2-NEXT: cmpq %rsi, %rax
; AVX2-NEXT: movq %rdx, %rdi
; AVX2-NEXT: sbbq $0, %rdi
; AVX2-NEXT: cmovgeq %rcx, %rdx
; AVX2-NEXT: cmovgeq %rsi, %rax
; AVX2-NEXT: cmpq %rsi, %rbx
; AVX2-NEXT: movq %r14, %rdi
; AVX2-NEXT: sbbq $0, %rdi
; AVX2-NEXT: cmovlq %r14, %rcx
; AVX2-NEXT: cmovlq %rbx, %rsi
; AVX2-NEXT: movabsq $-9223372036854775808, %rdi # imm = 0x8000000000000000
; AVX2-NEXT: cmpq %rsi, %rdi
; AVX2-NEXT: movq $-1, %r8
; AVX2-NEXT: sbbq %rcx, %r8
; AVX2-NEXT: movq $-1, %rcx
; AVX2-NEXT: cmovgeq %rdi, %rsi
; AVX2-NEXT: cmpq %rax, %rdi
; AVX2-NEXT: sbbq %rdx, %rcx
; AVX2-NEXT: cmovgeq %rdi, %rax
; AVX2-NEXT: vmovq %rax, %xmm0
; AVX2-NEXT: vmovq %rsi, %xmm1
; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; AVX2-NEXT: addq $24, %rsp
; AVX2-NEXT: popq %rbx
; AVX2-NEXT: popq %r14
; AVX2-NEXT: retq
;
; AVX512-LABEL: stest_f64i64_mm:
; AVX512: # %bb.0: # %entry
; AVX512-NEXT: pushq %r14
; AVX512-NEXT: pushq %rbx
; AVX512-NEXT: subq $24, %rsp
; AVX512-NEXT: vmovapd %xmm0, (%rsp) # 16-byte Spill
; AVX512-NEXT: vshufpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX512-NEXT: callq __fixdfti@PLT
; AVX512-NEXT: movq %rax, %rbx
; AVX512-NEXT: movq %rdx, %r14
; AVX512-NEXT: vmovaps (%rsp), %xmm0 # 16-byte Reload
; AVX512-NEXT: callq __fixdfti@PLT
; AVX512-NEXT: xorl %ecx, %ecx
; AVX512-NEXT: movabsq $9223372036854775807, %rsi # imm = 0x7FFFFFFFFFFFFFFF
; AVX512-NEXT: cmpq %rsi, %rax
; AVX512-NEXT: movq %rdx, %rdi
; AVX512-NEXT: sbbq $0, %rdi
; AVX512-NEXT: cmovgeq %rcx, %rdx
; AVX512-NEXT: cmovgeq %rsi, %rax
; AVX512-NEXT: cmpq %rsi, %rbx
; AVX512-NEXT: movq %r14, %rdi
; AVX512-NEXT: sbbq $0, %rdi
; AVX512-NEXT: cmovlq %r14, %rcx
; AVX512-NEXT: cmovlq %rbx, %rsi
; AVX512-NEXT: movabsq $-9223372036854775808, %rdi # imm = 0x8000000000000000
; AVX512-NEXT: cmpq %rsi, %rdi
; AVX512-NEXT: movq $-1, %r8
; AVX512-NEXT: movq $-1, %r9
; AVX512-NEXT: sbbq %rcx, %r9
; AVX512-NEXT: cmovgeq %rdi, %rsi
; AVX512-NEXT: cmpq %rax, %rdi
; AVX512-NEXT: sbbq %rdx, %r8
; AVX512-NEXT: cmovgeq %rdi, %rax
; AVX512-NEXT: vmovq %rax, %xmm0
; AVX512-NEXT: vmovq %rsi, %xmm1
; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; AVX512-NEXT: addq $24, %rsp
; AVX512-NEXT: popq %rbx
; AVX512-NEXT: popq %r14
; AVX512-NEXT: retq
entry:
  %conv = fptosi <2 x double> %x to <2 x i128>
  %spec.store.select = call <2 x i128> @llvm.smin.v2i128(<2 x i128> %conv, <2 x i128> <i128 9223372036854775807, i128 9223372036854775807>)
  %spec.store.select7 = call <2 x i128> @llvm.smax.v2i128(<2 x i128> %spec.store.select, <2 x i128> <i128 -9223372036854775808, i128 -9223372036854775808>)
  %conv6 = trunc <2 x i128> %spec.store.select7 to <2 x i64>
  ret <2 x i64> %conv6
}
define <2 x i64> @utest_f64i64_mm(<2 x double> %x) nounwind {
; SSE-LABEL: utest_f64i64_mm:
; SSE: # %bb.0: # %entry
; SSE-NEXT: pushq %r14
; SSE-NEXT: pushq %rbx
; SSE-NEXT: subq $24, %rsp
; SSE-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
; SSE-NEXT: callq __fixunsdfti@PLT
; SSE-NEXT: movq %rax, %rbx
; SSE-NEXT: movq %rdx, %r14
; SSE-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload
; SSE-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
; SSE-NEXT: callq __fixunsdfti@PLT
; SSE-NEXT: xorl %ecx, %ecx
; SSE-NEXT: testq %rdx, %rdx
; SSE-NEXT: cmovneq %rcx, %rax
; SSE-NEXT: testq %r14, %r14
; SSE-NEXT: cmovneq %rcx, %rbx
; SSE-NEXT: movq %rbx, %xmm0
; SSE-NEXT: movq %rax, %xmm1
; SSE-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSE-NEXT: addq $24, %rsp
; SSE-NEXT: popq %rbx
; SSE-NEXT: popq %r14
; SSE-NEXT: retq
;
; AVX-LABEL: utest_f64i64_mm:
; AVX: # %bb.0: # %entry
; AVX-NEXT: pushq %r14
; AVX-NEXT: pushq %rbx
; AVX-NEXT: subq $24, %rsp
; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX-NEXT: callq __fixunsdfti@PLT
; AVX-NEXT: movq %rax, %rbx
; AVX-NEXT: movq %rdx, %r14
; AVX-NEXT: vpermilpd $1, (%rsp), %xmm0 # 16-byte Folded Reload
; AVX-NEXT: # xmm0 = mem[1,0]
; AVX-NEXT: callq __fixunsdfti@PLT
; AVX-NEXT: xorl %ecx, %ecx
; AVX-NEXT: testq %rdx, %rdx
; AVX-NEXT: cmovneq %rcx, %rax
; AVX-NEXT: testq %r14, %r14
; AVX-NEXT: cmovneq %rcx, %rbx
; AVX-NEXT: vmovq %rbx, %xmm0
; AVX-NEXT: vmovq %rax, %xmm1
; AVX-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; AVX-NEXT: addq $24, %rsp
; AVX-NEXT: popq %rbx
; AVX-NEXT: popq %r14
; AVX-NEXT: retq
entry:
  %conv = fptoui <2 x double> %x to <2 x i128>
  %spec.store.select = call <2 x i128> @llvm.umin.v2i128(<2 x i128> %conv, <2 x i128> <i128 18446744073709551616, i128 18446744073709551616>)
  %conv6 = trunc <2 x i128> %spec.store.select to <2 x i64>
  ret <2 x i64> %conv6
}
define <2 x i64> @ustest_f64i64_mm(<2 x double> %x) nounwind {
; SSE-LABEL: ustest_f64i64_mm:
; SSE: # %bb.0: # %entry
; SSE-NEXT: pushq %r14
; SSE-NEXT: pushq %rbx
; SSE-NEXT: subq $24, %rsp
; SSE-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
; SSE-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
; SSE-NEXT: callq __fixdfti@PLT
; SSE-NEXT: movq %rax, %rbx
; SSE-NEXT: movq %rdx, %r14
; SSE-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload
; SSE-NEXT: callq __fixdfti@PLT
; SSE-NEXT: xorl %ecx, %ecx
; SSE-NEXT: testq %rdx, %rdx
; SSE-NEXT: cmovgq %rcx, %rax
; SSE-NEXT: movl $1, %esi
; SSE-NEXT: cmovgq %rsi, %rdx
; SSE-NEXT: testq %r14, %r14
; SSE-NEXT: cmovgq %rcx, %rbx
; SSE-NEXT: cmovleq %r14, %rsi
; SSE-NEXT: testq %rsi, %rsi
; SSE-NEXT: cmovsq %rcx, %rbx
; SSE-NEXT: testq %rdx, %rdx
; SSE-NEXT: cmovsq %rcx, %rax
; SSE-NEXT: movq %rax, %xmm0
; SSE-NEXT: movq %rbx, %xmm1
; SSE-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSE-NEXT: addq $24, %rsp
; SSE-NEXT: popq %rbx
; SSE-NEXT: popq %r14
; SSE-NEXT: retq
;
; AVX-LABEL: ustest_f64i64_mm:
; AVX: # %bb.0: # %entry
; AVX-NEXT: pushq %r14
; AVX-NEXT: pushq %rbx
; AVX-NEXT: subq $24, %rsp
; AVX-NEXT: vmovapd %xmm0, (%rsp) # 16-byte Spill
; AVX-NEXT: vshufpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX-NEXT: callq __fixdfti@PLT
; AVX-NEXT: movq %rax, %rbx
; AVX-NEXT: movq %rdx, %r14
; AVX-NEXT: vmovaps (%rsp), %xmm0 # 16-byte Reload
; AVX-NEXT: callq __fixdfti@PLT
; AVX-NEXT: xorl %ecx, %ecx
; AVX-NEXT: testq %rdx, %rdx
; AVX-NEXT: cmovgq %rcx, %rax
; AVX-NEXT: movl $1, %esi
; AVX-NEXT: cmovgq %rsi, %rdx
; AVX-NEXT: testq %r14, %r14
; AVX-NEXT: cmovgq %rcx, %rbx
; AVX-NEXT: cmovleq %r14, %rsi
; AVX-NEXT: testq %rsi, %rsi
; AVX-NEXT: cmovsq %rcx, %rbx
; AVX-NEXT: testq %rdx, %rdx
; AVX-NEXT: cmovsq %rcx, %rax
; AVX-NEXT: vmovq %rax, %xmm0
; AVX-NEXT: vmovq %rbx, %xmm1
; AVX-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; AVX-NEXT: addq $24, %rsp
; AVX-NEXT: popq %rbx
; AVX-NEXT: popq %r14
; AVX-NEXT: retq
entry:
  %conv = fptosi <2 x double> %x to <2 x i128>
  %spec.store.select = call <2 x i128> @llvm.smin.v2i128(<2 x i128> %conv, <2 x i128> <i128 18446744073709551616, i128 18446744073709551616>)
  %spec.store.select7 = call <2 x i128> @llvm.smax.v2i128(<2 x i128> %spec.store.select, <2 x i128> zeroinitializer)
  %conv6 = trunc <2 x i128> %spec.store.select7 to <2 x i64>
  ret <2 x i64> %conv6
}
define <2 x i64> @stest_f32i64_mm(<2 x float> %x) nounwind {
; SSE-LABEL: stest_f32i64_mm:
; SSE: # %bb.0: # %entry
; SSE-NEXT: pushq %r14
; SSE-NEXT: pushq %rbx
; SSE-NEXT: subq $24, %rsp
; SSE-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1,1,1]
; SSE-NEXT: callq __fixsfti@PLT
; SSE-NEXT: movq %rax, %rbx
; SSE-NEXT: movq %rdx, %r14
; SSE-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload
; SSE-NEXT: callq __fixsfti@PLT
; SSE-NEXT: xorl %ecx, %ecx
; SSE-NEXT: movabsq $9223372036854775807, %rsi # imm = 0x7FFFFFFFFFFFFFFF
; SSE-NEXT: cmpq %rsi, %rax
; SSE-NEXT: movq %rdx, %rdi
; SSE-NEXT: sbbq $0, %rdi
; SSE-NEXT: cmovgeq %rcx, %rdx
; SSE-NEXT: cmovgeq %rsi, %rax
; SSE-NEXT: cmpq %rsi, %rbx
; SSE-NEXT: movq %r14, %rdi
; SSE-NEXT: sbbq $0, %rdi
; SSE-NEXT: cmovlq %r14, %rcx
; SSE-NEXT: cmovlq %rbx, %rsi
; SSE-NEXT: movabsq $-9223372036854775808, %rdi # imm = 0x8000000000000000
; SSE-NEXT: cmpq %rsi, %rdi
; SSE-NEXT: movq $-1, %r8
; SSE-NEXT: movq $-1, %r9
; SSE-NEXT: sbbq %rcx, %r9
; SSE-NEXT: cmovgeq %rdi, %rsi
; SSE-NEXT: cmpq %rax, %rdi
; SSE-NEXT: sbbq %rdx, %r8
; SSE-NEXT: cmovgeq %rdi, %rax
; SSE-NEXT: movq %rax, %xmm0
; SSE-NEXT: movq %rsi, %xmm1
; SSE-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSE-NEXT: addq $24, %rsp
; SSE-NEXT: popq %rbx
; SSE-NEXT: popq %r14
; SSE-NEXT: retq
;
; AVX2-LABEL: stest_f32i64_mm:
; AVX2: # %bb.0: # %entry
; AVX2-NEXT: pushq %r14
; AVX2-NEXT: pushq %rbx
; AVX2-NEXT: subq $24, %rsp
; AVX2-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX2-NEXT: vmovshdup {{.*#+}} xmm0 = xmm0[1,1,3,3]
; AVX2-NEXT: callq __fixsfti@PLT
; AVX2-NEXT: movq %rax, %rbx
; AVX2-NEXT: movq %rdx, %r14
; AVX2-NEXT: vmovaps (%rsp), %xmm0 # 16-byte Reload
; AVX2-NEXT: callq __fixsfti@PLT
; AVX2-NEXT: xorl %ecx, %ecx
; AVX2-NEXT: movabsq $9223372036854775807, %rsi # imm = 0x7FFFFFFFFFFFFFFF
; AVX2-NEXT: cmpq %rsi, %rax
; AVX2-NEXT: movq %rdx, %rdi
; AVX2-NEXT: sbbq $0, %rdi
; AVX2-NEXT: cmovgeq %rcx, %rdx
; AVX2-NEXT: cmovgeq %rsi, %rax
; AVX2-NEXT: cmpq %rsi, %rbx
; AVX2-NEXT: movq %r14, %rdi
; AVX2-NEXT: sbbq $0, %rdi
; AVX2-NEXT: cmovlq %r14, %rcx
; AVX2-NEXT: cmovlq %rbx, %rsi
; AVX2-NEXT: movabsq $-9223372036854775808, %rdi # imm = 0x8000000000000000
; AVX2-NEXT: cmpq %rsi, %rdi
; AVX2-NEXT: movq $-1, %r8
; AVX2-NEXT: sbbq %rcx, %r8
; AVX2-NEXT: movq $-1, %rcx
; AVX2-NEXT: cmovgeq %rdi, %rsi
; AVX2-NEXT: cmpq %rax, %rdi
; AVX2-NEXT: sbbq %rdx, %rcx
; AVX2-NEXT: cmovgeq %rdi, %rax
; AVX2-NEXT: vmovq %rax, %xmm0
; AVX2-NEXT: vmovq %rsi, %xmm1
; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; AVX2-NEXT: addq $24, %rsp
; AVX2-NEXT: popq %rbx
; AVX2-NEXT: popq %r14
; AVX2-NEXT: retq
;
; AVX512-LABEL: stest_f32i64_mm:
; AVX512: # %bb.0: # %entry
; AVX512-NEXT: pushq %r14
; AVX512-NEXT: pushq %rbx
; AVX512-NEXT: subq $24, %rsp
; AVX512-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX512-NEXT: vmovshdup {{.*#+}} xmm0 = xmm0[1,1,3,3]
; AVX512-NEXT: callq __fixsfti@PLT
; AVX512-NEXT: movq %rax, %rbx
; AVX512-NEXT: movq %rdx, %r14
; AVX512-NEXT: vmovaps (%rsp), %xmm0 # 16-byte Reload
; AVX512-NEXT: callq __fixsfti@PLT
; AVX512-NEXT: xorl %ecx, %ecx
; AVX512-NEXT: movabsq $9223372036854775807, %rsi # imm = 0x7FFFFFFFFFFFFFFF
; AVX512-NEXT: cmpq %rsi, %rax
; AVX512-NEXT: movq %rdx, %rdi
; AVX512-NEXT: sbbq $0, %rdi
; AVX512-NEXT: cmovgeq %rcx, %rdx
; AVX512-NEXT: cmovgeq %rsi, %rax
; AVX512-NEXT: cmpq %rsi, %rbx
; AVX512-NEXT: movq %r14, %rdi
; AVX512-NEXT: sbbq $0, %rdi
; AVX512-NEXT: cmovlq %r14, %rcx
; AVX512-NEXT: cmovlq %rbx, %rsi
; AVX512-NEXT: movabsq $-9223372036854775808, %rdi # imm = 0x8000000000000000
; AVX512-NEXT: cmpq %rsi, %rdi
; AVX512-NEXT: movq $-1, %r8
; AVX512-NEXT: movq $-1, %r9
; AVX512-NEXT: sbbq %rcx, %r9
; AVX512-NEXT: cmovgeq %rdi, %rsi
; AVX512-NEXT: cmpq %rax, %rdi
; AVX512-NEXT: sbbq %rdx, %r8
; AVX512-NEXT: cmovgeq %rdi, %rax
; AVX512-NEXT: vmovq %rax, %xmm0
; AVX512-NEXT: vmovq %rsi, %xmm1
; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; AVX512-NEXT: addq $24, %rsp
; AVX512-NEXT: popq %rbx
; AVX512-NEXT: popq %r14
; AVX512-NEXT: retq
entry:
  %conv = fptosi <2 x float> %x to <2 x i128>
  %spec.store.select = call <2 x i128> @llvm.smin.v2i128(<2 x i128> %conv, <2 x i128> <i128 9223372036854775807, i128 9223372036854775807>)
  %spec.store.select7 = call <2 x i128> @llvm.smax.v2i128(<2 x i128> %spec.store.select, <2 x i128> <i128 -9223372036854775808, i128 -9223372036854775808>)
  %conv6 = trunc <2 x i128> %spec.store.select7 to <2 x i64>
  ret <2 x i64> %conv6
}
define <2 x i64> @utest_f32i64_mm(<2 x float> %x) nounwind {
; SSE-LABEL: utest_f32i64_mm:
; SSE: # %bb.0: # %entry
; SSE-NEXT: pushq %r14
; SSE-NEXT: pushq %rbx
; SSE-NEXT: subq $24, %rsp
; SSE-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
; SSE-NEXT: callq __fixunssfti@PLT
; SSE-NEXT: movq %rax, %rbx
; SSE-NEXT: movq %rdx, %r14
; SSE-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload
; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1,1,1]
; SSE-NEXT: callq __fixunssfti@PLT
; SSE-NEXT: xorl %ecx, %ecx
; SSE-NEXT: testq %rdx, %rdx
; SSE-NEXT: cmovneq %rcx, %rax
; SSE-NEXT: testq %r14, %r14
; SSE-NEXT: cmovneq %rcx, %rbx
; SSE-NEXT: movq %rbx, %xmm0
; SSE-NEXT: movq %rax, %xmm1
; SSE-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSE-NEXT: addq $24, %rsp
; SSE-NEXT: popq %rbx
; SSE-NEXT: popq %r14
; SSE-NEXT: retq
;
; AVX-LABEL: utest_f32i64_mm:
; AVX: # %bb.0: # %entry
; AVX-NEXT: pushq %r14
; AVX-NEXT: pushq %rbx
; AVX-NEXT: subq $24, %rsp
; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX-NEXT: callq __fixunssfti@PLT
; AVX-NEXT: movq %rax, %rbx
; AVX-NEXT: movq %rdx, %r14
; AVX-NEXT: vmovshdup (%rsp), %xmm0 # 16-byte Folded Reload
; AVX-NEXT: # xmm0 = mem[1,1,3,3]
; AVX-NEXT: callq __fixunssfti@PLT
; AVX-NEXT: xorl %ecx, %ecx
; AVX-NEXT: testq %rdx, %rdx
; AVX-NEXT: cmovneq %rcx, %rax
; AVX-NEXT: testq %r14, %r14
; AVX-NEXT: cmovneq %rcx, %rbx
; AVX-NEXT: vmovq %rbx, %xmm0
; AVX-NEXT: vmovq %rax, %xmm1
; AVX-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; AVX-NEXT: addq $24, %rsp
; AVX-NEXT: popq %rbx
; AVX-NEXT: popq %r14
; AVX-NEXT: retq
entry:
  %conv = fptoui <2 x float> %x to <2 x i128>
  %spec.store.select = call <2 x i128> @llvm.umin.v2i128(<2 x i128> %conv, <2 x i128> <i128 18446744073709551616, i128 18446744073709551616>)
  %conv6 = trunc <2 x i128> %spec.store.select to <2 x i64>
  ret <2 x i64> %conv6
}
define <2 x i64> @ustest_f32i64_mm(<2 x float> %x) nounwind {
; SSE-LABEL: ustest_f32i64_mm:
; SSE: # %bb.0: # %entry
; SSE-NEXT: pushq %r14
; SSE-NEXT: pushq %rbx
; SSE-NEXT: subq $24, %rsp
; SSE-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1,1,1]
; SSE-NEXT: callq __fixsfti@PLT
; SSE-NEXT: movq %rax, %rbx
; SSE-NEXT: movq %rdx, %r14
; SSE-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload
; SSE-NEXT: callq __fixsfti@PLT
; SSE-NEXT: xorl %ecx, %ecx
; SSE-NEXT: testq %rdx, %rdx
; SSE-NEXT: cmovgq %rcx, %rax
; SSE-NEXT: movl $1, %esi
; SSE-NEXT: cmovgq %rsi, %rdx
; SSE-NEXT: testq %r14, %r14
; SSE-NEXT: cmovgq %rcx, %rbx
; SSE-NEXT: cmovleq %r14, %rsi
; SSE-NEXT: testq %rsi, %rsi
; SSE-NEXT: cmovsq %rcx, %rbx
; SSE-NEXT: testq %rdx, %rdx
; SSE-NEXT: cmovsq %rcx, %rax
; SSE-NEXT: movq %rax, %xmm0
; SSE-NEXT: movq %rbx, %xmm1
; SSE-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSE-NEXT: addq $24, %rsp
; SSE-NEXT: popq %rbx
; SSE-NEXT: popq %r14
; SSE-NEXT: retq
;
; AVX-LABEL: ustest_f32i64_mm:
; AVX: # %bb.0: # %entry
; AVX-NEXT: pushq %r14
; AVX-NEXT: pushq %rbx
; AVX-NEXT: subq $24, %rsp
; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX-NEXT: vmovshdup {{.*#+}} xmm0 = xmm0[1,1,3,3]
; AVX-NEXT: callq __fixsfti@PLT
; AVX-NEXT: movq %rax, %rbx
; AVX-NEXT: movq %rdx, %r14
; AVX-NEXT: vmovaps (%rsp), %xmm0 # 16-byte Reload
; AVX-NEXT: callq __fixsfti@PLT
; AVX-NEXT: xorl %ecx, %ecx
; AVX-NEXT: testq %rdx, %rdx
; AVX-NEXT: cmovgq %rcx, %rax
; AVX-NEXT: movl $1, %esi
; AVX-NEXT: cmovgq %rsi, %rdx
; AVX-NEXT: testq %r14, %r14
; AVX-NEXT: cmovgq %rcx, %rbx
; AVX-NEXT: cmovleq %r14, %rsi
; AVX-NEXT: testq %rsi, %rsi
; AVX-NEXT: cmovsq %rcx, %rbx
; AVX-NEXT: testq %rdx, %rdx
; AVX-NEXT: cmovsq %rcx, %rax
; AVX-NEXT: vmovq %rax, %xmm0
; AVX-NEXT: vmovq %rbx, %xmm1
; AVX-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; AVX-NEXT: addq $24, %rsp
; AVX-NEXT: popq %rbx
; AVX-NEXT: popq %r14
; AVX-NEXT: retq
entry:
  %conv = fptosi <2 x float> %x to <2 x i128>
  %spec.store.select = call <2 x i128> @llvm.smin.v2i128(<2 x i128> %conv, <2 x i128> <i128 18446744073709551616, i128 18446744073709551616>)
  %spec.store.select7 = call <2 x i128> @llvm.smax.v2i128(<2 x i128> %spec.store.select, <2 x i128> zeroinitializer)
  %conv6 = trunc <2 x i128> %spec.store.select7 to <2 x i64>
  ret <2 x i64> %conv6
}
define <2 x i64> @stest_f16i64_mm(<2 x half> %x) nounwind {
; SSE-LABEL: stest_f16i64_mm:
; SSE: # %bb.0: # %entry
; SSE-NEXT: pushq %r14
; SSE-NEXT: pushq %rbx
; SSE-NEXT: subq $24, %rsp
; SSE-NEXT: movdqa %xmm0, (%rsp) # 16-byte Spill
; SSE-NEXT: psrld $16, %xmm0
; SSE-NEXT: callq __fixhfti@PLT
; SSE-NEXT: movq %rax, %rbx
; SSE-NEXT: movq %rdx, %r14
; SSE-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload
; SSE-NEXT: callq __fixhfti@PLT
; SSE-NEXT: xorl %ecx, %ecx
; SSE-NEXT: movabsq $9223372036854775807, %rsi # imm = 0x7FFFFFFFFFFFFFFF
; SSE-NEXT: cmpq %rsi, %rax
; SSE-NEXT: movq %rdx, %rdi
; SSE-NEXT: sbbq $0, %rdi
; SSE-NEXT: cmovgeq %rcx, %rdx
; SSE-NEXT: cmovgeq %rsi, %rax
; SSE-NEXT: cmpq %rsi, %rbx
; SSE-NEXT: movq %r14, %rdi
; SSE-NEXT: sbbq $0, %rdi
; SSE-NEXT: cmovlq %r14, %rcx
; SSE-NEXT: cmovlq %rbx, %rsi
; SSE-NEXT: movabsq $-9223372036854775808, %rdi # imm = 0x8000000000000000
; SSE-NEXT: cmpq %rsi, %rdi
; SSE-NEXT: movq $-1, %r8
; SSE-NEXT: movq $-1, %r9
; SSE-NEXT: sbbq %rcx, %r9
; SSE-NEXT: cmovgeq %rdi, %rsi
; SSE-NEXT: cmpq %rax, %rdi
; SSE-NEXT: sbbq %rdx, %r8
; SSE-NEXT: cmovgeq %rdi, %rax
; SSE-NEXT: movq %rax, %xmm0
; SSE-NEXT: movq %rsi, %xmm1
; SSE-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSE-NEXT: addq $24, %rsp
; SSE-NEXT: popq %rbx
; SSE-NEXT: popq %r14
; SSE-NEXT: retq
;
; AVX2-LABEL: stest_f16i64_mm:
; AVX2: # %bb.0: # %entry
; AVX2-NEXT: pushq %r14
; AVX2-NEXT: pushq %rbx
; AVX2-NEXT: subq $24, %rsp
; AVX2-NEXT: vmovdqa %xmm0, (%rsp) # 16-byte Spill
; AVX2-NEXT: vpsrld $16, %xmm0, %xmm0
; AVX2-NEXT: callq __fixhfti@PLT
; AVX2-NEXT: movq %rax, %rbx
; AVX2-NEXT: movq %rdx, %r14
; AVX2-NEXT: vmovaps (%rsp), %xmm0 # 16-byte Reload
; AVX2-NEXT: callq __fixhfti@PLT
; AVX2-NEXT: xorl %ecx, %ecx
; AVX2-NEXT: movabsq $9223372036854775807, %rsi # imm = 0x7FFFFFFFFFFFFFFF
; AVX2-NEXT: cmpq %rsi, %rax
; AVX2-NEXT: movq %rdx, %rdi
; AVX2-NEXT: sbbq $0, %rdi
; AVX2-NEXT: cmovgeq %rcx, %rdx
; AVX2-NEXT: cmovgeq %rsi, %rax
; AVX2-NEXT: cmpq %rsi, %rbx
; AVX2-NEXT: movq %r14, %rdi
; AVX2-NEXT: sbbq $0, %rdi
; AVX2-NEXT: cmovlq %r14, %rcx
; AVX2-NEXT: cmovlq %rbx, %rsi
; AVX2-NEXT: movabsq $-9223372036854775808, %rdi # imm = 0x8000000000000000
; AVX2-NEXT: cmpq %rsi, %rdi
; AVX2-NEXT: movq $-1, %r8
; AVX2-NEXT: sbbq %rcx, %r8
; AVX2-NEXT: movq $-1, %rcx
; AVX2-NEXT: cmovgeq %rdi, %rsi
; AVX2-NEXT: cmpq %rax, %rdi
; AVX2-NEXT: sbbq %rdx, %rcx
; AVX2-NEXT: cmovgeq %rdi, %rax
; AVX2-NEXT: vmovq %rax, %xmm0
; AVX2-NEXT: vmovq %rsi, %xmm1
; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; AVX2-NEXT: addq $24, %rsp
; AVX2-NEXT: popq %rbx
; AVX2-NEXT: popq %r14
; AVX2-NEXT: retq
;
; AVX512-LABEL: stest_f16i64_mm:
; AVX512: # %bb.0: # %entry
; AVX512-NEXT: pushq %r14
; AVX512-NEXT: pushq %rbx
; AVX512-NEXT: subq $24, %rsp
; AVX512-NEXT: vmovdqa %xmm0, (%rsp) # 16-byte Spill
; AVX512-NEXT: vpsrld $16, %xmm0, %xmm0
; AVX512-NEXT: callq __fixhfti@PLT
; AVX512-NEXT: movq %rax, %rbx
; AVX512-NEXT: movq %rdx, %r14
; AVX512-NEXT: vmovaps (%rsp), %xmm0 # 16-byte Reload
; AVX512-NEXT: callq __fixhfti@PLT
; AVX512-NEXT: xorl %ecx, %ecx
; AVX512-NEXT: movabsq $9223372036854775807, %rsi # imm = 0x7FFFFFFFFFFFFFFF
; AVX512-NEXT: cmpq %rsi, %rax
; AVX512-NEXT: movq %rdx, %rdi
; AVX512-NEXT: sbbq $0, %rdi
; AVX512-NEXT: cmovgeq %rcx, %rdx
; AVX512-NEXT: cmovgeq %rsi, %rax
; AVX512-NEXT: cmpq %rsi, %rbx
; AVX512-NEXT: movq %r14, %rdi
; AVX512-NEXT: sbbq $0, %rdi
; AVX512-NEXT: cmovlq %r14, %rcx
; AVX512-NEXT: cmovlq %rbx, %rsi
; AVX512-NEXT: movabsq $-9223372036854775808, %rdi # imm = 0x8000000000000000
; AVX512-NEXT: cmpq %rsi, %rdi
; AVX512-NEXT: movq $-1, %r8
; AVX512-NEXT: movq $-1, %r9
; AVX512-NEXT: sbbq %rcx, %r9
; AVX512-NEXT: cmovgeq %rdi, %rsi
; AVX512-NEXT: cmpq %rax, %rdi
; AVX512-NEXT: sbbq %rdx, %r8
; AVX512-NEXT: cmovgeq %rdi, %rax
; AVX512-NEXT: vmovq %rax, %xmm0
; AVX512-NEXT: vmovq %rsi, %xmm1
; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; AVX512-NEXT: addq $24, %rsp
; AVX512-NEXT: popq %rbx
; AVX512-NEXT: popq %r14
; AVX512-NEXT: retq
entry:
  %conv = fptosi <2 x half> %x to <2 x i128>
  %spec.store.select = call <2 x i128> @llvm.smin.v2i128(<2 x i128> %conv, <2 x i128> <i128 9223372036854775807, i128 9223372036854775807>)
  %spec.store.select7 = call <2 x i128> @llvm.smax.v2i128(<2 x i128> %spec.store.select, <2 x i128> <i128 -9223372036854775808, i128 -9223372036854775808>)
  %conv6 = trunc <2 x i128> %spec.store.select7 to <2 x i64>
  ret <2 x i64> %conv6
}
define <2 x i64> @utesth_f16i64_mm(<2 x half> %x) nounwind {
; SSE-LABEL: utesth_f16i64_mm:
; SSE: # %bb.0: # %entry
; SSE-NEXT: pushq %r14
; SSE-NEXT: pushq %rbx
; SSE-NEXT: subq $24, %rsp
; SSE-NEXT: movdqa %xmm0, %xmm1
; SSE-NEXT: psrld $16, %xmm1
; SSE-NEXT: movdqa %xmm1, (%rsp) # 16-byte Spill
; SSE-NEXT: callq __fixunshfti@PLT
; SSE-NEXT: movq %rax, %rbx
; SSE-NEXT: movq %rdx, %r14
; SSE-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload
; SSE-NEXT: callq __fixunshfti@PLT
; SSE-NEXT: xorl %ecx, %ecx
; SSE-NEXT: testq %rdx, %rdx
; SSE-NEXT: cmovneq %rcx, %rax
; SSE-NEXT: testq %r14, %r14
; SSE-NEXT: cmovneq %rcx, %rbx
; SSE-NEXT: movq %rbx, %xmm0
; SSE-NEXT: movq %rax, %xmm1
; SSE-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSE-NEXT: addq $24, %rsp
; SSE-NEXT: popq %rbx
; SSE-NEXT: popq %r14
; SSE-NEXT: retq
;
; AVX2-LABEL: utesth_f16i64_mm:
; AVX2: # %bb.0: # %entry
; AVX2-NEXT: pushq %r14
; AVX2-NEXT: pushq %rbx
; AVX2-NEXT: subq $24, %rsp
; AVX2-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX2-NEXT: callq __fixunshfti@PLT
; AVX2-NEXT: movq %rax, %rbx
; AVX2-NEXT: movq %rdx, %r14
; AVX2-NEXT: vmovdqa (%rsp), %xmm0 # 16-byte Reload
; AVX2-NEXT: vpsrld $16, %xmm0, %xmm0
; AVX2-NEXT: callq __fixunshfti@PLT
; AVX2-NEXT: xorl %ecx, %ecx
; AVX2-NEXT: testq %rdx, %rdx
; AVX2-NEXT: cmovneq %rcx, %rax
; AVX2-NEXT: testq %r14, %r14
; AVX2-NEXT: cmovneq %rcx, %rbx
; AVX2-NEXT: vmovq %rbx, %xmm0
; AVX2-NEXT: vmovq %rax, %xmm1
; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; AVX2-NEXT: addq $24, %rsp
; AVX2-NEXT: popq %rbx
; AVX2-NEXT: popq %r14
; AVX2-NEXT: retq
;
; AVX512-LABEL: utesth_f16i64_mm:
; AVX512: # %bb.0: # %entry
; AVX512-NEXT: pushq %r14
; AVX512-NEXT: pushq %rbx
; AVX512-NEXT: subq $24, %rsp
; AVX512-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX512-NEXT: callq __fixunshfti@PLT
; AVX512-NEXT: movq %rax, %rbx
; AVX512-NEXT: movq %rdx, %r14
; AVX512-NEXT: vpsrld $16, (%rsp), %xmm0 # 16-byte Folded Reload
; AVX512-NEXT: callq __fixunshfti@PLT
; AVX512-NEXT: xorl %ecx, %ecx
; AVX512-NEXT: testq %rdx, %rdx
; AVX512-NEXT: cmovneq %rcx, %rax
; AVX512-NEXT: testq %r14, %r14
; AVX512-NEXT: cmovneq %rcx, %rbx
; AVX512-NEXT: vmovq %rbx, %xmm0
; AVX512-NEXT: vmovq %rax, %xmm1
; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; AVX512-NEXT: addq $24, %rsp
; AVX512-NEXT: popq %rbx
; AVX512-NEXT: popq %r14
; AVX512-NEXT: retq
entry:
  %conv = fptoui <2 x half> %x to <2 x i128>
  %spec.store.select = call <2 x i128> @llvm.umin.v2i128(<2 x i128> %conv, <2 x i128> <i128 18446744073709551616, i128 18446744073709551616>)
  %conv6 = trunc <2 x i128> %spec.store.select to <2 x i64>
  ret <2 x i64> %conv6
}
define <2 x i64> @ustest_f16i64_mm(<2 x half> %x) nounwind {
; SSE-LABEL: ustest_f16i64_mm:
; SSE: # %bb.0: # %entry
; SSE-NEXT: pushq %r14
; SSE-NEXT: pushq %rbx
; SSE-NEXT: subq $24, %rsp
; SSE-NEXT: movdqa %xmm0, (%rsp) # 16-byte Spill
; SSE-NEXT: psrld $16, %xmm0
; SSE-NEXT: callq __fixhfti@PLT
; SSE-NEXT: movq %rax, %rbx
; SSE-NEXT: movq %rdx, %r14
; SSE-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload
; SSE-NEXT: callq __fixhfti@PLT
; SSE-NEXT: xorl %ecx, %ecx
; SSE-NEXT: testq %rdx, %rdx
; SSE-NEXT: cmovgq %rcx, %rax
; SSE-NEXT: movl $1, %esi
; SSE-NEXT: cmovgq %rsi, %rdx
; SSE-NEXT: testq %r14, %r14
; SSE-NEXT: cmovgq %rcx, %rbx
; SSE-NEXT: cmovleq %r14, %rsi
; SSE-NEXT: testq %rsi, %rsi
; SSE-NEXT: cmovsq %rcx, %rbx
; SSE-NEXT: testq %rdx, %rdx
; SSE-NEXT: cmovsq %rcx, %rax
; SSE-NEXT: movq %rax, %xmm0
; SSE-NEXT: movq %rbx, %xmm1
; SSE-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSE-NEXT: addq $24, %rsp
; SSE-NEXT: popq %rbx
; SSE-NEXT: popq %r14
; SSE-NEXT: retq
;
; AVX-LABEL: ustest_f16i64_mm:
; AVX: # %bb.0: # %entry
; AVX-NEXT: pushq %r14
; AVX-NEXT: pushq %rbx
; AVX-NEXT: subq $24, %rsp
; AVX-NEXT: vmovdqa %xmm0, (%rsp) # 16-byte Spill
; AVX-NEXT: vpsrld $16, %xmm0, %xmm0
; AVX-NEXT: callq __fixhfti@PLT
; AVX-NEXT: movq %rax, %rbx
; AVX-NEXT: movq %rdx, %r14
; AVX-NEXT: vmovaps (%rsp), %xmm0 # 16-byte Reload
; AVX-NEXT: callq __fixhfti@PLT
; AVX-NEXT: xorl %ecx, %ecx
; AVX-NEXT: testq %rdx, %rdx
; AVX-NEXT: cmovgq %rcx, %rax
; AVX-NEXT: movl $1, %esi
; AVX-NEXT: cmovgq %rsi, %rdx
; AVX-NEXT: testq %r14, %r14
; AVX-NEXT: cmovgq %rcx, %rbx
; AVX-NEXT: cmovleq %r14, %rsi
; AVX-NEXT: testq %rsi, %rsi
; AVX-NEXT: cmovsq %rcx, %rbx
; AVX-NEXT: testq %rdx, %rdx
; AVX-NEXT: cmovsq %rcx, %rax
; AVX-NEXT: vmovq %rax, %xmm0
; AVX-NEXT: vmovq %rbx, %xmm1
; AVX-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; AVX-NEXT: addq $24, %rsp
; AVX-NEXT: popq %rbx
; AVX-NEXT: popq %r14
; AVX-NEXT: retq
entry:
  %conv = fptosi <2 x half> %x to <2 x i128>
  %spec.store.select = call <2 x i128> @llvm.smin.v2i128(<2 x i128> %conv, <2 x i128> <i128 18446744073709551616, i128 18446744073709551616>)
  %spec.store.select7 = call <2 x i128> @llvm.smax.v2i128(<2 x i128> %spec.store.select, <2 x i128> zeroinitializer)
  %conv6 = trunc <2 x i128> %spec.store.select7 to <2 x i64>
  ret <2 x i64> %conv6
}
declare <2 x i32> @llvm.smin.v2i32(<2 x i32>, <2 x i32>)
declare <2 x i32> @llvm.smax.v2i32(<2 x i32>, <2 x i32>)
declare <2 x i32> @llvm.umin.v2i32(<2 x i32>, <2 x i32>)
declare <4 x i32> @llvm.smin.v4i32(<4 x i32>, <4 x i32>)
declare <4 x i32> @llvm.smax.v4i32(<4 x i32>, <4 x i32>)
declare <4 x i32> @llvm.umin.v4i32(<4 x i32>, <4 x i32>)
declare <8 x i32> @llvm.smin.v8i32(<8 x i32>, <8 x i32>)
declare <8 x i32> @llvm.smax.v8i32(<8 x i32>, <8 x i32>)
declare <8 x i32> @llvm.umin.v8i32(<8 x i32>, <8 x i32>)
declare <2 x i64> @llvm.smin.v2i64(<2 x i64>, <2 x i64>)
declare <2 x i64> @llvm.smax.v2i64(<2 x i64>, <2 x i64>)
declare <2 x i64> @llvm.umin.v2i64(<2 x i64>, <2 x i64>)
declare <4 x i64> @llvm.smin.v4i64(<4 x i64>, <4 x i64>)
declare <4 x i64> @llvm.smax.v4i64(<4 x i64>, <4 x i64>)
declare <4 x i64> @llvm.umin.v4i64(<4 x i64>, <4 x i64>)
declare <2 x i128> @llvm.smin.v2i128(<2 x i128>, <2 x i128>)
declare <2 x i128> @llvm.smax.v2i128(<2 x i128>, <2 x i128>)
declare <2 x i128> @llvm.umin.v2i128(<2 x i128>, <2 x i128>)