1 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSE2
3 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSE41
4 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.2 | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSE42
5 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX1
6 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX2
7 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX512 --check-prefix=AVX512F
8 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bw | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX512 --check-prefix=AVX512BW
11 ; Unsigned Maximum (GT)
14 define <2 x i64> @max_gt_v2i64(<2 x i64> %a, <2 x i64> %b) {
15 ; SSE2-LABEL: max_gt_v2i64:
17 ; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [9223372039002259456,9223372039002259456]
18 ; SSE2-NEXT: movdqa %xmm1, %xmm3
19 ; SSE2-NEXT: pxor %xmm2, %xmm3
20 ; SSE2-NEXT: pxor %xmm0, %xmm2
21 ; SSE2-NEXT: movdqa %xmm2, %xmm4
22 ; SSE2-NEXT: pcmpgtd %xmm3, %xmm4
23 ; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2]
24 ; SSE2-NEXT: pcmpeqd %xmm3, %xmm2
25 ; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
26 ; SSE2-NEXT: pand %xmm5, %xmm2
27 ; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm4[1,1,3,3]
28 ; SSE2-NEXT: por %xmm2, %xmm3
29 ; SSE2-NEXT: pand %xmm3, %xmm0
30 ; SSE2-NEXT: pandn %xmm1, %xmm3
31 ; SSE2-NEXT: por %xmm3, %xmm0
34 ; SSE41-LABEL: max_gt_v2i64:
36 ; SSE41-NEXT: movdqa %xmm0, %xmm2
37 ; SSE41-NEXT: movdqa {{.*#+}} xmm3 = [9223372039002259456,9223372039002259456]
38 ; SSE41-NEXT: movdqa %xmm1, %xmm0
39 ; SSE41-NEXT: pxor %xmm3, %xmm0
40 ; SSE41-NEXT: pxor %xmm2, %xmm3
41 ; SSE41-NEXT: movdqa %xmm3, %xmm4
42 ; SSE41-NEXT: pcmpeqd %xmm0, %xmm4
43 ; SSE41-NEXT: pcmpgtd %xmm0, %xmm3
44 ; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm3[0,0,2,2]
45 ; SSE41-NEXT: pand %xmm4, %xmm0
46 ; SSE41-NEXT: por %xmm3, %xmm0
47 ; SSE41-NEXT: blendvpd %xmm0, %xmm2, %xmm1
48 ; SSE41-NEXT: movapd %xmm1, %xmm0
51 ; SSE42-LABEL: max_gt_v2i64:
53 ; SSE42-NEXT: movdqa %xmm0, %xmm2
54 ; SSE42-NEXT: movdqa {{.*#+}} xmm0 = [9223372036854775808,9223372036854775808]
55 ; SSE42-NEXT: movdqa %xmm1, %xmm3
56 ; SSE42-NEXT: pxor %xmm0, %xmm3
57 ; SSE42-NEXT: pxor %xmm2, %xmm0
58 ; SSE42-NEXT: pcmpgtq %xmm3, %xmm0
59 ; SSE42-NEXT: blendvpd %xmm0, %xmm2, %xmm1
60 ; SSE42-NEXT: movapd %xmm1, %xmm0
63 ; AVX1-LABEL: max_gt_v2i64:
65 ; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
66 ; AVX1-NEXT: vpxor %xmm2, %xmm1, %xmm3
67 ; AVX1-NEXT: vpxor %xmm2, %xmm0, %xmm2
68 ; AVX1-NEXT: vpcmpgtq %xmm3, %xmm2, %xmm2
69 ; AVX1-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
72 ; AVX2-LABEL: max_gt_v2i64:
74 ; AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
75 ; AVX2-NEXT: vpxor %xmm2, %xmm1, %xmm3
76 ; AVX2-NEXT: vpxor %xmm2, %xmm0, %xmm2
77 ; AVX2-NEXT: vpcmpgtq %xmm3, %xmm2, %xmm2
78 ; AVX2-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
81 ; AVX512-LABEL: max_gt_v2i64:
83 ; AVX512-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
84 ; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
85 ; AVX512-NEXT: vpmaxuq %zmm1, %zmm0, %zmm0
86 ; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
87 ; AVX512-NEXT: vzeroupper
89 %1 = icmp ugt <2 x i64> %a, %b
90 %2 = select <2 x i1> %1, <2 x i64> %a, <2 x i64> %b
94 define <4 x i64> @max_gt_v4i64(<4 x i64> %a, <4 x i64> %b) {
95 ; SSE2-LABEL: max_gt_v4i64:
97 ; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [9223372039002259456,9223372039002259456]
98 ; SSE2-NEXT: movdqa %xmm2, %xmm5
99 ; SSE2-NEXT: pxor %xmm4, %xmm5
100 ; SSE2-NEXT: movdqa %xmm0, %xmm6
101 ; SSE2-NEXT: pxor %xmm4, %xmm6
102 ; SSE2-NEXT: movdqa %xmm6, %xmm7
103 ; SSE2-NEXT: pcmpgtd %xmm5, %xmm7
104 ; SSE2-NEXT: pshufd {{.*#+}} xmm8 = xmm7[0,0,2,2]
105 ; SSE2-NEXT: pcmpeqd %xmm5, %xmm6
106 ; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm6[1,1,3,3]
107 ; SSE2-NEXT: pand %xmm8, %xmm5
108 ; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm7[1,1,3,3]
109 ; SSE2-NEXT: por %xmm5, %xmm6
110 ; SSE2-NEXT: pand %xmm6, %xmm0
111 ; SSE2-NEXT: pandn %xmm2, %xmm6
112 ; SSE2-NEXT: por %xmm6, %xmm0
113 ; SSE2-NEXT: movdqa %xmm3, %xmm2
114 ; SSE2-NEXT: pxor %xmm4, %xmm2
115 ; SSE2-NEXT: pxor %xmm1, %xmm4
116 ; SSE2-NEXT: movdqa %xmm4, %xmm5
117 ; SSE2-NEXT: pcmpgtd %xmm2, %xmm5
118 ; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm5[0,0,2,2]
119 ; SSE2-NEXT: pcmpeqd %xmm2, %xmm4
120 ; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm4[1,1,3,3]
121 ; SSE2-NEXT: pand %xmm6, %xmm2
122 ; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm5[1,1,3,3]
123 ; SSE2-NEXT: por %xmm2, %xmm4
124 ; SSE2-NEXT: pand %xmm4, %xmm1
125 ; SSE2-NEXT: pandn %xmm3, %xmm4
126 ; SSE2-NEXT: por %xmm4, %xmm1
129 ; SSE41-LABEL: max_gt_v4i64:
131 ; SSE41-NEXT: movdqa %xmm0, %xmm4
132 ; SSE41-NEXT: movdqa {{.*#+}} xmm5 = [9223372039002259456,9223372039002259456]
133 ; SSE41-NEXT: movdqa %xmm2, %xmm6
134 ; SSE41-NEXT: pxor %xmm5, %xmm6
135 ; SSE41-NEXT: movdqa %xmm0, %xmm7
136 ; SSE41-NEXT: pxor %xmm5, %xmm7
137 ; SSE41-NEXT: movdqa %xmm7, %xmm0
138 ; SSE41-NEXT: pcmpeqd %xmm6, %xmm0
139 ; SSE41-NEXT: pcmpgtd %xmm6, %xmm7
140 ; SSE41-NEXT: pshufd {{.*#+}} xmm6 = xmm7[0,0,2,2]
141 ; SSE41-NEXT: pand %xmm6, %xmm0
142 ; SSE41-NEXT: por %xmm7, %xmm0
143 ; SSE41-NEXT: blendvpd %xmm0, %xmm4, %xmm2
144 ; SSE41-NEXT: movdqa %xmm3, %xmm0
145 ; SSE41-NEXT: pxor %xmm5, %xmm0
146 ; SSE41-NEXT: pxor %xmm1, %xmm5
147 ; SSE41-NEXT: movdqa %xmm5, %xmm4
148 ; SSE41-NEXT: pcmpeqd %xmm0, %xmm4
149 ; SSE41-NEXT: pcmpgtd %xmm0, %xmm5
150 ; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm5[0,0,2,2]
151 ; SSE41-NEXT: pand %xmm4, %xmm0
152 ; SSE41-NEXT: por %xmm5, %xmm0
153 ; SSE41-NEXT: blendvpd %xmm0, %xmm1, %xmm3
154 ; SSE41-NEXT: movapd %xmm2, %xmm0
155 ; SSE41-NEXT: movapd %xmm3, %xmm1
158 ; SSE42-LABEL: max_gt_v4i64:
160 ; SSE42-NEXT: movdqa %xmm0, %xmm4
161 ; SSE42-NEXT: movdqa {{.*#+}} xmm5 = [9223372036854775808,9223372036854775808]
162 ; SSE42-NEXT: movdqa %xmm2, %xmm6
163 ; SSE42-NEXT: pxor %xmm5, %xmm6
164 ; SSE42-NEXT: pxor %xmm5, %xmm0
165 ; SSE42-NEXT: pcmpgtq %xmm6, %xmm0
166 ; SSE42-NEXT: blendvpd %xmm0, %xmm4, %xmm2
167 ; SSE42-NEXT: movdqa %xmm3, %xmm0
168 ; SSE42-NEXT: pxor %xmm5, %xmm0
169 ; SSE42-NEXT: pxor %xmm1, %xmm5
170 ; SSE42-NEXT: pcmpgtq %xmm0, %xmm5
171 ; SSE42-NEXT: movdqa %xmm5, %xmm0
172 ; SSE42-NEXT: blendvpd %xmm0, %xmm1, %xmm3
173 ; SSE42-NEXT: movapd %xmm2, %xmm0
174 ; SSE42-NEXT: movapd %xmm3, %xmm1
177 ; AVX1-LABEL: max_gt_v4i64:
179 ; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
180 ; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [9223372036854775808,9223372036854775808]
181 ; AVX1-NEXT: vpxor %xmm3, %xmm2, %xmm2
182 ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm4
183 ; AVX1-NEXT: vpxor %xmm3, %xmm4, %xmm4
184 ; AVX1-NEXT: vpcmpgtq %xmm2, %xmm4, %xmm2
185 ; AVX1-NEXT: vpxor %xmm3, %xmm1, %xmm4
186 ; AVX1-NEXT: vpxor %xmm3, %xmm0, %xmm3
187 ; AVX1-NEXT: vpcmpgtq %xmm4, %xmm3, %xmm3
188 ; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm3, %ymm2
189 ; AVX1-NEXT: vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
192 ; AVX2-LABEL: max_gt_v4i64:
194 ; AVX2-NEXT: vpbroadcastq {{.*#+}} ymm2 = [9223372036854775808,9223372036854775808,9223372036854775808,9223372036854775808]
195 ; AVX2-NEXT: vpxor %ymm2, %ymm1, %ymm3
196 ; AVX2-NEXT: vpxor %ymm2, %ymm0, %ymm2
197 ; AVX2-NEXT: vpcmpgtq %ymm3, %ymm2, %ymm2
198 ; AVX2-NEXT: vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
201 ; AVX512-LABEL: max_gt_v4i64:
203 ; AVX512-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
204 ; AVX512-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
205 ; AVX512-NEXT: vpmaxuq %zmm1, %zmm0, %zmm0
206 ; AVX512-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
208 %1 = icmp ugt <4 x i64> %a, %b
209 %2 = select <4 x i1> %1, <4 x i64> %a, <4 x i64> %b
213 define <4 x i32> @max_gt_v4i32(<4 x i32> %a, <4 x i32> %b) {
214 ; SSE2-LABEL: max_gt_v4i32:
216 ; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
217 ; SSE2-NEXT: movdqa %xmm1, %xmm3
218 ; SSE2-NEXT: pxor %xmm2, %xmm3
219 ; SSE2-NEXT: pxor %xmm0, %xmm2
220 ; SSE2-NEXT: pcmpgtd %xmm3, %xmm2
221 ; SSE2-NEXT: pand %xmm2, %xmm0
222 ; SSE2-NEXT: pandn %xmm1, %xmm2
223 ; SSE2-NEXT: por %xmm2, %xmm0
226 ; SSE41-LABEL: max_gt_v4i32:
228 ; SSE41-NEXT: pmaxud %xmm1, %xmm0
231 ; SSE42-LABEL: max_gt_v4i32:
233 ; SSE42-NEXT: pmaxud %xmm1, %xmm0
236 ; AVX-LABEL: max_gt_v4i32:
238 ; AVX-NEXT: vpmaxud %xmm1, %xmm0, %xmm0
240 %1 = icmp ugt <4 x i32> %a, %b
241 %2 = select <4 x i1> %1, <4 x i32> %a, <4 x i32> %b
245 define <8 x i32> @max_gt_v8i32(<8 x i32> %a, <8 x i32> %b) {
246 ; SSE2-LABEL: max_gt_v8i32:
248 ; SSE2-NEXT: movdqa {{.*#+}} xmm5 = [2147483648,2147483648,2147483648,2147483648]
249 ; SSE2-NEXT: movdqa %xmm2, %xmm6
250 ; SSE2-NEXT: pxor %xmm5, %xmm6
251 ; SSE2-NEXT: movdqa %xmm0, %xmm4
252 ; SSE2-NEXT: pxor %xmm5, %xmm4
253 ; SSE2-NEXT: pcmpgtd %xmm6, %xmm4
254 ; SSE2-NEXT: pand %xmm4, %xmm0
255 ; SSE2-NEXT: pandn %xmm2, %xmm4
256 ; SSE2-NEXT: por %xmm0, %xmm4
257 ; SSE2-NEXT: movdqa %xmm3, %xmm0
258 ; SSE2-NEXT: pxor %xmm5, %xmm0
259 ; SSE2-NEXT: pxor %xmm1, %xmm5
260 ; SSE2-NEXT: pcmpgtd %xmm0, %xmm5
261 ; SSE2-NEXT: pand %xmm5, %xmm1
262 ; SSE2-NEXT: pandn %xmm3, %xmm5
263 ; SSE2-NEXT: por %xmm5, %xmm1
264 ; SSE2-NEXT: movdqa %xmm4, %xmm0
267 ; SSE41-LABEL: max_gt_v8i32:
269 ; SSE41-NEXT: pmaxud %xmm2, %xmm0
270 ; SSE41-NEXT: pmaxud %xmm3, %xmm1
273 ; SSE42-LABEL: max_gt_v8i32:
275 ; SSE42-NEXT: pmaxud %xmm2, %xmm0
276 ; SSE42-NEXT: pmaxud %xmm3, %xmm1
279 ; AVX1-LABEL: max_gt_v8i32:
281 ; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
282 ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
283 ; AVX1-NEXT: vpmaxud %xmm2, %xmm3, %xmm2
284 ; AVX1-NEXT: vpmaxud %xmm1, %xmm0, %xmm0
285 ; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
288 ; AVX2-LABEL: max_gt_v8i32:
290 ; AVX2-NEXT: vpmaxud %ymm1, %ymm0, %ymm0
293 ; AVX512-LABEL: max_gt_v8i32:
295 ; AVX512-NEXT: vpmaxud %ymm1, %ymm0, %ymm0
297 %1 = icmp ugt <8 x i32> %a, %b
298 %2 = select <8 x i1> %1, <8 x i32> %a, <8 x i32> %b
302 define <8 x i16> @max_gt_v8i16(<8 x i16> %a, <8 x i16> %b) {
303 ; SSE2-LABEL: max_gt_v8i16:
305 ; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [32768,32768,32768,32768,32768,32768,32768,32768]
306 ; SSE2-NEXT: pxor %xmm2, %xmm1
307 ; SSE2-NEXT: pxor %xmm2, %xmm0
308 ; SSE2-NEXT: pmaxsw %xmm1, %xmm0
309 ; SSE2-NEXT: pxor %xmm2, %xmm0
312 ; SSE41-LABEL: max_gt_v8i16:
314 ; SSE41-NEXT: pmaxuw %xmm1, %xmm0
317 ; SSE42-LABEL: max_gt_v8i16:
319 ; SSE42-NEXT: pmaxuw %xmm1, %xmm0
322 ; AVX-LABEL: max_gt_v8i16:
324 ; AVX-NEXT: vpmaxuw %xmm1, %xmm0, %xmm0
326 %1 = icmp ugt <8 x i16> %a, %b
327 %2 = select <8 x i1> %1, <8 x i16> %a, <8 x i16> %b
331 define <16 x i16> @max_gt_v16i16(<16 x i16> %a, <16 x i16> %b) {
332 ; SSE2-LABEL: max_gt_v16i16:
334 ; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [32768,32768,32768,32768,32768,32768,32768,32768]
335 ; SSE2-NEXT: pxor %xmm4, %xmm2
336 ; SSE2-NEXT: pxor %xmm4, %xmm0
337 ; SSE2-NEXT: pmaxsw %xmm2, %xmm0
338 ; SSE2-NEXT: pxor %xmm4, %xmm0
339 ; SSE2-NEXT: pxor %xmm4, %xmm3
340 ; SSE2-NEXT: pxor %xmm4, %xmm1
341 ; SSE2-NEXT: pmaxsw %xmm3, %xmm1
342 ; SSE2-NEXT: pxor %xmm4, %xmm1
345 ; SSE41-LABEL: max_gt_v16i16:
347 ; SSE41-NEXT: pmaxuw %xmm2, %xmm0
348 ; SSE41-NEXT: pmaxuw %xmm3, %xmm1
351 ; SSE42-LABEL: max_gt_v16i16:
353 ; SSE42-NEXT: pmaxuw %xmm2, %xmm0
354 ; SSE42-NEXT: pmaxuw %xmm3, %xmm1
357 ; AVX1-LABEL: max_gt_v16i16:
359 ; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
360 ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
361 ; AVX1-NEXT: vpmaxuw %xmm2, %xmm3, %xmm2
362 ; AVX1-NEXT: vpmaxuw %xmm1, %xmm0, %xmm0
363 ; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
366 ; AVX2-LABEL: max_gt_v16i16:
368 ; AVX2-NEXT: vpmaxuw %ymm1, %ymm0, %ymm0
371 ; AVX512-LABEL: max_gt_v16i16:
373 ; AVX512-NEXT: vpmaxuw %ymm1, %ymm0, %ymm0
375 %1 = icmp ugt <16 x i16> %a, %b
376 %2 = select <16 x i1> %1, <16 x i16> %a, <16 x i16> %b
380 define <16 x i8> @max_gt_v16i8(<16 x i8> %a, <16 x i8> %b) {
381 ; SSE-LABEL: max_gt_v16i8:
383 ; SSE-NEXT: pmaxub %xmm1, %xmm0
386 ; AVX-LABEL: max_gt_v16i8:
388 ; AVX-NEXT: vpmaxub %xmm1, %xmm0, %xmm0
390 %1 = icmp ugt <16 x i8> %a, %b
391 %2 = select <16 x i1> %1, <16 x i8> %a, <16 x i8> %b
395 define <32 x i8> @max_gt_v32i8(<32 x i8> %a, <32 x i8> %b) {
396 ; SSE-LABEL: max_gt_v32i8:
398 ; SSE-NEXT: pmaxub %xmm2, %xmm0
399 ; SSE-NEXT: pmaxub %xmm3, %xmm1
402 ; AVX1-LABEL: max_gt_v32i8:
404 ; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
405 ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
406 ; AVX1-NEXT: vpmaxub %xmm2, %xmm3, %xmm2
407 ; AVX1-NEXT: vpmaxub %xmm1, %xmm0, %xmm0
408 ; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
411 ; AVX2-LABEL: max_gt_v32i8:
413 ; AVX2-NEXT: vpmaxub %ymm1, %ymm0, %ymm0
416 ; AVX512-LABEL: max_gt_v32i8:
418 ; AVX512-NEXT: vpmaxub %ymm1, %ymm0, %ymm0
420 %1 = icmp ugt <32 x i8> %a, %b
421 %2 = select <32 x i1> %1, <32 x i8> %a, <32 x i8> %b
426 ; Unsigned Maximum (GE)
429 define <2 x i64> @max_ge_v2i64(<2 x i64> %a, <2 x i64> %b) {
430 ; SSE2-LABEL: max_ge_v2i64:
432 ; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [9223372039002259456,9223372039002259456]
433 ; SSE2-NEXT: movdqa %xmm1, %xmm3
434 ; SSE2-NEXT: pxor %xmm2, %xmm3
435 ; SSE2-NEXT: pxor %xmm0, %xmm2
436 ; SSE2-NEXT: movdqa %xmm2, %xmm4
437 ; SSE2-NEXT: pcmpgtd %xmm3, %xmm4
438 ; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2]
439 ; SSE2-NEXT: pcmpeqd %xmm3, %xmm2
440 ; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
441 ; SSE2-NEXT: pand %xmm5, %xmm2
442 ; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm4[1,1,3,3]
443 ; SSE2-NEXT: por %xmm2, %xmm3
444 ; SSE2-NEXT: pand %xmm3, %xmm0
445 ; SSE2-NEXT: pandn %xmm1, %xmm3
446 ; SSE2-NEXT: por %xmm3, %xmm0
449 ; SSE41-LABEL: max_ge_v2i64:
451 ; SSE41-NEXT: movdqa %xmm0, %xmm2
452 ; SSE41-NEXT: movdqa {{.*#+}} xmm3 = [9223372039002259456,9223372039002259456]
453 ; SSE41-NEXT: movdqa %xmm1, %xmm0
454 ; SSE41-NEXT: pxor %xmm3, %xmm0
455 ; SSE41-NEXT: pxor %xmm2, %xmm3
456 ; SSE41-NEXT: movdqa %xmm3, %xmm4
457 ; SSE41-NEXT: pcmpeqd %xmm0, %xmm4
458 ; SSE41-NEXT: pcmpgtd %xmm0, %xmm3
459 ; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm3[0,0,2,2]
460 ; SSE41-NEXT: pand %xmm4, %xmm0
461 ; SSE41-NEXT: por %xmm3, %xmm0
462 ; SSE41-NEXT: blendvpd %xmm0, %xmm2, %xmm1
463 ; SSE41-NEXT: movapd %xmm1, %xmm0
466 ; SSE42-LABEL: max_ge_v2i64:
468 ; SSE42-NEXT: movdqa %xmm0, %xmm2
469 ; SSE42-NEXT: movdqa {{.*#+}} xmm0 = [9223372036854775808,9223372036854775808]
470 ; SSE42-NEXT: movdqa %xmm1, %xmm3
471 ; SSE42-NEXT: pxor %xmm0, %xmm3
472 ; SSE42-NEXT: pxor %xmm2, %xmm0
473 ; SSE42-NEXT: pcmpgtq %xmm3, %xmm0
474 ; SSE42-NEXT: blendvpd %xmm0, %xmm2, %xmm1
475 ; SSE42-NEXT: movapd %xmm1, %xmm0
478 ; AVX1-LABEL: max_ge_v2i64:
480 ; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
481 ; AVX1-NEXT: vpxor %xmm2, %xmm1, %xmm3
482 ; AVX1-NEXT: vpxor %xmm2, %xmm0, %xmm2
483 ; AVX1-NEXT: vpcmpgtq %xmm3, %xmm2, %xmm2
484 ; AVX1-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
487 ; AVX2-LABEL: max_ge_v2i64:
489 ; AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
490 ; AVX2-NEXT: vpxor %xmm2, %xmm1, %xmm3
491 ; AVX2-NEXT: vpxor %xmm2, %xmm0, %xmm2
492 ; AVX2-NEXT: vpcmpgtq %xmm3, %xmm2, %xmm2
493 ; AVX2-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
496 ; AVX512-LABEL: max_ge_v2i64:
498 ; AVX512-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
499 ; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
500 ; AVX512-NEXT: vpmaxuq %zmm1, %zmm0, %zmm0
501 ; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
502 ; AVX512-NEXT: vzeroupper
504 %1 = icmp uge <2 x i64> %a, %b
505 %2 = select <2 x i1> %1, <2 x i64> %a, <2 x i64> %b
509 define <4 x i64> @max_ge_v4i64(<4 x i64> %a, <4 x i64> %b) {
510 ; SSE2-LABEL: max_ge_v4i64:
512 ; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [9223372039002259456,9223372039002259456]
513 ; SSE2-NEXT: movdqa %xmm2, %xmm5
514 ; SSE2-NEXT: pxor %xmm4, %xmm5
515 ; SSE2-NEXT: movdqa %xmm0, %xmm6
516 ; SSE2-NEXT: pxor %xmm4, %xmm6
517 ; SSE2-NEXT: movdqa %xmm6, %xmm7
518 ; SSE2-NEXT: pcmpgtd %xmm5, %xmm7
519 ; SSE2-NEXT: pshufd {{.*#+}} xmm8 = xmm7[0,0,2,2]
520 ; SSE2-NEXT: pcmpeqd %xmm5, %xmm6
521 ; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm6[1,1,3,3]
522 ; SSE2-NEXT: pand %xmm8, %xmm5
523 ; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm7[1,1,3,3]
524 ; SSE2-NEXT: por %xmm5, %xmm6
525 ; SSE2-NEXT: pand %xmm6, %xmm0
526 ; SSE2-NEXT: pandn %xmm2, %xmm6
527 ; SSE2-NEXT: por %xmm6, %xmm0
528 ; SSE2-NEXT: movdqa %xmm3, %xmm2
529 ; SSE2-NEXT: pxor %xmm4, %xmm2
530 ; SSE2-NEXT: pxor %xmm1, %xmm4
531 ; SSE2-NEXT: movdqa %xmm4, %xmm5
532 ; SSE2-NEXT: pcmpgtd %xmm2, %xmm5
533 ; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm5[0,0,2,2]
534 ; SSE2-NEXT: pcmpeqd %xmm2, %xmm4
535 ; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm4[1,1,3,3]
536 ; SSE2-NEXT: pand %xmm6, %xmm2
537 ; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm5[1,1,3,3]
538 ; SSE2-NEXT: por %xmm2, %xmm4
539 ; SSE2-NEXT: pand %xmm4, %xmm1
540 ; SSE2-NEXT: pandn %xmm3, %xmm4
541 ; SSE2-NEXT: por %xmm4, %xmm1
544 ; SSE41-LABEL: max_ge_v4i64:
546 ; SSE41-NEXT: movdqa %xmm0, %xmm4
547 ; SSE41-NEXT: movdqa {{.*#+}} xmm5 = [9223372039002259456,9223372039002259456]
548 ; SSE41-NEXT: movdqa %xmm2, %xmm6
549 ; SSE41-NEXT: pxor %xmm5, %xmm6
550 ; SSE41-NEXT: movdqa %xmm0, %xmm7
551 ; SSE41-NEXT: pxor %xmm5, %xmm7
552 ; SSE41-NEXT: movdqa %xmm7, %xmm0
553 ; SSE41-NEXT: pcmpeqd %xmm6, %xmm0
554 ; SSE41-NEXT: pcmpgtd %xmm6, %xmm7
555 ; SSE41-NEXT: pshufd {{.*#+}} xmm6 = xmm7[0,0,2,2]
556 ; SSE41-NEXT: pand %xmm6, %xmm0
557 ; SSE41-NEXT: por %xmm7, %xmm0
558 ; SSE41-NEXT: blendvpd %xmm0, %xmm4, %xmm2
559 ; SSE41-NEXT: movdqa %xmm3, %xmm0
560 ; SSE41-NEXT: pxor %xmm5, %xmm0
561 ; SSE41-NEXT: pxor %xmm1, %xmm5
562 ; SSE41-NEXT: movdqa %xmm5, %xmm4
563 ; SSE41-NEXT: pcmpeqd %xmm0, %xmm4
564 ; SSE41-NEXT: pcmpgtd %xmm0, %xmm5
565 ; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm5[0,0,2,2]
566 ; SSE41-NEXT: pand %xmm4, %xmm0
567 ; SSE41-NEXT: por %xmm5, %xmm0
568 ; SSE41-NEXT: blendvpd %xmm0, %xmm1, %xmm3
569 ; SSE41-NEXT: movapd %xmm2, %xmm0
570 ; SSE41-NEXT: movapd %xmm3, %xmm1
573 ; SSE42-LABEL: max_ge_v4i64:
575 ; SSE42-NEXT: movdqa %xmm0, %xmm4
576 ; SSE42-NEXT: movdqa {{.*#+}} xmm5 = [9223372036854775808,9223372036854775808]
577 ; SSE42-NEXT: movdqa %xmm2, %xmm6
578 ; SSE42-NEXT: pxor %xmm5, %xmm6
579 ; SSE42-NEXT: pxor %xmm5, %xmm0
580 ; SSE42-NEXT: pcmpgtq %xmm6, %xmm0
581 ; SSE42-NEXT: blendvpd %xmm0, %xmm4, %xmm2
582 ; SSE42-NEXT: movdqa %xmm3, %xmm0
583 ; SSE42-NEXT: pxor %xmm5, %xmm0
584 ; SSE42-NEXT: pxor %xmm1, %xmm5
585 ; SSE42-NEXT: pcmpgtq %xmm0, %xmm5
586 ; SSE42-NEXT: movdqa %xmm5, %xmm0
587 ; SSE42-NEXT: blendvpd %xmm0, %xmm1, %xmm3
588 ; SSE42-NEXT: movapd %xmm2, %xmm0
589 ; SSE42-NEXT: movapd %xmm3, %xmm1
592 ; AVX1-LABEL: max_ge_v4i64:
594 ; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
595 ; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [9223372036854775808,9223372036854775808]
596 ; AVX1-NEXT: vpxor %xmm3, %xmm2, %xmm2
597 ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm4
598 ; AVX1-NEXT: vpxor %xmm3, %xmm4, %xmm4
599 ; AVX1-NEXT: vpcmpgtq %xmm2, %xmm4, %xmm2
600 ; AVX1-NEXT: vpxor %xmm3, %xmm1, %xmm4
601 ; AVX1-NEXT: vpxor %xmm3, %xmm0, %xmm3
602 ; AVX1-NEXT: vpcmpgtq %xmm4, %xmm3, %xmm3
603 ; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm3, %ymm2
604 ; AVX1-NEXT: vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
607 ; AVX2-LABEL: max_ge_v4i64:
609 ; AVX2-NEXT: vpbroadcastq {{.*#+}} ymm2 = [9223372036854775808,9223372036854775808,9223372036854775808,9223372036854775808]
610 ; AVX2-NEXT: vpxor %ymm2, %ymm1, %ymm3
611 ; AVX2-NEXT: vpxor %ymm2, %ymm0, %ymm2
612 ; AVX2-NEXT: vpcmpgtq %ymm3, %ymm2, %ymm2
613 ; AVX2-NEXT: vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
616 ; AVX512-LABEL: max_ge_v4i64:
618 ; AVX512-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
619 ; AVX512-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
620 ; AVX512-NEXT: vpmaxuq %zmm1, %zmm0, %zmm0
621 ; AVX512-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
623 %1 = icmp uge <4 x i64> %a, %b
624 %2 = select <4 x i1> %1, <4 x i64> %a, <4 x i64> %b
628 define <4 x i32> @max_ge_v4i32(<4 x i32> %a, <4 x i32> %b) {
629 ; SSE2-LABEL: max_ge_v4i32:
631 ; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
632 ; SSE2-NEXT: movdqa %xmm1, %xmm3
633 ; SSE2-NEXT: pxor %xmm2, %xmm3
634 ; SSE2-NEXT: pxor %xmm0, %xmm2
635 ; SSE2-NEXT: pcmpgtd %xmm3, %xmm2
636 ; SSE2-NEXT: pand %xmm2, %xmm0
637 ; SSE2-NEXT: pandn %xmm1, %xmm2
638 ; SSE2-NEXT: por %xmm2, %xmm0
641 ; SSE41-LABEL: max_ge_v4i32:
643 ; SSE41-NEXT: pmaxud %xmm1, %xmm0
646 ; SSE42-LABEL: max_ge_v4i32:
648 ; SSE42-NEXT: pmaxud %xmm1, %xmm0
651 ; AVX-LABEL: max_ge_v4i32:
653 ; AVX-NEXT: vpmaxud %xmm1, %xmm0, %xmm0
655 %1 = icmp uge <4 x i32> %a, %b
656 %2 = select <4 x i1> %1, <4 x i32> %a, <4 x i32> %b
660 define <8 x i32> @max_ge_v8i32(<8 x i32> %a, <8 x i32> %b) {
661 ; SSE2-LABEL: max_ge_v8i32:
663 ; SSE2-NEXT: movdqa {{.*#+}} xmm5 = [2147483648,2147483648,2147483648,2147483648]
664 ; SSE2-NEXT: movdqa %xmm2, %xmm6
665 ; SSE2-NEXT: pxor %xmm5, %xmm6
666 ; SSE2-NEXT: movdqa %xmm0, %xmm4
667 ; SSE2-NEXT: pxor %xmm5, %xmm4
668 ; SSE2-NEXT: pcmpgtd %xmm6, %xmm4
669 ; SSE2-NEXT: pand %xmm4, %xmm0
670 ; SSE2-NEXT: pandn %xmm2, %xmm4
671 ; SSE2-NEXT: por %xmm0, %xmm4
672 ; SSE2-NEXT: movdqa %xmm3, %xmm0
673 ; SSE2-NEXT: pxor %xmm5, %xmm0
674 ; SSE2-NEXT: pxor %xmm1, %xmm5
675 ; SSE2-NEXT: pcmpgtd %xmm0, %xmm5
676 ; SSE2-NEXT: pand %xmm5, %xmm1
677 ; SSE2-NEXT: pandn %xmm3, %xmm5
678 ; SSE2-NEXT: por %xmm5, %xmm1
679 ; SSE2-NEXT: movdqa %xmm4, %xmm0
682 ; SSE41-LABEL: max_ge_v8i32:
684 ; SSE41-NEXT: pmaxud %xmm2, %xmm0
685 ; SSE41-NEXT: pmaxud %xmm3, %xmm1
688 ; SSE42-LABEL: max_ge_v8i32:
690 ; SSE42-NEXT: pmaxud %xmm2, %xmm0
691 ; SSE42-NEXT: pmaxud %xmm3, %xmm1
694 ; AVX1-LABEL: max_ge_v8i32:
696 ; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
697 ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
698 ; AVX1-NEXT: vpmaxud %xmm2, %xmm3, %xmm2
699 ; AVX1-NEXT: vpmaxud %xmm1, %xmm0, %xmm0
700 ; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
703 ; AVX2-LABEL: max_ge_v8i32:
705 ; AVX2-NEXT: vpmaxud %ymm1, %ymm0, %ymm0
708 ; AVX512-LABEL: max_ge_v8i32:
710 ; AVX512-NEXT: vpmaxud %ymm1, %ymm0, %ymm0
712 %1 = icmp uge <8 x i32> %a, %b
713 %2 = select <8 x i1> %1, <8 x i32> %a, <8 x i32> %b
717 define <8 x i16> @max_ge_v8i16(<8 x i16> %a, <8 x i16> %b) {
718 ; SSE2-LABEL: max_ge_v8i16:
720 ; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [32768,32768,32768,32768,32768,32768,32768,32768]
721 ; SSE2-NEXT: pxor %xmm2, %xmm1
722 ; SSE2-NEXT: pxor %xmm2, %xmm0
723 ; SSE2-NEXT: pmaxsw %xmm1, %xmm0
724 ; SSE2-NEXT: pxor %xmm2, %xmm0
727 ; SSE41-LABEL: max_ge_v8i16:
729 ; SSE41-NEXT: pmaxuw %xmm1, %xmm0
732 ; SSE42-LABEL: max_ge_v8i16:
734 ; SSE42-NEXT: pmaxuw %xmm1, %xmm0
737 ; AVX-LABEL: max_ge_v8i16:
739 ; AVX-NEXT: vpmaxuw %xmm1, %xmm0, %xmm0
741 %1 = icmp uge <8 x i16> %a, %b
742 %2 = select <8 x i1> %1, <8 x i16> %a, <8 x i16> %b
746 define <16 x i16> @max_ge_v16i16(<16 x i16> %a, <16 x i16> %b) {
747 ; SSE2-LABEL: max_ge_v16i16:
749 ; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [32768,32768,32768,32768,32768,32768,32768,32768]
750 ; SSE2-NEXT: pxor %xmm4, %xmm2
751 ; SSE2-NEXT: pxor %xmm4, %xmm0
752 ; SSE2-NEXT: pmaxsw %xmm2, %xmm0
753 ; SSE2-NEXT: pxor %xmm4, %xmm0
754 ; SSE2-NEXT: pxor %xmm4, %xmm3
755 ; SSE2-NEXT: pxor %xmm4, %xmm1
756 ; SSE2-NEXT: pmaxsw %xmm3, %xmm1
757 ; SSE2-NEXT: pxor %xmm4, %xmm1
760 ; SSE41-LABEL: max_ge_v16i16:
762 ; SSE41-NEXT: pmaxuw %xmm2, %xmm0
763 ; SSE41-NEXT: pmaxuw %xmm3, %xmm1
766 ; SSE42-LABEL: max_ge_v16i16:
768 ; SSE42-NEXT: pmaxuw %xmm2, %xmm0
769 ; SSE42-NEXT: pmaxuw %xmm3, %xmm1
772 ; AVX1-LABEL: max_ge_v16i16:
774 ; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
775 ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
776 ; AVX1-NEXT: vpmaxuw %xmm2, %xmm3, %xmm2
777 ; AVX1-NEXT: vpmaxuw %xmm1, %xmm0, %xmm0
778 ; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
781 ; AVX2-LABEL: max_ge_v16i16:
783 ; AVX2-NEXT: vpmaxuw %ymm1, %ymm0, %ymm0
786 ; AVX512-LABEL: max_ge_v16i16:
788 ; AVX512-NEXT: vpmaxuw %ymm1, %ymm0, %ymm0
790 %1 = icmp uge <16 x i16> %a, %b
791 %2 = select <16 x i1> %1, <16 x i16> %a, <16 x i16> %b
795 define <16 x i8> @max_ge_v16i8(<16 x i8> %a, <16 x i8> %b) {
796 ; SSE-LABEL: max_ge_v16i8:
798 ; SSE-NEXT: pmaxub %xmm1, %xmm0
801 ; AVX-LABEL: max_ge_v16i8:
803 ; AVX-NEXT: vpmaxub %xmm1, %xmm0, %xmm0
805 %1 = icmp uge <16 x i8> %a, %b
806 %2 = select <16 x i1> %1, <16 x i8> %a, <16 x i8> %b
810 define <32 x i8> @max_ge_v32i8(<32 x i8> %a, <32 x i8> %b) {
811 ; SSE-LABEL: max_ge_v32i8:
813 ; SSE-NEXT: pmaxub %xmm2, %xmm0
814 ; SSE-NEXT: pmaxub %xmm3, %xmm1
817 ; AVX1-LABEL: max_ge_v32i8:
819 ; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
820 ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
821 ; AVX1-NEXT: vpmaxub %xmm2, %xmm3, %xmm2
822 ; AVX1-NEXT: vpmaxub %xmm1, %xmm0, %xmm0
823 ; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
826 ; AVX2-LABEL: max_ge_v32i8:
828 ; AVX2-NEXT: vpmaxub %ymm1, %ymm0, %ymm0
831 ; AVX512-LABEL: max_ge_v32i8:
833 ; AVX512-NEXT: vpmaxub %ymm1, %ymm0, %ymm0
835 %1 = icmp uge <32 x i8> %a, %b
836 %2 = select <32 x i1> %1, <32 x i8> %a, <32 x i8> %b
841 ; Unsigned Minimum (LT)
844 define <2 x i64> @min_lt_v2i64(<2 x i64> %a, <2 x i64> %b) {
845 ; SSE2-LABEL: min_lt_v2i64:
847 ; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [9223372039002259456,9223372039002259456]
848 ; SSE2-NEXT: movdqa %xmm0, %xmm3
849 ; SSE2-NEXT: pxor %xmm2, %xmm3
850 ; SSE2-NEXT: pxor %xmm1, %xmm2
851 ; SSE2-NEXT: movdqa %xmm2, %xmm4
852 ; SSE2-NEXT: pcmpgtd %xmm3, %xmm4
853 ; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2]
854 ; SSE2-NEXT: pcmpeqd %xmm3, %xmm2
855 ; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
856 ; SSE2-NEXT: pand %xmm5, %xmm2
857 ; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm4[1,1,3,3]
858 ; SSE2-NEXT: por %xmm2, %xmm3
859 ; SSE2-NEXT: pand %xmm3, %xmm0
860 ; SSE2-NEXT: pandn %xmm1, %xmm3
861 ; SSE2-NEXT: por %xmm3, %xmm0
864 ; SSE41-LABEL: min_lt_v2i64:
866 ; SSE41-NEXT: movdqa %xmm0, %xmm2
867 ; SSE41-NEXT: movdqa {{.*#+}} xmm3 = [9223372039002259456,9223372039002259456]
868 ; SSE41-NEXT: pxor %xmm3, %xmm0
869 ; SSE41-NEXT: pxor %xmm1, %xmm3
870 ; SSE41-NEXT: movdqa %xmm3, %xmm4
871 ; SSE41-NEXT: pcmpeqd %xmm0, %xmm4
872 ; SSE41-NEXT: pcmpgtd %xmm0, %xmm3
873 ; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm3[0,0,2,2]
874 ; SSE41-NEXT: pand %xmm4, %xmm0
875 ; SSE41-NEXT: por %xmm3, %xmm0
876 ; SSE41-NEXT: blendvpd %xmm0, %xmm2, %xmm1
877 ; SSE41-NEXT: movapd %xmm1, %xmm0
880 ; SSE42-LABEL: min_lt_v2i64:
882 ; SSE42-NEXT: movdqa %xmm0, %xmm2
883 ; SSE42-NEXT: movdqa {{.*#+}} xmm0 = [9223372036854775808,9223372036854775808]
884 ; SSE42-NEXT: movdqa %xmm2, %xmm3
885 ; SSE42-NEXT: pxor %xmm0, %xmm3
886 ; SSE42-NEXT: pxor %xmm1, %xmm0
887 ; SSE42-NEXT: pcmpgtq %xmm3, %xmm0
888 ; SSE42-NEXT: blendvpd %xmm0, %xmm2, %xmm1
889 ; SSE42-NEXT: movapd %xmm1, %xmm0
892 ; AVX1-LABEL: min_lt_v2i64:
894 ; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
895 ; AVX1-NEXT: vpxor %xmm2, %xmm0, %xmm3
896 ; AVX1-NEXT: vpxor %xmm2, %xmm1, %xmm2
897 ; AVX1-NEXT: vpcmpgtq %xmm3, %xmm2, %xmm2
898 ; AVX1-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
901 ; AVX2-LABEL: min_lt_v2i64:
903 ; AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
904 ; AVX2-NEXT: vpxor %xmm2, %xmm0, %xmm3
905 ; AVX2-NEXT: vpxor %xmm2, %xmm1, %xmm2
906 ; AVX2-NEXT: vpcmpgtq %xmm3, %xmm2, %xmm2
907 ; AVX2-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
910 ; AVX512-LABEL: min_lt_v2i64:
912 ; AVX512-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
913 ; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
914 ; AVX512-NEXT: vpminuq %zmm1, %zmm0, %zmm0
915 ; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
916 ; AVX512-NEXT: vzeroupper
918 %1 = icmp ult <2 x i64> %a, %b
919 %2 = select <2 x i1> %1, <2 x i64> %a, <2 x i64> %b
923 define <4 x i64> @min_lt_v4i64(<4 x i64> %a, <4 x i64> %b) {
924 ; SSE2-LABEL: min_lt_v4i64:
926 ; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [9223372039002259456,9223372039002259456]
927 ; SSE2-NEXT: movdqa %xmm0, %xmm5
928 ; SSE2-NEXT: pxor %xmm4, %xmm5
929 ; SSE2-NEXT: movdqa %xmm2, %xmm6
930 ; SSE2-NEXT: pxor %xmm4, %xmm6
931 ; SSE2-NEXT: movdqa %xmm6, %xmm7
932 ; SSE2-NEXT: pcmpgtd %xmm5, %xmm7
933 ; SSE2-NEXT: pshufd {{.*#+}} xmm8 = xmm7[0,0,2,2]
934 ; SSE2-NEXT: pcmpeqd %xmm5, %xmm6
935 ; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm6[1,1,3,3]
936 ; SSE2-NEXT: pand %xmm8, %xmm5
937 ; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm7[1,1,3,3]
938 ; SSE2-NEXT: por %xmm5, %xmm6
939 ; SSE2-NEXT: pand %xmm6, %xmm0
940 ; SSE2-NEXT: pandn %xmm2, %xmm6
941 ; SSE2-NEXT: por %xmm6, %xmm0
942 ; SSE2-NEXT: movdqa %xmm1, %xmm2
943 ; SSE2-NEXT: pxor %xmm4, %xmm2
944 ; SSE2-NEXT: pxor %xmm3, %xmm4
945 ; SSE2-NEXT: movdqa %xmm4, %xmm5
946 ; SSE2-NEXT: pcmpgtd %xmm2, %xmm5
947 ; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm5[0,0,2,2]
948 ; SSE2-NEXT: pcmpeqd %xmm2, %xmm4
949 ; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm4[1,1,3,3]
950 ; SSE2-NEXT: pand %xmm6, %xmm2
951 ; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm5[1,1,3,3]
952 ; SSE2-NEXT: por %xmm2, %xmm4
953 ; SSE2-NEXT: pand %xmm4, %xmm1
954 ; SSE2-NEXT: pandn %xmm3, %xmm4
955 ; SSE2-NEXT: por %xmm4, %xmm1
958 ; SSE41-LABEL: min_lt_v4i64:
960 ; SSE41-NEXT: movdqa %xmm0, %xmm4
961 ; SSE41-NEXT: movdqa {{.*#+}} xmm5 = [9223372039002259456,9223372039002259456]
962 ; SSE41-NEXT: pxor %xmm5, %xmm0
963 ; SSE41-NEXT: movdqa %xmm2, %xmm6
964 ; SSE41-NEXT: pxor %xmm5, %xmm6
965 ; SSE41-NEXT: movdqa %xmm6, %xmm7
966 ; SSE41-NEXT: pcmpeqd %xmm0, %xmm7
967 ; SSE41-NEXT: pcmpgtd %xmm0, %xmm6
968 ; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm6[0,0,2,2]
969 ; SSE41-NEXT: pand %xmm7, %xmm0
970 ; SSE41-NEXT: por %xmm6, %xmm0
971 ; SSE41-NEXT: blendvpd %xmm0, %xmm4, %xmm2
972 ; SSE41-NEXT: movdqa %xmm1, %xmm0
973 ; SSE41-NEXT: pxor %xmm5, %xmm0
974 ; SSE41-NEXT: pxor %xmm3, %xmm5
975 ; SSE41-NEXT: movdqa %xmm5, %xmm4
976 ; SSE41-NEXT: pcmpeqd %xmm0, %xmm4
977 ; SSE41-NEXT: pcmpgtd %xmm0, %xmm5
978 ; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm5[0,0,2,2]
979 ; SSE41-NEXT: pand %xmm4, %xmm0
980 ; SSE41-NEXT: por %xmm5, %xmm0
981 ; SSE41-NEXT: blendvpd %xmm0, %xmm1, %xmm3
982 ; SSE41-NEXT: movapd %xmm2, %xmm0
983 ; SSE41-NEXT: movapd %xmm3, %xmm1
986 ; SSE42-LABEL: min_lt_v4i64:
988 ; SSE42-NEXT: movdqa %xmm0, %xmm4
989 ; SSE42-NEXT: movdqa {{.*#+}} xmm5 = [9223372036854775808,9223372036854775808]
990 ; SSE42-NEXT: movdqa %xmm0, %xmm6
991 ; SSE42-NEXT: pxor %xmm5, %xmm6
992 ; SSE42-NEXT: movdqa %xmm2, %xmm0
993 ; SSE42-NEXT: pxor %xmm5, %xmm0
994 ; SSE42-NEXT: pcmpgtq %xmm6, %xmm0
995 ; SSE42-NEXT: blendvpd %xmm0, %xmm4, %xmm2
996 ; SSE42-NEXT: movdqa %xmm1, %xmm0
997 ; SSE42-NEXT: pxor %xmm5, %xmm0
998 ; SSE42-NEXT: pxor %xmm3, %xmm5
999 ; SSE42-NEXT: pcmpgtq %xmm0, %xmm5
1000 ; SSE42-NEXT: movdqa %xmm5, %xmm0
1001 ; SSE42-NEXT: blendvpd %xmm0, %xmm1, %xmm3
1002 ; SSE42-NEXT: movapd %xmm2, %xmm0
1003 ; SSE42-NEXT: movapd %xmm3, %xmm1
1006 ; AVX1-LABEL: min_lt_v4i64:
1008 ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
1009 ; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [9223372036854775808,9223372036854775808]
1010 ; AVX1-NEXT: vpxor %xmm3, %xmm2, %xmm2
1011 ; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm4
1012 ; AVX1-NEXT: vpxor %xmm3, %xmm4, %xmm4
1013 ; AVX1-NEXT: vpcmpgtq %xmm2, %xmm4, %xmm2
1014 ; AVX1-NEXT: vpxor %xmm3, %xmm0, %xmm4
1015 ; AVX1-NEXT: vpxor %xmm3, %xmm1, %xmm3
1016 ; AVX1-NEXT: vpcmpgtq %xmm4, %xmm3, %xmm3
1017 ; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm3, %ymm2
1018 ; AVX1-NEXT: vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
1021 ; AVX2-LABEL: min_lt_v4i64:
1023 ; AVX2-NEXT: vpbroadcastq {{.*#+}} ymm2 = [9223372036854775808,9223372036854775808,9223372036854775808,9223372036854775808]
1024 ; AVX2-NEXT: vpxor %ymm2, %ymm0, %ymm3
1025 ; AVX2-NEXT: vpxor %ymm2, %ymm1, %ymm2
1026 ; AVX2-NEXT: vpcmpgtq %ymm3, %ymm2, %ymm2
1027 ; AVX2-NEXT: vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
1030 ; AVX512-LABEL: min_lt_v4i64:
1032 ; AVX512-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
1033 ; AVX512-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
1034 ; AVX512-NEXT: vpminuq %zmm1, %zmm0, %zmm0
1035 ; AVX512-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
1037 %1 = icmp ult <4 x i64> %a, %b
1038 %2 = select <4 x i1> %1, <4 x i64> %a, <4 x i64> %b
1042 define <4 x i32> @min_lt_v4i32(<4 x i32> %a, <4 x i32> %b) {
1043 ; SSE2-LABEL: min_lt_v4i32:
1045 ; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
1046 ; SSE2-NEXT: movdqa %xmm0, %xmm3
1047 ; SSE2-NEXT: pxor %xmm2, %xmm3
1048 ; SSE2-NEXT: pxor %xmm1, %xmm2
1049 ; SSE2-NEXT: pcmpgtd %xmm3, %xmm2
1050 ; SSE2-NEXT: pand %xmm2, %xmm0
1051 ; SSE2-NEXT: pandn %xmm1, %xmm2
1052 ; SSE2-NEXT: por %xmm2, %xmm0
1055 ; SSE41-LABEL: min_lt_v4i32:
1057 ; SSE41-NEXT: pminud %xmm1, %xmm0
1060 ; SSE42-LABEL: min_lt_v4i32:
1062 ; SSE42-NEXT: pminud %xmm1, %xmm0
1065 ; AVX-LABEL: min_lt_v4i32:
1067 ; AVX-NEXT: vpminud %xmm1, %xmm0, %xmm0
1069 %1 = icmp ult <4 x i32> %a, %b
1070 %2 = select <4 x i1> %1, <4 x i32> %a, <4 x i32> %b
1074 define <8 x i32> @min_lt_v8i32(<8 x i32> %a, <8 x i32> %b) {
1075 ; SSE2-LABEL: min_lt_v8i32:
1077 ; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [2147483648,2147483648,2147483648,2147483648]
1078 ; SSE2-NEXT: movdqa %xmm0, %xmm5
1079 ; SSE2-NEXT: pxor %xmm4, %xmm5
1080 ; SSE2-NEXT: movdqa %xmm2, %xmm6
1081 ; SSE2-NEXT: pxor %xmm4, %xmm6
1082 ; SSE2-NEXT: pcmpgtd %xmm5, %xmm6
1083 ; SSE2-NEXT: pand %xmm6, %xmm0
1084 ; SSE2-NEXT: pandn %xmm2, %xmm6
1085 ; SSE2-NEXT: por %xmm6, %xmm0
1086 ; SSE2-NEXT: movdqa %xmm1, %xmm2
1087 ; SSE2-NEXT: pxor %xmm4, %xmm2
1088 ; SSE2-NEXT: pxor %xmm3, %xmm4
1089 ; SSE2-NEXT: pcmpgtd %xmm2, %xmm4
1090 ; SSE2-NEXT: pand %xmm4, %xmm1
1091 ; SSE2-NEXT: pandn %xmm3, %xmm4
1092 ; SSE2-NEXT: por %xmm4, %xmm1
1095 ; SSE41-LABEL: min_lt_v8i32:
1097 ; SSE41-NEXT: pminud %xmm2, %xmm0
1098 ; SSE41-NEXT: pminud %xmm3, %xmm1
1101 ; SSE42-LABEL: min_lt_v8i32:
1103 ; SSE42-NEXT: pminud %xmm2, %xmm0
1104 ; SSE42-NEXT: pminud %xmm3, %xmm1
1107 ; AVX1-LABEL: min_lt_v8i32:
1109 ; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
1110 ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
1111 ; AVX1-NEXT: vpminud %xmm2, %xmm3, %xmm2
1112 ; AVX1-NEXT: vpminud %xmm1, %xmm0, %xmm0
1113 ; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
1116 ; AVX2-LABEL: min_lt_v8i32:
1118 ; AVX2-NEXT: vpminud %ymm1, %ymm0, %ymm0
1121 ; AVX512-LABEL: min_lt_v8i32:
1123 ; AVX512-NEXT: vpminud %ymm1, %ymm0, %ymm0
1125 %1 = icmp ult <8 x i32> %a, %b
1126 %2 = select <8 x i1> %1, <8 x i32> %a, <8 x i32> %b
1130 define <8 x i16> @min_lt_v8i16(<8 x i16> %a, <8 x i16> %b) {
1131 ; SSE2-LABEL: min_lt_v8i16:
1133 ; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [32768,32768,32768,32768,32768,32768,32768,32768]
1134 ; SSE2-NEXT: pxor %xmm2, %xmm1
1135 ; SSE2-NEXT: pxor %xmm2, %xmm0
1136 ; SSE2-NEXT: pminsw %xmm1, %xmm0
1137 ; SSE2-NEXT: pxor %xmm2, %xmm0
1140 ; SSE41-LABEL: min_lt_v8i16:
1142 ; SSE41-NEXT: pminuw %xmm1, %xmm0
1145 ; SSE42-LABEL: min_lt_v8i16:
1147 ; SSE42-NEXT: pminuw %xmm1, %xmm0
1150 ; AVX-LABEL: min_lt_v8i16:
1152 ; AVX-NEXT: vpminuw %xmm1, %xmm0, %xmm0
1154 %1 = icmp ult <8 x i16> %a, %b
1155 %2 = select <8 x i1> %1, <8 x i16> %a, <8 x i16> %b
1159 define <16 x i16> @min_lt_v16i16(<16 x i16> %a, <16 x i16> %b) {
1160 ; SSE2-LABEL: min_lt_v16i16:
1162 ; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [32768,32768,32768,32768,32768,32768,32768,32768]
1163 ; SSE2-NEXT: pxor %xmm4, %xmm2
1164 ; SSE2-NEXT: pxor %xmm4, %xmm0
1165 ; SSE2-NEXT: pminsw %xmm2, %xmm0
1166 ; SSE2-NEXT: pxor %xmm4, %xmm0
1167 ; SSE2-NEXT: pxor %xmm4, %xmm3
1168 ; SSE2-NEXT: pxor %xmm4, %xmm1
1169 ; SSE2-NEXT: pminsw %xmm3, %xmm1
1170 ; SSE2-NEXT: pxor %xmm4, %xmm1
1173 ; SSE41-LABEL: min_lt_v16i16:
1175 ; SSE41-NEXT: pminuw %xmm2, %xmm0
1176 ; SSE41-NEXT: pminuw %xmm3, %xmm1
1179 ; SSE42-LABEL: min_lt_v16i16:
1181 ; SSE42-NEXT: pminuw %xmm2, %xmm0
1182 ; SSE42-NEXT: pminuw %xmm3, %xmm1
1185 ; AVX1-LABEL: min_lt_v16i16:
1187 ; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
1188 ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
1189 ; AVX1-NEXT: vpminuw %xmm2, %xmm3, %xmm2
1190 ; AVX1-NEXT: vpminuw %xmm1, %xmm0, %xmm0
1191 ; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
1194 ; AVX2-LABEL: min_lt_v16i16:
1196 ; AVX2-NEXT: vpminuw %ymm1, %ymm0, %ymm0
1199 ; AVX512-LABEL: min_lt_v16i16:
1201 ; AVX512-NEXT: vpminuw %ymm1, %ymm0, %ymm0
1203 %1 = icmp ult <16 x i16> %a, %b
1204 %2 = select <16 x i1> %1, <16 x i16> %a, <16 x i16> %b
1208 define <16 x i8> @min_lt_v16i8(<16 x i8> %a, <16 x i8> %b) {
1209 ; SSE-LABEL: min_lt_v16i8:
1211 ; SSE-NEXT: pminub %xmm1, %xmm0
1214 ; AVX-LABEL: min_lt_v16i8:
1216 ; AVX-NEXT: vpminub %xmm1, %xmm0, %xmm0
1218 %1 = icmp ult <16 x i8> %a, %b
1219 %2 = select <16 x i1> %1, <16 x i8> %a, <16 x i8> %b
1223 define <32 x i8> @min_lt_v32i8(<32 x i8> %a, <32 x i8> %b) {
1224 ; SSE-LABEL: min_lt_v32i8:
1226 ; SSE-NEXT: pminub %xmm2, %xmm0
1227 ; SSE-NEXT: pminub %xmm3, %xmm1
1230 ; AVX1-LABEL: min_lt_v32i8:
1232 ; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
1233 ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
1234 ; AVX1-NEXT: vpminub %xmm2, %xmm3, %xmm2
1235 ; AVX1-NEXT: vpminub %xmm1, %xmm0, %xmm0
1236 ; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
1239 ; AVX2-LABEL: min_lt_v32i8:
1241 ; AVX2-NEXT: vpminub %ymm1, %ymm0, %ymm0
1244 ; AVX512-LABEL: min_lt_v32i8:
1246 ; AVX512-NEXT: vpminub %ymm1, %ymm0, %ymm0
1248 %1 = icmp ult <32 x i8> %a, %b
1249 %2 = select <32 x i1> %1, <32 x i8> %a, <32 x i8> %b
1254 ; Unsigned Minimum (LE)
1257 define <2 x i64> @min_le_v2i64(<2 x i64> %a, <2 x i64> %b) {
1258 ; SSE2-LABEL: min_le_v2i64:
1260 ; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [9223372039002259456,9223372039002259456]
1261 ; SSE2-NEXT: movdqa %xmm0, %xmm3
1262 ; SSE2-NEXT: pxor %xmm2, %xmm3
1263 ; SSE2-NEXT: pxor %xmm1, %xmm2
1264 ; SSE2-NEXT: movdqa %xmm2, %xmm4
1265 ; SSE2-NEXT: pcmpgtd %xmm3, %xmm4
1266 ; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2]
1267 ; SSE2-NEXT: pcmpeqd %xmm3, %xmm2
1268 ; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
1269 ; SSE2-NEXT: pand %xmm5, %xmm2
1270 ; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm4[1,1,3,3]
1271 ; SSE2-NEXT: por %xmm2, %xmm3
1272 ; SSE2-NEXT: pand %xmm3, %xmm0
1273 ; SSE2-NEXT: pandn %xmm1, %xmm3
1274 ; SSE2-NEXT: por %xmm3, %xmm0
1277 ; SSE41-LABEL: min_le_v2i64:
1279 ; SSE41-NEXT: movdqa %xmm0, %xmm2
1280 ; SSE41-NEXT: movdqa {{.*#+}} xmm3 = [9223372039002259456,9223372039002259456]
1281 ; SSE41-NEXT: pxor %xmm3, %xmm0
1282 ; SSE41-NEXT: pxor %xmm1, %xmm3
1283 ; SSE41-NEXT: movdqa %xmm3, %xmm4
1284 ; SSE41-NEXT: pcmpeqd %xmm0, %xmm4
1285 ; SSE41-NEXT: pcmpgtd %xmm0, %xmm3
1286 ; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm3[0,0,2,2]
1287 ; SSE41-NEXT: pand %xmm4, %xmm0
1288 ; SSE41-NEXT: por %xmm3, %xmm0
1289 ; SSE41-NEXT: blendvpd %xmm0, %xmm2, %xmm1
1290 ; SSE41-NEXT: movapd %xmm1, %xmm0
1293 ; SSE42-LABEL: min_le_v2i64:
1295 ; SSE42-NEXT: movdqa %xmm0, %xmm2
1296 ; SSE42-NEXT: movdqa {{.*#+}} xmm0 = [9223372036854775808,9223372036854775808]
1297 ; SSE42-NEXT: movdqa %xmm2, %xmm3
1298 ; SSE42-NEXT: pxor %xmm0, %xmm3
1299 ; SSE42-NEXT: pxor %xmm1, %xmm0
1300 ; SSE42-NEXT: pcmpgtq %xmm3, %xmm0
1301 ; SSE42-NEXT: blendvpd %xmm0, %xmm2, %xmm1
1302 ; SSE42-NEXT: movapd %xmm1, %xmm0
1305 ; AVX1-LABEL: min_le_v2i64:
1307 ; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
1308 ; AVX1-NEXT: vpxor %xmm2, %xmm0, %xmm3
1309 ; AVX1-NEXT: vpxor %xmm2, %xmm1, %xmm2
1310 ; AVX1-NEXT: vpcmpgtq %xmm3, %xmm2, %xmm2
1311 ; AVX1-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
1314 ; AVX2-LABEL: min_le_v2i64:
1316 ; AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
1317 ; AVX2-NEXT: vpxor %xmm2, %xmm0, %xmm3
1318 ; AVX2-NEXT: vpxor %xmm2, %xmm1, %xmm2
1319 ; AVX2-NEXT: vpcmpgtq %xmm3, %xmm2, %xmm2
1320 ; AVX2-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
1323 ; AVX512-LABEL: min_le_v2i64:
1325 ; AVX512-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
1326 ; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
1327 ; AVX512-NEXT: vpminuq %zmm1, %zmm0, %zmm0
1328 ; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
1329 ; AVX512-NEXT: vzeroupper
1331 %1 = icmp ule <2 x i64> %a, %b
1332 %2 = select <2 x i1> %1, <2 x i64> %a, <2 x i64> %b
1336 define <4 x i64> @min_le_v4i64(<4 x i64> %a, <4 x i64> %b) {
1337 ; SSE2-LABEL: min_le_v4i64:
1339 ; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [9223372039002259456,9223372039002259456]
1340 ; SSE2-NEXT: movdqa %xmm0, %xmm5
1341 ; SSE2-NEXT: pxor %xmm4, %xmm5
1342 ; SSE2-NEXT: movdqa %xmm2, %xmm6
1343 ; SSE2-NEXT: pxor %xmm4, %xmm6
1344 ; SSE2-NEXT: movdqa %xmm6, %xmm7
1345 ; SSE2-NEXT: pcmpgtd %xmm5, %xmm7
1346 ; SSE2-NEXT: pshufd {{.*#+}} xmm8 = xmm7[0,0,2,2]
1347 ; SSE2-NEXT: pcmpeqd %xmm5, %xmm6
1348 ; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm6[1,1,3,3]
1349 ; SSE2-NEXT: pand %xmm8, %xmm5
1350 ; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm7[1,1,3,3]
1351 ; SSE2-NEXT: por %xmm5, %xmm6
1352 ; SSE2-NEXT: pand %xmm6, %xmm0
1353 ; SSE2-NEXT: pandn %xmm2, %xmm6
1354 ; SSE2-NEXT: por %xmm6, %xmm0
1355 ; SSE2-NEXT: movdqa %xmm1, %xmm2
1356 ; SSE2-NEXT: pxor %xmm4, %xmm2
1357 ; SSE2-NEXT: pxor %xmm3, %xmm4
1358 ; SSE2-NEXT: movdqa %xmm4, %xmm5
1359 ; SSE2-NEXT: pcmpgtd %xmm2, %xmm5
1360 ; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm5[0,0,2,2]
1361 ; SSE2-NEXT: pcmpeqd %xmm2, %xmm4
1362 ; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm4[1,1,3,3]
1363 ; SSE2-NEXT: pand %xmm6, %xmm2
1364 ; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm5[1,1,3,3]
1365 ; SSE2-NEXT: por %xmm2, %xmm4
1366 ; SSE2-NEXT: pand %xmm4, %xmm1
1367 ; SSE2-NEXT: pandn %xmm3, %xmm4
1368 ; SSE2-NEXT: por %xmm4, %xmm1
1371 ; SSE41-LABEL: min_le_v4i64:
1373 ; SSE41-NEXT: movdqa %xmm0, %xmm4
1374 ; SSE41-NEXT: movdqa {{.*#+}} xmm5 = [9223372039002259456,9223372039002259456]
1375 ; SSE41-NEXT: pxor %xmm5, %xmm0
1376 ; SSE41-NEXT: movdqa %xmm2, %xmm6
1377 ; SSE41-NEXT: pxor %xmm5, %xmm6
1378 ; SSE41-NEXT: movdqa %xmm6, %xmm7
1379 ; SSE41-NEXT: pcmpeqd %xmm0, %xmm7
1380 ; SSE41-NEXT: pcmpgtd %xmm0, %xmm6
1381 ; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm6[0,0,2,2]
1382 ; SSE41-NEXT: pand %xmm7, %xmm0
1383 ; SSE41-NEXT: por %xmm6, %xmm0
1384 ; SSE41-NEXT: blendvpd %xmm0, %xmm4, %xmm2
1385 ; SSE41-NEXT: movdqa %xmm1, %xmm0
1386 ; SSE41-NEXT: pxor %xmm5, %xmm0
1387 ; SSE41-NEXT: pxor %xmm3, %xmm5
1388 ; SSE41-NEXT: movdqa %xmm5, %xmm4
1389 ; SSE41-NEXT: pcmpeqd %xmm0, %xmm4
1390 ; SSE41-NEXT: pcmpgtd %xmm0, %xmm5
1391 ; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm5[0,0,2,2]
1392 ; SSE41-NEXT: pand %xmm4, %xmm0
1393 ; SSE41-NEXT: por %xmm5, %xmm0
1394 ; SSE41-NEXT: blendvpd %xmm0, %xmm1, %xmm3
1395 ; SSE41-NEXT: movapd %xmm2, %xmm0
1396 ; SSE41-NEXT: movapd %xmm3, %xmm1
1399 ; SSE42-LABEL: min_le_v4i64:
1401 ; SSE42-NEXT: movdqa %xmm0, %xmm4
1402 ; SSE42-NEXT: movdqa {{.*#+}} xmm5 = [9223372036854775808,9223372036854775808]
1403 ; SSE42-NEXT: movdqa %xmm0, %xmm6
1404 ; SSE42-NEXT: pxor %xmm5, %xmm6
1405 ; SSE42-NEXT: movdqa %xmm2, %xmm0
1406 ; SSE42-NEXT: pxor %xmm5, %xmm0
1407 ; SSE42-NEXT: pcmpgtq %xmm6, %xmm0
1408 ; SSE42-NEXT: blendvpd %xmm0, %xmm4, %xmm2
1409 ; SSE42-NEXT: movdqa %xmm1, %xmm0
1410 ; SSE42-NEXT: pxor %xmm5, %xmm0
1411 ; SSE42-NEXT: pxor %xmm3, %xmm5
1412 ; SSE42-NEXT: pcmpgtq %xmm0, %xmm5
1413 ; SSE42-NEXT: movdqa %xmm5, %xmm0
1414 ; SSE42-NEXT: blendvpd %xmm0, %xmm1, %xmm3
1415 ; SSE42-NEXT: movapd %xmm2, %xmm0
1416 ; SSE42-NEXT: movapd %xmm3, %xmm1
1419 ; AVX1-LABEL: min_le_v4i64:
1421 ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
1422 ; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [9223372036854775808,9223372036854775808]
1423 ; AVX1-NEXT: vpxor %xmm3, %xmm2, %xmm2
1424 ; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm4
1425 ; AVX1-NEXT: vpxor %xmm3, %xmm4, %xmm4
1426 ; AVX1-NEXT: vpcmpgtq %xmm2, %xmm4, %xmm2
1427 ; AVX1-NEXT: vpxor %xmm3, %xmm0, %xmm4
1428 ; AVX1-NEXT: vpxor %xmm3, %xmm1, %xmm3
1429 ; AVX1-NEXT: vpcmpgtq %xmm4, %xmm3, %xmm3
1430 ; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm3, %ymm2
1431 ; AVX1-NEXT: vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
1434 ; AVX2-LABEL: min_le_v4i64:
1436 ; AVX2-NEXT: vpbroadcastq {{.*#+}} ymm2 = [9223372036854775808,9223372036854775808,9223372036854775808,9223372036854775808]
1437 ; AVX2-NEXT: vpxor %ymm2, %ymm0, %ymm3
1438 ; AVX2-NEXT: vpxor %ymm2, %ymm1, %ymm2
1439 ; AVX2-NEXT: vpcmpgtq %ymm3, %ymm2, %ymm2
1440 ; AVX2-NEXT: vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
1443 ; AVX512-LABEL: min_le_v4i64:
1445 ; AVX512-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
1446 ; AVX512-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
1447 ; AVX512-NEXT: vpminuq %zmm1, %zmm0, %zmm0
1448 ; AVX512-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
1450 %1 = icmp ule <4 x i64> %a, %b
1451 %2 = select <4 x i1> %1, <4 x i64> %a, <4 x i64> %b
1455 define <4 x i32> @min_le_v4i32(<4 x i32> %a, <4 x i32> %b) {
1456 ; SSE2-LABEL: min_le_v4i32:
1458 ; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
1459 ; SSE2-NEXT: movdqa %xmm0, %xmm3
1460 ; SSE2-NEXT: pxor %xmm2, %xmm3
1461 ; SSE2-NEXT: pxor %xmm1, %xmm2
1462 ; SSE2-NEXT: pcmpgtd %xmm3, %xmm2
1463 ; SSE2-NEXT: pand %xmm2, %xmm0
1464 ; SSE2-NEXT: pandn %xmm1, %xmm2
1465 ; SSE2-NEXT: por %xmm2, %xmm0
1468 ; SSE41-LABEL: min_le_v4i32:
1470 ; SSE41-NEXT: pminud %xmm1, %xmm0
1473 ; SSE42-LABEL: min_le_v4i32:
1475 ; SSE42-NEXT: pminud %xmm1, %xmm0
1478 ; AVX-LABEL: min_le_v4i32:
1480 ; AVX-NEXT: vpminud %xmm1, %xmm0, %xmm0
1482 %1 = icmp ule <4 x i32> %a, %b
1483 %2 = select <4 x i1> %1, <4 x i32> %a, <4 x i32> %b
1487 define <8 x i32> @min_le_v8i32(<8 x i32> %a, <8 x i32> %b) {
1488 ; SSE2-LABEL: min_le_v8i32:
1490 ; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [2147483648,2147483648,2147483648,2147483648]
1491 ; SSE2-NEXT: movdqa %xmm0, %xmm5
1492 ; SSE2-NEXT: pxor %xmm4, %xmm5
1493 ; SSE2-NEXT: movdqa %xmm2, %xmm6
1494 ; SSE2-NEXT: pxor %xmm4, %xmm6
1495 ; SSE2-NEXT: pcmpgtd %xmm5, %xmm6
1496 ; SSE2-NEXT: pand %xmm6, %xmm0
1497 ; SSE2-NEXT: pandn %xmm2, %xmm6
1498 ; SSE2-NEXT: por %xmm6, %xmm0
1499 ; SSE2-NEXT: movdqa %xmm1, %xmm2
1500 ; SSE2-NEXT: pxor %xmm4, %xmm2
1501 ; SSE2-NEXT: pxor %xmm3, %xmm4
1502 ; SSE2-NEXT: pcmpgtd %xmm2, %xmm4
1503 ; SSE2-NEXT: pand %xmm4, %xmm1
1504 ; SSE2-NEXT: pandn %xmm3, %xmm4
1505 ; SSE2-NEXT: por %xmm4, %xmm1
1508 ; SSE41-LABEL: min_le_v8i32:
1510 ; SSE41-NEXT: pminud %xmm2, %xmm0
1511 ; SSE41-NEXT: pminud %xmm3, %xmm1
1514 ; SSE42-LABEL: min_le_v8i32:
1516 ; SSE42-NEXT: pminud %xmm2, %xmm0
1517 ; SSE42-NEXT: pminud %xmm3, %xmm1
1520 ; AVX1-LABEL: min_le_v8i32:
1522 ; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
1523 ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
1524 ; AVX1-NEXT: vpminud %xmm2, %xmm3, %xmm2
1525 ; AVX1-NEXT: vpminud %xmm1, %xmm0, %xmm0
1526 ; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
1529 ; AVX2-LABEL: min_le_v8i32:
1531 ; AVX2-NEXT: vpminud %ymm1, %ymm0, %ymm0
1534 ; AVX512-LABEL: min_le_v8i32:
1536 ; AVX512-NEXT: vpminud %ymm1, %ymm0, %ymm0
1538 %1 = icmp ule <8 x i32> %a, %b
1539 %2 = select <8 x i1> %1, <8 x i32> %a, <8 x i32> %b
1543 define <8 x i16> @min_le_v8i16(<8 x i16> %a, <8 x i16> %b) {
1544 ; SSE2-LABEL: min_le_v8i16:
1546 ; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [32768,32768,32768,32768,32768,32768,32768,32768]
1547 ; SSE2-NEXT: pxor %xmm2, %xmm1
1548 ; SSE2-NEXT: pxor %xmm2, %xmm0
1549 ; SSE2-NEXT: pminsw %xmm1, %xmm0
1550 ; SSE2-NEXT: pxor %xmm2, %xmm0
1553 ; SSE41-LABEL: min_le_v8i16:
1555 ; SSE41-NEXT: pminuw %xmm1, %xmm0
1558 ; SSE42-LABEL: min_le_v8i16:
1560 ; SSE42-NEXT: pminuw %xmm1, %xmm0
1563 ; AVX-LABEL: min_le_v8i16:
1565 ; AVX-NEXT: vpminuw %xmm1, %xmm0, %xmm0
1567 %1 = icmp ule <8 x i16> %a, %b
1568 %2 = select <8 x i1> %1, <8 x i16> %a, <8 x i16> %b
1572 define <16 x i16> @min_le_v16i16(<16 x i16> %a, <16 x i16> %b) {
1573 ; SSE2-LABEL: min_le_v16i16:
1575 ; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [32768,32768,32768,32768,32768,32768,32768,32768]
1576 ; SSE2-NEXT: pxor %xmm4, %xmm2
1577 ; SSE2-NEXT: pxor %xmm4, %xmm0
1578 ; SSE2-NEXT: pminsw %xmm2, %xmm0
1579 ; SSE2-NEXT: pxor %xmm4, %xmm0
1580 ; SSE2-NEXT: pxor %xmm4, %xmm3
1581 ; SSE2-NEXT: pxor %xmm4, %xmm1
1582 ; SSE2-NEXT: pminsw %xmm3, %xmm1
1583 ; SSE2-NEXT: pxor %xmm4, %xmm1
1586 ; SSE41-LABEL: min_le_v16i16:
1588 ; SSE41-NEXT: pminuw %xmm2, %xmm0
1589 ; SSE41-NEXT: pminuw %xmm3, %xmm1
1592 ; SSE42-LABEL: min_le_v16i16:
1594 ; SSE42-NEXT: pminuw %xmm2, %xmm0
1595 ; SSE42-NEXT: pminuw %xmm3, %xmm1
1598 ; AVX1-LABEL: min_le_v16i16:
1600 ; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
1601 ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
1602 ; AVX1-NEXT: vpminuw %xmm2, %xmm3, %xmm2
1603 ; AVX1-NEXT: vpminuw %xmm1, %xmm0, %xmm0
1604 ; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
1607 ; AVX2-LABEL: min_le_v16i16:
1609 ; AVX2-NEXT: vpminuw %ymm1, %ymm0, %ymm0
1612 ; AVX512-LABEL: min_le_v16i16:
1614 ; AVX512-NEXT: vpminuw %ymm1, %ymm0, %ymm0
1616 %1 = icmp ule <16 x i16> %a, %b
1617 %2 = select <16 x i1> %1, <16 x i16> %a, <16 x i16> %b
1621 define <16 x i8> @min_le_v16i8(<16 x i8> %a, <16 x i8> %b) {
1622 ; SSE-LABEL: min_le_v16i8:
1624 ; SSE-NEXT: pminub %xmm1, %xmm0
1627 ; AVX-LABEL: min_le_v16i8:
1629 ; AVX-NEXT: vpminub %xmm1, %xmm0, %xmm0
1631 %1 = icmp ule <16 x i8> %a, %b
1632 %2 = select <16 x i1> %1, <16 x i8> %a, <16 x i8> %b
1636 define <32 x i8> @min_le_v32i8(<32 x i8> %a, <32 x i8> %b) {
1637 ; SSE-LABEL: min_le_v32i8:
1639 ; SSE-NEXT: pminub %xmm2, %xmm0
1640 ; SSE-NEXT: pminub %xmm3, %xmm1
1643 ; AVX1-LABEL: min_le_v32i8:
1645 ; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
1646 ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
1647 ; AVX1-NEXT: vpminub %xmm2, %xmm3, %xmm2
1648 ; AVX1-NEXT: vpminub %xmm1, %xmm0, %xmm0
1649 ; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
1652 ; AVX2-LABEL: min_le_v32i8:
1654 ; AVX2-NEXT: vpminub %ymm1, %ymm0, %ymm0
1657 ; AVX512-LABEL: min_le_v32i8:
1659 ; AVX512-NEXT: vpminub %ymm1, %ymm0, %ymm0
1661 %1 = icmp ule <32 x i8> %a, %b
1662 %2 = select <32 x i1> %1, <32 x i8> %a, <32 x i8> %b
1670 define <2 x i64> @max_gt_v2i64c() {
1671 ; SSE-LABEL: max_gt_v2i64c:
1673 ; SSE-NEXT: movaps {{.*#+}} xmm0 = [18446744073709551615,7]
1676 ; AVX-LABEL: max_gt_v2i64c:
1678 ; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [18446744073709551615,7]
1680 %1 = insertelement <2 x i64> <i64 -7, i64 7>, i64 -7, i32 0
1681 %2 = insertelement <2 x i64> <i64 -1, i64 1>, i64 -1, i32 0
1682 %3 = icmp ugt <2 x i64> %1, %2
1683 %4 = select <2 x i1> %3, <2 x i64> %1, <2 x i64> %2
1687 define <4 x i64> @max_gt_v4i64c() {
1688 ; SSE-LABEL: max_gt_v4i64c:
1690 ; SSE-NEXT: movaps {{.*#+}} xmm1 = [7,7]
1691 ; SSE-NEXT: pcmpeqd %xmm0, %xmm0
1694 ; AVX-LABEL: max_gt_v4i64c:
1696 ; AVX-NEXT: vmovaps {{.*#+}} ymm0 = [18446744073709551615,18446744073709551615,7,7]
1698 %1 = insertelement <4 x i64> <i64 -7, i64 -1, i64 1, i64 7>, i64 -7, i32 0
1699 %2 = insertelement <4 x i64> <i64 -1, i64 -7, i64 7, i64 1>, i64 -1, i32 0
1700 %3 = icmp ugt <4 x i64> %1, %2
1701 %4 = select <4 x i1> %3, <4 x i64> %1, <4 x i64> %2
1705 define <4 x i32> @max_gt_v4i32c() {
1706 ; SSE-LABEL: max_gt_v4i32c:
1708 ; SSE-NEXT: movaps {{.*#+}} xmm0 = [4294967295,4294967295,7,7]
1711 ; AVX-LABEL: max_gt_v4i32c:
1713 ; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [4294967295,4294967295,7,7]
1715 %1 = insertelement <4 x i32> <i32 -7, i32 -1, i32 1, i32 7>, i32 -7, i32 0
1716 %2 = insertelement <4 x i32> <i32 -1, i32 -7, i32 7, i32 1>, i32 -1, i32 0
1717 %3 = icmp ugt <4 x i32> %1, %2
1718 %4 = select <4 x i1> %3, <4 x i32> %1, <4 x i32> %2
1722 define <8 x i32> @max_gt_v8i32c() {
1723 ; SSE-LABEL: max_gt_v8i32c:
1725 ; SSE-NEXT: movaps {{.*#+}} xmm0 = [4294967295,4294967293,4294967293,4294967295]
1726 ; SSE-NEXT: movaps {{.*#+}} xmm1 = [7,5,5,7]
1729 ; AVX-LABEL: max_gt_v8i32c:
1731 ; AVX-NEXT: vmovaps {{.*#+}} ymm0 = [4294967295,4294967293,4294967293,4294967295,7,5,5,7]
1733 %1 = insertelement <8 x i32> <i32 -7, i32 -5, i32 -3, i32 -1, i32 1, i32 3, i32 5, i32 7>, i32 -7, i32 0
1734 %2 = insertelement <8 x i32> <i32 -1, i32 -3, i32 -5, i32 -7, i32 7, i32 5, i32 3, i32 1>, i32 -1, i32 0
1735 %3 = icmp ugt <8 x i32> %1, %2
1736 %4 = select <8 x i1> %3, <8 x i32> %1, <8 x i32> %2
1740 define <8 x i16> @max_gt_v8i16c() {
1741 ; SSE-LABEL: max_gt_v8i16c:
1743 ; SSE-NEXT: movaps {{.*#+}} xmm0 = [65535,65533,65533,65535,7,5,5,7]
1746 ; AVX-LABEL: max_gt_v8i16c:
1748 ; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [65535,65533,65533,65535,7,5,5,7]
1750 %1 = insertelement <8 x i16> <i16 -7, i16 -5, i16 -3, i16 -1, i16 1, i16 3, i16 5, i16 7>, i16 -7, i32 0
1751 %2 = insertelement <8 x i16> <i16 -1, i16 -3, i16 -5, i16 -7, i16 7, i16 5, i16 3, i16 1>, i16 -1, i32 0
1752 %3 = icmp ugt <8 x i16> %1, %2
1753 %4 = select <8 x i1> %3, <8 x i16> %1, <8 x i16> %2
1757 define <16 x i16> @max_gt_v16i16c() {
1758 ; SSE-LABEL: max_gt_v16i16c:
1760 ; SSE-NEXT: movaps {{.*#+}} xmm0 = [65535,65534,65533,65532,65533,65534,65535,0]
1761 ; SSE-NEXT: movaps {{.*#+}} xmm1 = [7,6,5,4,5,6,7,8]
1764 ; AVX-LABEL: max_gt_v16i16c:
1766 ; AVX-NEXT: vmovaps {{.*#+}} ymm0 = [65535,65534,65533,65532,65533,65534,65535,0,7,6,5,4,5,6,7,8]
1768 %1 = insertelement <16 x i16> <i16 -7, i16 -6, i16 -5, i16 -4, i16 -3, i16 -2, i16 -1, i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8>, i16 -7, i32 0
1769 %2 = insertelement <16 x i16> <i16 -1, i16 -2, i16 -3, i16 -4, i16 -5, i16 -6, i16 -7, i16 0, i16 7, i16 6, i16 5, i16 4, i16 3, i16 2, i16 1, i16 0>, i16 -1, i32 0
1770 %3 = icmp ugt <16 x i16> %1, %2
1771 %4 = select <16 x i1> %3, <16 x i16> %1, <16 x i16> %2
1775 define <16 x i8> @max_gt_v16i8c() {
1776 ; SSE-LABEL: max_gt_v16i8c:
1778 ; SSE-NEXT: movaps {{.*#+}} xmm0 = [255,254,253,252,253,254,255,0,7,6,5,4,5,6,7,8]
1781 ; AVX-LABEL: max_gt_v16i8c:
1783 ; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [255,254,253,252,253,254,255,0,7,6,5,4,5,6,7,8]
1785 %1 = insertelement <16 x i8> <i8 -7, i8 -6, i8 -5, i8 -4, i8 -3, i8 -2, i8 -1, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8>, i8 -7, i32 0
1786 %2 = insertelement <16 x i8> <i8 -1, i8 -2, i8 -3, i8 -4, i8 -5, i8 -6, i8 -7, i8 0, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0>, i8 -1, i32 0
1787 %3 = icmp ugt <16 x i8> %1, %2
1788 %4 = select <16 x i1> %3, <16 x i8> %1, <16 x i8> %2
1792 define <2 x i64> @max_ge_v2i64c() {
1793 ; SSE-LABEL: max_ge_v2i64c:
1795 ; SSE-NEXT: movaps {{.*#+}} xmm0 = [18446744073709551615,7]
1798 ; AVX-LABEL: max_ge_v2i64c:
1800 ; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [18446744073709551615,7]
1802 %1 = insertelement <2 x i64> <i64 -7, i64 7>, i64 -7, i32 0
1803 %2 = insertelement <2 x i64> <i64 -1, i64 1>, i64 -1, i32 0
1804 %3 = icmp uge <2 x i64> %1, %2
1805 %4 = select <2 x i1> %3, <2 x i64> %1, <2 x i64> %2
1809 define <4 x i64> @max_ge_v4i64c() {
1810 ; SSE-LABEL: max_ge_v4i64c:
1812 ; SSE-NEXT: movaps {{.*#+}} xmm1 = [7,7]
1813 ; SSE-NEXT: pcmpeqd %xmm0, %xmm0
1816 ; AVX-LABEL: max_ge_v4i64c:
1818 ; AVX-NEXT: vmovaps {{.*#+}} ymm0 = [18446744073709551615,18446744073709551615,7,7]
1820 %1 = insertelement <4 x i64> <i64 -7, i64 -1, i64 1, i64 7>, i64 -7, i32 0
1821 %2 = insertelement <4 x i64> <i64 -1, i64 -7, i64 7, i64 1>, i64 -1, i32 0
1822 %3 = icmp uge <4 x i64> %1, %2
1823 %4 = select <4 x i1> %3, <4 x i64> %1, <4 x i64> %2
1827 define <4 x i32> @max_ge_v4i32c() {
1828 ; SSE-LABEL: max_ge_v4i32c:
1830 ; SSE-NEXT: movaps {{.*#+}} xmm0 = [4294967295,4294967295,7,7]
1833 ; AVX-LABEL: max_ge_v4i32c:
1835 ; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [4294967295,4294967295,7,7]
1837 %1 = insertelement <4 x i32> <i32 -7, i32 -1, i32 1, i32 7>, i32 -7, i32 0
1838 %2 = insertelement <4 x i32> <i32 -1, i32 -7, i32 7, i32 1>, i32 -1, i32 0
1839 %3 = icmp uge <4 x i32> %1, %2
1840 %4 = select <4 x i1> %3, <4 x i32> %1, <4 x i32> %2
1844 define <8 x i32> @max_ge_v8i32c() {
1845 ; SSE-LABEL: max_ge_v8i32c:
1847 ; SSE-NEXT: movaps {{.*#+}} xmm0 = [4294967295,4294967293,4294967293,4294967295]
1848 ; SSE-NEXT: movaps {{.*#+}} xmm1 = [7,5,5,7]
1851 ; AVX-LABEL: max_ge_v8i32c:
1853 ; AVX-NEXT: vmovaps {{.*#+}} ymm0 = [4294967295,4294967293,4294967293,4294967295,7,5,5,7]
1855 %1 = insertelement <8 x i32> <i32 -7, i32 -5, i32 -3, i32 -1, i32 1, i32 3, i32 5, i32 7>, i32 -7, i32 0
1856 %2 = insertelement <8 x i32> <i32 -1, i32 -3, i32 -5, i32 -7, i32 7, i32 5, i32 3, i32 1>, i32 -1, i32 0
1857 %3 = icmp uge <8 x i32> %1, %2
1858 %4 = select <8 x i1> %3, <8 x i32> %1, <8 x i32> %2
1862 define <8 x i16> @max_ge_v8i16c() {
1863 ; SSE-LABEL: max_ge_v8i16c:
1865 ; SSE-NEXT: movaps {{.*#+}} xmm0 = [65535,65533,65533,65535,7,5,5,7]
1868 ; AVX-LABEL: max_ge_v8i16c:
1870 ; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [65535,65533,65533,65535,7,5,5,7]
1872 %1 = insertelement <8 x i16> <i16 -7, i16 -5, i16 -3, i16 -1, i16 1, i16 3, i16 5, i16 7>, i16 -7, i32 0
1873 %2 = insertelement <8 x i16> <i16 -1, i16 -3, i16 -5, i16 -7, i16 7, i16 5, i16 3, i16 1>, i16 -1, i32 0
1874 %3 = icmp uge <8 x i16> %1, %2
1875 %4 = select <8 x i1> %3, <8 x i16> %1, <8 x i16> %2
define <16 x i16> @max_ge_v16i16c() {
; SSE-LABEL: max_ge_v16i16c:
; SSE:       # %bb.0:
; SSE-NEXT:    movaps {{.*#+}} xmm0 = [65535,65534,65533,65532,65533,65534,65535,0]
; SSE-NEXT:    movaps {{.*#+}} xmm1 = [7,6,5,4,5,6,7,8]
; SSE-NEXT:    retq
;
; AVX-LABEL: max_ge_v16i16c:
; AVX:       # %bb.0:
; AVX-NEXT:    vmovaps {{.*#+}} ymm0 = [65535,65534,65533,65532,65533,65534,65535,0,7,6,5,4,5,6,7,8]
; AVX-NEXT:    retq
  %1 = insertelement <16 x i16> <i16 -7, i16 -6, i16 -5, i16 -4, i16 -3, i16 -2, i16 -1, i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8>, i16 -7, i32 0
  %2 = insertelement <16 x i16> <i16 -1, i16 -2, i16 -3, i16 -4, i16 -5, i16 -6, i16 -7, i16 0, i16 7, i16 6, i16 5, i16 4, i16 3, i16 2, i16 1, i16 0>, i16 -1, i32 0
  %3 = icmp uge <16 x i16> %1, %2
  %4 = select <16 x i1> %3, <16 x i16> %1, <16 x i16> %2
  ret <16 x i16> %4
}

define <16 x i8> @max_ge_v16i8c() {
; SSE-LABEL: max_ge_v16i8c:
; SSE:       # %bb.0:
; SSE-NEXT:    movaps {{.*#+}} xmm0 = [255,254,253,252,253,254,255,0,7,6,5,4,5,6,7,8]
; SSE-NEXT:    retq
;
; AVX-LABEL: max_ge_v16i8c:
; AVX:       # %bb.0:
; AVX-NEXT:    vmovaps {{.*#+}} xmm0 = [255,254,253,252,253,254,255,0,7,6,5,4,5,6,7,8]
; AVX-NEXT:    retq
  %1 = insertelement <16 x i8> <i8 -7, i8 -6, i8 -5, i8 -4, i8 -3, i8 -2, i8 -1, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8>, i8 -7, i32 0
  %2 = insertelement <16 x i8> <i8 -1, i8 -2, i8 -3, i8 -4, i8 -5, i8 -6, i8 -7, i8 0, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0>, i8 -1, i32 0
  %3 = icmp uge <16 x i8> %1, %2
  %4 = select <16 x i1> %3, <16 x i8> %1, <16 x i8> %2
  ret <16 x i8> %4
}

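; Note: negative lanes in the folded CHECK constants print as their unsigned
; two's-complement values, e.g. -7 is 18446744073709551609 (2^64-7) for i64,
; 4294967289 (2^32-7) for i32, 65529 (2^16-7) for i16 and 249 (2^8-7) for i8.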
define <2 x i64> @min_lt_v2i64c() {
; SSE-LABEL: min_lt_v2i64c:
; SSE:       # %bb.0:
; SSE-NEXT:    movaps {{.*#+}} xmm0 = [18446744073709551609,1]
; SSE-NEXT:    retq
;
; AVX-LABEL: min_lt_v2i64c:
; AVX:       # %bb.0:
; AVX-NEXT:    vmovaps {{.*#+}} xmm0 = [18446744073709551609,1]
; AVX-NEXT:    retq
  %1 = insertelement <2 x i64> <i64 -7, i64 7>, i64 -7, i32 0
  %2 = insertelement <2 x i64> <i64 -1, i64 1>, i64 -1, i32 0
  %3 = icmp ult <2 x i64> %1, %2
  %4 = select <2 x i1> %3, <2 x i64> %1, <2 x i64> %2
  ret <2 x i64> %4
}

define <4 x i64> @min_lt_v4i64c() {
; SSE-LABEL: min_lt_v4i64c:
; SSE:       # %bb.0:
; SSE-NEXT:    movaps {{.*#+}} xmm0 = [18446744073709551609,18446744073709551609]
; SSE-NEXT:    movaps {{.*#+}} xmm1 = [1,1]
; SSE-NEXT:    retq
;
; AVX-LABEL: min_lt_v4i64c:
; AVX:       # %bb.0:
; AVX-NEXT:    vmovaps {{.*#+}} ymm0 = [18446744073709551609,18446744073709551609,1,1]
; AVX-NEXT:    retq
  %1 = insertelement <4 x i64> <i64 -7, i64 -1, i64 1, i64 7>, i64 -7, i32 0
  %2 = insertelement <4 x i64> <i64 -1, i64 -7, i64 7, i64 1>, i64 -1, i32 0
  %3 = icmp ult <4 x i64> %1, %2
  %4 = select <4 x i1> %3, <4 x i64> %1, <4 x i64> %2
  ret <4 x i64> %4
}

define <4 x i32> @min_lt_v4i32c() {
; SSE-LABEL: min_lt_v4i32c:
; SSE:       # %bb.0:
; SSE-NEXT:    movaps {{.*#+}} xmm0 = [4294967289,4294967289,1,1]
; SSE-NEXT:    retq
;
; AVX-LABEL: min_lt_v4i32c:
; AVX:       # %bb.0:
; AVX-NEXT:    vmovaps {{.*#+}} xmm0 = [4294967289,4294967289,1,1]
; AVX-NEXT:    retq
  %1 = insertelement <4 x i32> <i32 -7, i32 -1, i32 1, i32 7>, i32 -7, i32 0
  %2 = insertelement <4 x i32> <i32 -1, i32 -7, i32 7, i32 1>, i32 -1, i32 0
  %3 = icmp ult <4 x i32> %1, %2
  %4 = select <4 x i1> %3, <4 x i32> %1, <4 x i32> %2
  ret <4 x i32> %4
}

define <8 x i32> @min_lt_v8i32c() {
; SSE-LABEL: min_lt_v8i32c:
; SSE:       # %bb.0:
; SSE-NEXT:    movaps {{.*#+}} xmm0 = [4294967289,4294967291,4294967291,4294967289]
; SSE-NEXT:    movaps {{.*#+}} xmm1 = [1,3,3,1]
; SSE-NEXT:    retq
;
; AVX-LABEL: min_lt_v8i32c:
; AVX:       # %bb.0:
; AVX-NEXT:    vmovaps {{.*#+}} ymm0 = [4294967289,4294967291,4294967291,4294967289,1,3,3,1]
; AVX-NEXT:    retq
  %1 = insertelement <8 x i32> <i32 -7, i32 -5, i32 -3, i32 -1, i32 1, i32 3, i32 5, i32 7>, i32 -7, i32 0
  %2 = insertelement <8 x i32> <i32 -1, i32 -3, i32 -5, i32 -7, i32 7, i32 5, i32 3, i32 1>, i32 -1, i32 0
  %3 = icmp ult <8 x i32> %1, %2
  %4 = select <8 x i1> %3, <8 x i32> %1, <8 x i32> %2
  ret <8 x i32> %4
}

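; Note: the next three min_lt tests insert 1 (not -1) into lane 0 of %2,
; which is why lane 0 of the folded minimum constant is 1.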
define <8 x i16> @min_lt_v8i16c() {
; SSE-LABEL: min_lt_v8i16c:
; SSE:       # %bb.0:
; SSE-NEXT:    movaps {{.*#+}} xmm0 = [1,65531,65531,65529,1,3,3,1]
; SSE-NEXT:    retq
;
; AVX-LABEL: min_lt_v8i16c:
; AVX:       # %bb.0:
; AVX-NEXT:    vmovaps {{.*#+}} xmm0 = [1,65531,65531,65529,1,3,3,1]
; AVX-NEXT:    retq
  %1 = insertelement <8 x i16> <i16 -7, i16 -5, i16 -3, i16 -1, i16 1, i16 3, i16 5, i16 7>, i16 -7, i32 0
  %2 = insertelement <8 x i16> <i16 -1, i16 -3, i16 -5, i16 -7, i16 7, i16 5, i16 3, i16 1>, i16 1, i32 0
  %3 = icmp ult <8 x i16> %1, %2
  %4 = select <8 x i1> %3, <8 x i16> %1, <8 x i16> %2
  ret <8 x i16> %4
}

define <16 x i16> @min_lt_v16i16c() {
; SSE-LABEL: min_lt_v16i16c:
; SSE:       # %bb.0:
; SSE-NEXT:    movaps {{.*#+}} xmm0 = [1,65530,65531,65532,65531,65530,65529,0]
; SSE-NEXT:    movaps {{.*#+}} xmm1 = [1,2,3,4,3,2,1,0]
; SSE-NEXT:    retq
;
; AVX-LABEL: min_lt_v16i16c:
; AVX:       # %bb.0:
; AVX-NEXT:    vmovaps {{.*#+}} ymm0 = [1,65530,65531,65532,65531,65530,65529,0,1,2,3,4,3,2,1,0]
; AVX-NEXT:    retq
  %1 = insertelement <16 x i16> <i16 -7, i16 -6, i16 -5, i16 -4, i16 -3, i16 -2, i16 -1, i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8>, i16 -7, i32 0
  %2 = insertelement <16 x i16> <i16 -1, i16 -2, i16 -3, i16 -4, i16 -5, i16 -6, i16 -7, i16 0, i16 7, i16 6, i16 5, i16 4, i16 3, i16 2, i16 1, i16 0>, i16 1, i32 0
  %3 = icmp ult <16 x i16> %1, %2
  %4 = select <16 x i1> %3, <16 x i16> %1, <16 x i16> %2
  ret <16 x i16> %4
}

define <16 x i8> @min_lt_v16i8c() {
; SSE-LABEL: min_lt_v16i8c:
; SSE:       # %bb.0:
; SSE-NEXT:    movaps {{.*#+}} xmm0 = [1,250,251,252,251,250,249,0,1,2,3,4,3,2,1,0]
; SSE-NEXT:    retq
;
; AVX-LABEL: min_lt_v16i8c:
; AVX:       # %bb.0:
; AVX-NEXT:    vmovaps {{.*#+}} xmm0 = [1,250,251,252,251,250,249,0,1,2,3,4,3,2,1,0]
; AVX-NEXT:    retq
  %1 = insertelement <16 x i8> <i8 -7, i8 -6, i8 -5, i8 -4, i8 -3, i8 -2, i8 -1, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8>, i8 -7, i32 0
  %2 = insertelement <16 x i8> <i8 -1, i8 -2, i8 -3, i8 -4, i8 -5, i8 -6, i8 -7, i8 0, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0>, i8 1, i32 0
  %3 = icmp ult <16 x i8> %1, %2
  %4 = select <16 x i1> %3, <16 x i8> %1, <16 x i8> %2
  ret <16 x i8> %4
}

define <2 x i64> @min_le_v2i64c() {
; SSE-LABEL: min_le_v2i64c:
; SSE:       # %bb.0:
; SSE-NEXT:    movaps {{.*#+}} xmm0 = [18446744073709551609,1]
; SSE-NEXT:    retq
;
; AVX-LABEL: min_le_v2i64c:
; AVX:       # %bb.0:
; AVX-NEXT:    vmovaps {{.*#+}} xmm0 = [18446744073709551609,1]
; AVX-NEXT:    retq
  %1 = insertelement <2 x i64> <i64 -7, i64 7>, i64 -7, i32 0
  %2 = insertelement <2 x i64> <i64 -1, i64 1>, i64 -1, i32 0
  %3 = icmp ule <2 x i64> %1, %2
  %4 = select <2 x i1> %3, <2 x i64> %1, <2 x i64> %2
  ret <2 x i64> %4
}

define <4 x i64> @min_le_v4i64c() {
; SSE-LABEL: min_le_v4i64c:
; SSE:       # %bb.0:
; SSE-NEXT:    movaps {{.*#+}} xmm0 = [18446744073709551609,18446744073709551609]
; SSE-NEXT:    movaps {{.*#+}} xmm1 = [1,1]
; SSE-NEXT:    retq
;
; AVX-LABEL: min_le_v4i64c:
; AVX:       # %bb.0:
; AVX-NEXT:    vmovaps {{.*#+}} ymm0 = [18446744073709551609,18446744073709551609,1,1]
; AVX-NEXT:    retq
  %1 = insertelement <4 x i64> <i64 -7, i64 -1, i64 1, i64 7>, i64 -7, i32 0
  %2 = insertelement <4 x i64> <i64 -1, i64 -7, i64 7, i64 1>, i64 -1, i32 0
  %3 = icmp ule <4 x i64> %1, %2
  %4 = select <4 x i1> %3, <4 x i64> %1, <4 x i64> %2
  ret <4 x i64> %4
}

define <4 x i32> @min_le_v4i32c() {
; SSE-LABEL: min_le_v4i32c:
; SSE:       # %bb.0:
; SSE-NEXT:    movaps {{.*#+}} xmm0 = [4294967289,4294967289,1,1]
; SSE-NEXT:    retq
;
; AVX-LABEL: min_le_v4i32c:
; AVX:       # %bb.0:
; AVX-NEXT:    vmovaps {{.*#+}} xmm0 = [4294967289,4294967289,1,1]
; AVX-NEXT:    retq
  %1 = insertelement <4 x i32> <i32 -7, i32 -1, i32 1, i32 7>, i32 -7, i32 0
  %2 = insertelement <4 x i32> <i32 -1, i32 -7, i32 7, i32 1>, i32 -1, i32 0
  %3 = icmp ule <4 x i32> %1, %2
  %4 = select <4 x i1> %3, <4 x i32> %1, <4 x i32> %2
  ret <4 x i32> %4
}

define <8 x i32> @min_le_v8i32c() {
; SSE-LABEL: min_le_v8i32c:
; SSE:       # %bb.0:
; SSE-NEXT:    movaps {{.*#+}} xmm0 = [4294967289,4294967291,4294967291,4294967289]
; SSE-NEXT:    movaps {{.*#+}} xmm1 = [1,3,3,1]
; SSE-NEXT:    retq
;
; AVX-LABEL: min_le_v8i32c:
; AVX:       # %bb.0:
; AVX-NEXT:    vmovaps {{.*#+}} ymm0 = [4294967289,4294967291,4294967291,4294967289,1,3,3,1]
; AVX-NEXT:    retq
  %1 = insertelement <8 x i32> <i32 -7, i32 -5, i32 -3, i32 -1, i32 1, i32 3, i32 5, i32 7>, i32 -7, i32 0
  %2 = insertelement <8 x i32> <i32 -1, i32 -3, i32 -5, i32 -7, i32 7, i32 5, i32 3, i32 1>, i32 -1, i32 0
  %3 = icmp ule <8 x i32> %1, %2
  %4 = select <8 x i1> %3, <8 x i32> %1, <8 x i32> %2
  ret <8 x i32> %4
}

define <8 x i16> @min_le_v8i16c() {
; SSE-LABEL: min_le_v8i16c:
; SSE:       # %bb.0:
; SSE-NEXT:    movaps {{.*#+}} xmm0 = [65529,65531,65531,65529,1,3,3,1]
; SSE-NEXT:    retq
;
; AVX-LABEL: min_le_v8i16c:
; AVX:       # %bb.0:
; AVX-NEXT:    vmovaps {{.*#+}} xmm0 = [65529,65531,65531,65529,1,3,3,1]
; AVX-NEXT:    retq
  %1 = insertelement <8 x i16> <i16 -7, i16 -5, i16 -3, i16 -1, i16 1, i16 3, i16 5, i16 7>, i16 -7, i32 0
  %2 = insertelement <8 x i16> <i16 -1, i16 -3, i16 -5, i16 -7, i16 7, i16 5, i16 3, i16 1>, i16 -1, i32 0
  %3 = icmp ule <8 x i16> %1, %2
  %4 = select <8 x i1> %3, <8 x i16> %1, <8 x i16> %2
  ret <8 x i16> %4
}

define <16 x i16> @min_le_v16i16c() {
; SSE-LABEL: min_le_v16i16c:
; SSE:       # %bb.0:
; SSE-NEXT:    movaps {{.*#+}} xmm0 = [65529,65530,65531,65532,65531,65530,65529,0]
; SSE-NEXT:    movaps {{.*#+}} xmm1 = [1,2,3,4,3,2,1,0]
; SSE-NEXT:    retq
;
; AVX-LABEL: min_le_v16i16c:
; AVX:       # %bb.0:
; AVX-NEXT:    vmovaps {{.*#+}} ymm0 = [65529,65530,65531,65532,65531,65530,65529,0,1,2,3,4,3,2,1,0]
; AVX-NEXT:    retq
  %1 = insertelement <16 x i16> <i16 -7, i16 -6, i16 -5, i16 -4, i16 -3, i16 -2, i16 -1, i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8>, i16 -7, i32 0
  %2 = insertelement <16 x i16> <i16 -1, i16 -2, i16 -3, i16 -4, i16 -5, i16 -6, i16 -7, i16 0, i16 7, i16 6, i16 5, i16 4, i16 3, i16 2, i16 1, i16 0>, i16 -1, i32 0
  %3 = icmp ule <16 x i16> %1, %2
  %4 = select <16 x i1> %3, <16 x i16> %1, <16 x i16> %2
  ret <16 x i16> %4
}

define <16 x i8> @min_le_v16i8c() {
; SSE-LABEL: min_le_v16i8c:
; SSE:       # %bb.0:
; SSE-NEXT:    movaps {{.*#+}} xmm0 = [249,250,251,252,251,250,249,0,1,2,3,4,3,2,1,0]
; SSE-NEXT:    retq
;
; AVX-LABEL: min_le_v16i8c:
; AVX:       # %bb.0:
; AVX-NEXT:    vmovaps {{.*#+}} xmm0 = [249,250,251,252,251,250,249,0,1,2,3,4,3,2,1,0]
; AVX-NEXT:    retq
  %1 = insertelement <16 x i8> <i8 -7, i8 -6, i8 -5, i8 -4, i8 -3, i8 -2, i8 -1, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8>, i8 -7, i32 0
  %2 = insertelement <16 x i8> <i8 -1, i8 -2, i8 -3, i8 -4, i8 -5, i8 -6, i8 -7, i8 0, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0>, i8 -1, i32 0
  %3 = icmp ule <16 x i8> %1, %2
  %4 = select <16 x i1> %3, <16 x i8> %1, <16 x i8> %2
  ret <16 x i8> %4
}