; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-- -mattr=+sse2 | FileCheck %s --check-prefixes=ANY,SSE,SSE2
; RUN: llc < %s -mtriple=x86_64-- -mattr=+sse4.1 | FileCheck %s --check-prefixes=ANY,SSE,SSE4,SSE41
; RUN: llc < %s -mtriple=x86_64-- -mattr=+sse4.2 | FileCheck %s --check-prefixes=ANY,SSE,SSE4,SSE42
; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2 | FileCheck %s --check-prefixes=ANY,AVX,AVX2
; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl | FileCheck %s --check-prefixes=ANY,AVX,AVX512
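
; The ANY prefix checks output common to all five RUN lines; SSE covers the
; three SSE runs, SSE4 the two SSE4 runs, and AVX the AVX2 and AVX512VL runs.
; The most specific prefixes (SSE2, SSE41, SSE42, AVX2, AVX512) carry the
; feature-specific checks.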

; There are at least 3 potential patterns corresponding to an unsigned saturated add: min, cmp with sum, cmp with not.
; Test each of those patterns with i8/i16/i32/i64.
; Test each of those with a constant operand and a variable operand.
; Test each of those with a 128-bit vector type.
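
; For reference, the three idioms correspond to the following C (a sketch only,
; written with uint32_t and the constant 42 that this file uses throughout; the
; function names are illustrative and do not appear in the checks below). Note
; that ~42u is the same value as (uint32_t)-43, which is where the -43 in the
; IR comes from:
;
;   uint32_t sat_add_using_min(uint32_t x) {        // min: clamp x first, then add
;     uint32_t m = x < ~42u ? x : ~42u;
;     return m + 42;
;   }
;
;   uint32_t sat_add_using_cmp_sum(uint32_t x) {    // cmp with sum: detect wraparound
;     uint32_t a = x + 42;
;     return x > a ? 0xFFFFFFFFu : a;
;   }
;
;   uint32_t sat_add_using_cmp_notval(uint32_t x) { // cmp with not: compare against ~42
;     uint32_t a = x + 42;
;     return x > ~42u ? 0xFFFFFFFFu : a;
;   }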

define i8 @unsigned_sat_constant_i8_using_min(i8 %x) {
; ANY-LABEL: unsigned_sat_constant_i8_using_min:
; ANY:       # %bb.0:
; ANY-NEXT:    cmpb $-43, %dil
; ANY-NEXT:    movl $213, %eax
; ANY-NEXT:    cmovbl %edi, %eax
; ANY-NEXT:    addb $42, %al
; ANY-NEXT:    # kill: def $al killed $al killed $eax
; ANY-NEXT:    retq
  %c = icmp ult i8 %x, -43
  %s = select i1 %c, i8 %x, i8 -43
  %r = add i8 %s, 42
  ret i8 %r
}

define i8 @unsigned_sat_constant_i8_using_cmp_sum(i8 %x) {
; ANY-LABEL: unsigned_sat_constant_i8_using_cmp_sum:
; ANY:       # %bb.0:
; ANY-NEXT:    addb $42, %dil
; ANY-NEXT:    movzbl %dil, %ecx
; ANY-NEXT:    movl $255, %eax
; ANY-NEXT:    cmovael %ecx, %eax
; ANY-NEXT:    # kill: def $al killed $al killed $eax
; ANY-NEXT:    retq
  %a = add i8 %x, 42
  %c = icmp ugt i8 %x, %a
  %r = select i1 %c, i8 -1, i8 %a
  ret i8 %r
}

define i8 @unsigned_sat_constant_i8_using_cmp_notval(i8 %x) {
; ANY-LABEL: unsigned_sat_constant_i8_using_cmp_notval:
; ANY:       # %bb.0:
; ANY-NEXT:    addb $42, %dil
; ANY-NEXT:    movzbl %dil, %ecx
; ANY-NEXT:    movl $255, %eax
; ANY-NEXT:    cmovael %ecx, %eax
; ANY-NEXT:    # kill: def $al killed $al killed $eax
; ANY-NEXT:    retq
  %a = add i8 %x, 42
  %c = icmp ugt i8 %x, -43
  %r = select i1 %c, i8 -1, i8 %a
  ret i8 %r
}

define i16 @unsigned_sat_constant_i16_using_min(i16 %x) {
; ANY-LABEL: unsigned_sat_constant_i16_using_min:
; ANY:       # %bb.0:
; ANY-NEXT:    cmpw $-43, %di
; ANY-NEXT:    movl $65493, %eax # imm = 0xFFD5
; ANY-NEXT:    cmovbl %edi, %eax
; ANY-NEXT:    addl $42, %eax
; ANY-NEXT:    # kill: def $ax killed $ax killed $eax
; ANY-NEXT:    retq
  %c = icmp ult i16 %x, -43
  %s = select i1 %c, i16 %x, i16 -43
  %r = add i16 %s, 42
  ret i16 %r
}

define i16 @unsigned_sat_constant_i16_using_cmp_sum(i16 %x) {
; ANY-LABEL: unsigned_sat_constant_i16_using_cmp_sum:
; ANY:       # %bb.0:
; ANY-NEXT:    addw $42, %di
; ANY-NEXT:    movl $65535, %eax # imm = 0xFFFF
; ANY-NEXT:    cmovael %edi, %eax
; ANY-NEXT:    # kill: def $ax killed $ax killed $eax
; ANY-NEXT:    retq
  %a = add i16 %x, 42
  %c = icmp ugt i16 %x, %a
  %r = select i1 %c, i16 -1, i16 %a
  ret i16 %r
}

define i16 @unsigned_sat_constant_i16_using_cmp_notval(i16 %x) {
; ANY-LABEL: unsigned_sat_constant_i16_using_cmp_notval:
; ANY:       # %bb.0:
; ANY-NEXT:    addw $42, %di
; ANY-NEXT:    movl $65535, %eax # imm = 0xFFFF
; ANY-NEXT:    cmovael %edi, %eax
; ANY-NEXT:    # kill: def $ax killed $ax killed $eax
; ANY-NEXT:    retq
  %a = add i16 %x, 42
  %c = icmp ugt i16 %x, -43
  %r = select i1 %c, i16 -1, i16 %a
  ret i16 %r
}

define i32 @unsigned_sat_constant_i32_using_min(i32 %x) {
; ANY-LABEL: unsigned_sat_constant_i32_using_min:
; ANY:       # %bb.0:
; ANY-NEXT:    cmpl $-43, %edi
; ANY-NEXT:    movl $-43, %eax
; ANY-NEXT:    cmovbl %edi, %eax
; ANY-NEXT:    addl $42, %eax
; ANY-NEXT:    retq
  %c = icmp ult i32 %x, -43
  %s = select i1 %c, i32 %x, i32 -43
  %r = add i32 %s, 42
  ret i32 %r
}

define i32 @unsigned_sat_constant_i32_using_cmp_sum(i32 %x) {
; ANY-LABEL: unsigned_sat_constant_i32_using_cmp_sum:
; ANY:       # %bb.0:
; ANY-NEXT:    addl $42, %edi
; ANY-NEXT:    movl $-1, %eax
; ANY-NEXT:    cmovael %edi, %eax
; ANY-NEXT:    retq
  %a = add i32 %x, 42
  %c = icmp ugt i32 %x, %a
  %r = select i1 %c, i32 -1, i32 %a
  ret i32 %r
}

define i32 @unsigned_sat_constant_i32_using_cmp_notval(i32 %x) {
; ANY-LABEL: unsigned_sat_constant_i32_using_cmp_notval:
; ANY:       # %bb.0:
; ANY-NEXT:    addl $42, %edi
; ANY-NEXT:    movl $-1, %eax
; ANY-NEXT:    cmovael %edi, %eax
; ANY-NEXT:    retq
  %a = add i32 %x, 42
  %c = icmp ugt i32 %x, -43
  %r = select i1 %c, i32 -1, i32 %a
  ret i32 %r
}

define i64 @unsigned_sat_constant_i64_using_min(i64 %x) {
; ANY-LABEL: unsigned_sat_constant_i64_using_min:
; ANY:       # %bb.0:
; ANY-NEXT:    cmpq $-43, %rdi
; ANY-NEXT:    movq $-43, %rax
; ANY-NEXT:    cmovbq %rdi, %rax
; ANY-NEXT:    addq $42, %rax
; ANY-NEXT:    retq
  %c = icmp ult i64 %x, -43
  %s = select i1 %c, i64 %x, i64 -43
  %r = add i64 %s, 42
  ret i64 %r
}

define i64 @unsigned_sat_constant_i64_using_cmp_sum(i64 %x) {
; ANY-LABEL: unsigned_sat_constant_i64_using_cmp_sum:
; ANY:       # %bb.0:
; ANY-NEXT:    addq $42, %rdi
; ANY-NEXT:    movq $-1, %rax
; ANY-NEXT:    cmovaeq %rdi, %rax
; ANY-NEXT:    retq
  %a = add i64 %x, 42
  %c = icmp ugt i64 %x, %a
  %r = select i1 %c, i64 -1, i64 %a
  ret i64 %r
}

define i64 @unsigned_sat_constant_i64_using_cmp_notval(i64 %x) {
; ANY-LABEL: unsigned_sat_constant_i64_using_cmp_notval:
; ANY:       # %bb.0:
; ANY-NEXT:    addq $42, %rdi
; ANY-NEXT:    movq $-1, %rax
; ANY-NEXT:    cmovaeq %rdi, %rax
; ANY-NEXT:    retq
  %a = add i64 %x, 42
  %c = icmp ugt i64 %x, -43
  %r = select i1 %c, i64 -1, i64 %a
  ret i64 %r
}

define i8 @unsigned_sat_variable_i8_using_min(i8 %x, i8 %y) {
; ANY-LABEL: unsigned_sat_variable_i8_using_min:
; ANY:       # %bb.0:
; ANY-NEXT:    movl %esi, %eax
; ANY-NEXT:    notb %al
; ANY-NEXT:    cmpb %al, %dil
; ANY-NEXT:    movzbl %al, %eax
; ANY-NEXT:    cmovbl %edi, %eax
; ANY-NEXT:    addb %sil, %al
; ANY-NEXT:    # kill: def $al killed $al killed $eax
; ANY-NEXT:    retq
  %noty = xor i8 %y, -1
  %c = icmp ult i8 %x, %noty
  %s = select i1 %c, i8 %x, i8 %noty
  %r = add i8 %s, %y
  ret i8 %r
}

define i8 @unsigned_sat_variable_i8_using_cmp_sum(i8 %x, i8 %y) {
; ANY-LABEL: unsigned_sat_variable_i8_using_cmp_sum:
; ANY:       # %bb.0:
; ANY-NEXT:    addb %sil, %dil
; ANY-NEXT:    movzbl %dil, %ecx
; ANY-NEXT:    movl $255, %eax
; ANY-NEXT:    cmovael %ecx, %eax
; ANY-NEXT:    # kill: def $al killed $al killed $eax
; ANY-NEXT:    retq
  %a = add i8 %x, %y
  %c = icmp ugt i8 %x, %a
  %r = select i1 %c, i8 -1, i8 %a
  ret i8 %r
}

define i8 @unsigned_sat_variable_i8_using_cmp_notval(i8 %x, i8 %y) {
; ANY-LABEL: unsigned_sat_variable_i8_using_cmp_notval:
; ANY:       # %bb.0:
; ANY-NEXT:    addb %dil, %sil
; ANY-NEXT:    movzbl %sil, %ecx
; ANY-NEXT:    movl $255, %eax
; ANY-NEXT:    cmovael %ecx, %eax
; ANY-NEXT:    # kill: def $al killed $al killed $eax
; ANY-NEXT:    retq
  %noty = xor i8 %y, -1
  %a = add i8 %x, %y
  %c = icmp ugt i8 %x, %noty
  %r = select i1 %c, i8 -1, i8 %a
  ret i8 %r
}

define i16 @unsigned_sat_variable_i16_using_min(i16 %x, i16 %y) {
; ANY-LABEL: unsigned_sat_variable_i16_using_min:
; ANY:       # %bb.0:
; ANY-NEXT:    # kill: def $esi killed $esi def $rsi
; ANY-NEXT:    movl %esi, %eax
; ANY-NEXT:    notl %eax
; ANY-NEXT:    cmpw %ax, %di
; ANY-NEXT:    cmovbl %edi, %eax
; ANY-NEXT:    addl %esi, %eax
; ANY-NEXT:    # kill: def $ax killed $ax killed $eax
; ANY-NEXT:    retq
  %noty = xor i16 %y, -1
  %c = icmp ult i16 %x, %noty
  %s = select i1 %c, i16 %x, i16 %noty
  %r = add i16 %s, %y
  ret i16 %r
}

define i16 @unsigned_sat_variable_i16_using_cmp_sum(i16 %x, i16 %y) {
; ANY-LABEL: unsigned_sat_variable_i16_using_cmp_sum:
; ANY:       # %bb.0:
; ANY-NEXT:    addw %si, %di
; ANY-NEXT:    movl $65535, %eax # imm = 0xFFFF
; ANY-NEXT:    cmovael %edi, %eax
; ANY-NEXT:    # kill: def $ax killed $ax killed $eax
; ANY-NEXT:    retq
  %a = add i16 %x, %y
  %c = icmp ugt i16 %x, %a
  %r = select i1 %c, i16 -1, i16 %a
  ret i16 %r
}

define i16 @unsigned_sat_variable_i16_using_cmp_notval(i16 %x, i16 %y) {
; ANY-LABEL: unsigned_sat_variable_i16_using_cmp_notval:
; ANY:       # %bb.0:
; ANY-NEXT:    addw %di, %si
; ANY-NEXT:    movl $65535, %eax # imm = 0xFFFF
; ANY-NEXT:    cmovael %esi, %eax
; ANY-NEXT:    # kill: def $ax killed $ax killed $eax
; ANY-NEXT:    retq
  %noty = xor i16 %y, -1
  %a = add i16 %x, %y
  %c = icmp ugt i16 %x, %noty
  %r = select i1 %c, i16 -1, i16 %a
  ret i16 %r
}

define i32 @unsigned_sat_variable_i32_using_min(i32 %x, i32 %y) {
; ANY-LABEL: unsigned_sat_variable_i32_using_min:
; ANY:       # %bb.0:
; ANY-NEXT:    # kill: def $esi killed $esi def $rsi
; ANY-NEXT:    movl %esi, %eax
; ANY-NEXT:    notl %eax
; ANY-NEXT:    cmpl %eax, %edi
; ANY-NEXT:    cmovbl %edi, %eax
; ANY-NEXT:    addl %esi, %eax
; ANY-NEXT:    retq
  %noty = xor i32 %y, -1
  %c = icmp ult i32 %x, %noty
  %s = select i1 %c, i32 %x, i32 %noty
  %r = add i32 %s, %y
  ret i32 %r
}

define i32 @unsigned_sat_variable_i32_using_cmp_sum(i32 %x, i32 %y) {
; ANY-LABEL: unsigned_sat_variable_i32_using_cmp_sum:
; ANY:       # %bb.0:
; ANY-NEXT:    addl %esi, %edi
; ANY-NEXT:    movl $-1, %eax
; ANY-NEXT:    cmovael %edi, %eax
; ANY-NEXT:    retq
  %a = add i32 %x, %y
  %c = icmp ugt i32 %x, %a
  %r = select i1 %c, i32 -1, i32 %a
  ret i32 %r
}

define i32 @unsigned_sat_variable_i32_using_cmp_notval(i32 %x, i32 %y) {
; ANY-LABEL: unsigned_sat_variable_i32_using_cmp_notval:
; ANY:       # %bb.0:
; ANY-NEXT:    addl %esi, %edi
; ANY-NEXT:    movl $-1, %eax
; ANY-NEXT:    cmovael %edi, %eax
; ANY-NEXT:    retq
  %noty = xor i32 %y, -1
  %a = add i32 %x, %y
  %c = icmp ugt i32 %x, %noty
  %r = select i1 %c, i32 -1, i32 %a
  ret i32 %r
}

define i64 @unsigned_sat_variable_i64_using_min(i64 %x, i64 %y) {
; ANY-LABEL: unsigned_sat_variable_i64_using_min:
; ANY:       # %bb.0:
; ANY-NEXT:    movq %rsi, %rax
; ANY-NEXT:    notq %rax
; ANY-NEXT:    cmpq %rax, %rdi
; ANY-NEXT:    cmovbq %rdi, %rax
; ANY-NEXT:    addq %rsi, %rax
; ANY-NEXT:    retq
  %noty = xor i64 %y, -1
  %c = icmp ult i64 %x, %noty
  %s = select i1 %c, i64 %x, i64 %noty
  %r = add i64 %s, %y
  ret i64 %r
}

define i64 @unsigned_sat_variable_i64_using_cmp_sum(i64 %x, i64 %y) {
; ANY-LABEL: unsigned_sat_variable_i64_using_cmp_sum:
; ANY:       # %bb.0:
; ANY-NEXT:    addq %rsi, %rdi
; ANY-NEXT:    movq $-1, %rax
; ANY-NEXT:    cmovaeq %rdi, %rax
; ANY-NEXT:    retq
  %a = add i64 %x, %y
  %c = icmp ugt i64 %x, %a
  %r = select i1 %c, i64 -1, i64 %a
  ret i64 %r
}

define i64 @unsigned_sat_variable_i64_using_cmp_notval(i64 %x, i64 %y) {
; ANY-LABEL: unsigned_sat_variable_i64_using_cmp_notval:
; ANY:       # %bb.0:
; ANY-NEXT:    addq %rsi, %rdi
; ANY-NEXT:    movq $-1, %rax
; ANY-NEXT:    cmovaeq %rdi, %rax
; ANY-NEXT:    retq
  %noty = xor i64 %y, -1
  %a = add i64 %x, %y
  %c = icmp ugt i64 %x, %noty
  %r = select i1 %c, i64 -1, i64 %a
  ret i64 %r
}
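
; All of the scalar forms above lower to a compare plus cmov; x86 has no scalar
; saturating-add instruction. The vector tests below can use the saturating
; adds that do exist (paddusb/paddusw for i8/i16 elements), while i32/i64
; elements have no unsigned saturating add and must be expanded.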

define <16 x i8> @unsigned_sat_constant_v16i8_using_min(<16 x i8> %x) {
; SSE-LABEL: unsigned_sat_constant_v16i8_using_min:
; SSE:       # %bb.0:
; SSE-NEXT:    pminub {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE-NEXT:    paddb {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: unsigned_sat_constant_v16i8_using_min:
; AVX:       # %bb.0:
; AVX-NEXT:    vpminub {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; AVX-NEXT:    vpaddb {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; AVX-NEXT:    retq
  %c = icmp ult <16 x i8> %x, <i8 -43, i8 -43, i8 -43, i8 -43, i8 -43, i8 -43, i8 -43, i8 -43, i8 -43, i8 -43, i8 -43, i8 -43, i8 -43, i8 -43, i8 -43, i8 -43>
  %s = select <16 x i1> %c, <16 x i8> %x, <16 x i8> <i8 -43, i8 -43, i8 -43, i8 -43, i8 -43, i8 -43, i8 -43, i8 -43, i8 -43, i8 -43, i8 -43, i8 -43, i8 -43, i8 -43, i8 -43, i8 -43>
  %r = add <16 x i8> %s, <i8 42, i8 42, i8 42, i8 42, i8 42, i8 42, i8 42, i8 42, i8 42, i8 42, i8 42, i8 42, i8 42, i8 42, i8 42, i8 42>
  ret <16 x i8> %r
}

define <16 x i8> @unsigned_sat_constant_v16i8_using_cmp_sum(<16 x i8> %x) {
; SSE-LABEL: unsigned_sat_constant_v16i8_using_cmp_sum:
; SSE:       # %bb.0:
; SSE-NEXT:    paddusb {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: unsigned_sat_constant_v16i8_using_cmp_sum:
; AVX:       # %bb.0:
; AVX-NEXT:    vpaddusb {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; AVX-NEXT:    retq
  %a = add <16 x i8> %x, <i8 42, i8 42, i8 42, i8 42, i8 42, i8 42, i8 42, i8 42, i8 42, i8 42, i8 42, i8 42, i8 42, i8 42, i8 42, i8 42>
  %c = icmp ugt <16 x i8> %x, %a
  %r = select <16 x i1> %c, <16 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>, <16 x i8> %a
  ret <16 x i8> %r
}

define <16 x i8> @unsigned_sat_constant_v16i8_using_cmp_notval(<16 x i8> %x) {
; SSE-LABEL: unsigned_sat_constant_v16i8_using_cmp_notval:
; SSE:       # %bb.0:
; SSE-NEXT:    paddusb {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: unsigned_sat_constant_v16i8_using_cmp_notval:
; AVX:       # %bb.0:
; AVX-NEXT:    vpaddusb {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; AVX-NEXT:    retq
  %a = add <16 x i8> %x, <i8 42, i8 42, i8 42, i8 42, i8 42, i8 42, i8 42, i8 42, i8 42, i8 42, i8 42, i8 42, i8 42, i8 42, i8 42, i8 42>
  %c = icmp ugt <16 x i8> %x, <i8 -43, i8 -43, i8 -43, i8 -43, i8 -43, i8 -43, i8 -43, i8 -43, i8 -43, i8 -43, i8 -43, i8 -43, i8 -43, i8 -43, i8 -43, i8 -43>
  %r = select <16 x i1> %c, <16 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>, <16 x i8> %a
  ret <16 x i8> %r
}

define <8 x i16> @unsigned_sat_constant_v8i16_using_min(<8 x i16> %x) {
; SSE2-LABEL: unsigned_sat_constant_v8i16_using_min:
; SSE2:       # %bb.0:
; SSE2-NEXT:    movdqa %xmm0, %xmm1
; SSE2-NEXT:    psubusw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
; SSE2-NEXT:    psubw %xmm1, %xmm0
; SSE2-NEXT:    paddw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE2-NEXT:    retq
;
; SSE4-LABEL: unsigned_sat_constant_v8i16_using_min:
; SSE4:       # %bb.0:
; SSE4-NEXT:    pminuw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE4-NEXT:    paddw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE4-NEXT:    retq
;
; AVX-LABEL: unsigned_sat_constant_v8i16_using_min:
; AVX:       # %bb.0:
; AVX-NEXT:    vpminuw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; AVX-NEXT:    vpaddw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; AVX-NEXT:    retq
  %c = icmp ult <8 x i16> %x, <i16 -43, i16 -43, i16 -43, i16 -43, i16 -43, i16 -43, i16 -43, i16 -43>
  %s = select <8 x i1> %c, <8 x i16> %x, <8 x i16> <i16 -43, i16 -43, i16 -43, i16 -43, i16 -43, i16 -43, i16 -43, i16 -43>
  %r = add <8 x i16> %s, <i16 42, i16 42, i16 42, i16 42, i16 42, i16 42, i16 42, i16 42>
  ret <8 x i16> %r
}

define <8 x i16> @unsigned_sat_constant_v8i16_using_cmp_sum(<8 x i16> %x) {
; SSE-LABEL: unsigned_sat_constant_v8i16_using_cmp_sum:
; SSE:       # %bb.0:
; SSE-NEXT:    paddusw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: unsigned_sat_constant_v8i16_using_cmp_sum:
; AVX:       # %bb.0:
; AVX-NEXT:    vpaddusw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; AVX-NEXT:    retq
  %a = add <8 x i16> %x, <i16 42, i16 42, i16 42, i16 42, i16 42, i16 42, i16 42, i16 42>
  %c = icmp ugt <8 x i16> %x, %a
  %r = select <8 x i1> %c, <8 x i16> <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>, <8 x i16> %a
  ret <8 x i16> %r
}

define <8 x i16> @unsigned_sat_constant_v8i16_using_cmp_notval(<8 x i16> %x) {
; SSE-LABEL: unsigned_sat_constant_v8i16_using_cmp_notval:
; SSE:       # %bb.0:
; SSE-NEXT:    paddusw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: unsigned_sat_constant_v8i16_using_cmp_notval:
; AVX:       # %bb.0:
; AVX-NEXT:    vpaddusw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; AVX-NEXT:    retq
  %a = add <8 x i16> %x, <i16 42, i16 42, i16 42, i16 42, i16 42, i16 42, i16 42, i16 42>
  %c = icmp ugt <8 x i16> %x, <i16 -43, i16 -43, i16 -43, i16 -43, i16 -43, i16 -43, i16 -43, i16 -43>
  %r = select <8 x i1> %c, <8 x i16> <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>, <8 x i16> %a
  ret <8 x i16> %r
}
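
; SSE2 has neither an unsigned vector compare nor pminud, so the v4i32
; expansions below bias both sides by the sign-bit constant 2147483648
; (0x80000000) and use the signed pcmpgtd to perform an unsigned compare.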

define <4 x i32> @unsigned_sat_constant_v4i32_using_min(<4 x i32> %x) {
; SSE2-LABEL: unsigned_sat_constant_v4i32_using_min:
; SSE2:       # %bb.0:
; SSE2-NEXT:    movdqa {{.*#+}} xmm1 = [2147483648,2147483648,2147483648,2147483648]
; SSE2-NEXT:    pxor %xmm0, %xmm1
; SSE2-NEXT:    movdqa {{.*#+}} xmm2 = [2147483605,2147483605,2147483605,2147483605]
; SSE2-NEXT:    pcmpgtd %xmm1, %xmm2
; SSE2-NEXT:    pand %xmm2, %xmm0
; SSE2-NEXT:    pandn {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2
; SSE2-NEXT:    por %xmm2, %xmm0
; SSE2-NEXT:    paddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE2-NEXT:    retq
;
; SSE4-LABEL: unsigned_sat_constant_v4i32_using_min:
; SSE4:       # %bb.0:
; SSE4-NEXT:    pminud {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE4-NEXT:    paddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE4-NEXT:    retq
;
; AVX2-LABEL: unsigned_sat_constant_v4i32_using_min:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [4294967253,4294967253,4294967253,4294967253]
; AVX2-NEXT:    vpminud %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [42,42,42,42]
; AVX2-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    retq
;
; AVX512-LABEL: unsigned_sat_constant_v4i32_using_min:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vpminud {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm0, %xmm0
; AVX512-NEXT:    vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm0, %xmm0
; AVX512-NEXT:    retq
  %c = icmp ult <4 x i32> %x, <i32 -43, i32 -43, i32 -43, i32 -43>
  %s = select <4 x i1> %c, <4 x i32> %x, <4 x i32> <i32 -43, i32 -43, i32 -43, i32 -43>
  %r = add <4 x i32> %s, <i32 42, i32 42, i32 42, i32 42>
  ret <4 x i32> %r
}

define <4 x i32> @unsigned_sat_constant_v4i32_using_cmp_sum(<4 x i32> %x) {
; SSE2-LABEL: unsigned_sat_constant_v4i32_using_cmp_sum:
; SSE2:       # %bb.0:
; SSE2-NEXT:    movdqa {{.*#+}} xmm1 = [42,42,42,42]
; SSE2-NEXT:    paddd %xmm0, %xmm1
; SSE2-NEXT:    movdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
; SSE2-NEXT:    pxor %xmm2, %xmm0
; SSE2-NEXT:    pxor %xmm1, %xmm2
; SSE2-NEXT:    pcmpgtd %xmm2, %xmm0
; SSE2-NEXT:    por %xmm1, %xmm0
; SSE2-NEXT:    retq
;
; SSE4-LABEL: unsigned_sat_constant_v4i32_using_cmp_sum:
; SSE4:       # %bb.0:
; SSE4-NEXT:    pminud {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE4-NEXT:    paddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE4-NEXT:    retq
;
; AVX2-LABEL: unsigned_sat_constant_v4i32_using_cmp_sum:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [42,42,42,42]
; AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm2 = [4294967253,4294967253,4294967253,4294967253]
; AVX2-NEXT:    vpminud %xmm2, %xmm0, %xmm0
; AVX2-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    retq
;
; AVX512-LABEL: unsigned_sat_constant_v4i32_using_cmp_sum:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vpminud {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm0, %xmm0
; AVX512-NEXT:    vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm0, %xmm0
; AVX512-NEXT:    retq
  %a = add <4 x i32> %x, <i32 42, i32 42, i32 42, i32 42>
  %c = icmp ugt <4 x i32> %x, %a
  %r = select <4 x i1> %c, <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, <4 x i32> %a
  ret <4 x i32> %r
}

define <4 x i32> @unsigned_sat_constant_v4i32_using_cmp_notval(<4 x i32> %x) {
; SSE2-LABEL: unsigned_sat_constant_v4i32_using_cmp_notval:
; SSE2:       # %bb.0:
; SSE2-NEXT:    movdqa {{.*#+}} xmm1 = [42,42,42,42]
; SSE2-NEXT:    paddd %xmm0, %xmm1
; SSE2-NEXT:    pxor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE2-NEXT:    pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE2-NEXT:    por %xmm1, %xmm0
; SSE2-NEXT:    retq
;
; SSE4-LABEL: unsigned_sat_constant_v4i32_using_cmp_notval:
; SSE4:       # %bb.0:
; SSE4-NEXT:    pminud {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE4-NEXT:    paddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE4-NEXT:    retq
;
; AVX2-LABEL: unsigned_sat_constant_v4i32_using_cmp_notval:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [42,42,42,42]
; AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm2 = [4294967253,4294967253,4294967253,4294967253]
; AVX2-NEXT:    vpminud %xmm2, %xmm0, %xmm0
; AVX2-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    retq
;
; AVX512-LABEL: unsigned_sat_constant_v4i32_using_cmp_notval:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vpminud {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm0, %xmm0
; AVX512-NEXT:    vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm0, %xmm0
; AVX512-NEXT:    retq
  %a = add <4 x i32> %x, <i32 42, i32 42, i32 42, i32 42>
  %c = icmp ugt <4 x i32> %x, <i32 -43, i32 -43, i32 -43, i32 -43>
  %r = select <4 x i1> %c, <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, <4 x i32> %a
  ret <4 x i32> %r
}

define <4 x i32> @unsigned_sat_constant_v4i32_using_cmp_notval_nonsplat(<4 x i32> %x) {
; SSE2-LABEL: unsigned_sat_constant_v4i32_using_cmp_notval_nonsplat:
; SSE2:       # %bb.0:
; SSE2-NEXT:    movdqa {{.*#+}} xmm1 = [43,44,45,46]
; SSE2-NEXT:    paddd %xmm0, %xmm1
; SSE2-NEXT:    pxor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE2-NEXT:    pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE2-NEXT:    por %xmm1, %xmm0
; SSE2-NEXT:    retq
;
; SSE4-LABEL: unsigned_sat_constant_v4i32_using_cmp_notval_nonsplat:
; SSE4:       # %bb.0:
; SSE4-NEXT:    pminud {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE4-NEXT:    paddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE4-NEXT:    retq
;
; AVX-LABEL: unsigned_sat_constant_v4i32_using_cmp_notval_nonsplat:
; AVX:       # %bb.0:
; AVX-NEXT:    vpminud {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; AVX-NEXT:    vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; AVX-NEXT:    retq
  %a = add <4 x i32> %x, <i32 43, i32 44, i32 45, i32 46>
  %c = icmp ugt <4 x i32> %x, <i32 -44, i32 -45, i32 -46, i32 -47>
  %r = select <4 x i1> %c, <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, <4 x i32> %a
  ret <4 x i32> %r
}
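
; For v2i64, SSE2/SSE4.1 have no 64-bit element compare at all, so the
; unsigned compare is assembled from pcmpgtd/pcmpeqd/pshufd on 32-bit halves.
; SSE4.2's pcmpgtq is signed, so it still needs the 0x8000000000000000 bias;
; only AVX512VL provides vpminuq directly.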

define <2 x i64> @unsigned_sat_constant_v2i64_using_min(<2 x i64> %x) {
; SSE2-LABEL: unsigned_sat_constant_v2i64_using_min:
; SSE2:       # %bb.0:
; SSE2-NEXT:    movdqa {{.*#+}} xmm1 = [9223372039002259456,9223372039002259456]
; SSE2-NEXT:    pxor %xmm0, %xmm1
; SSE2-NEXT:    movdqa {{.*#+}} xmm2 = [9223372034707292117,9223372034707292117]
; SSE2-NEXT:    movdqa %xmm2, %xmm3
; SSE2-NEXT:    pcmpgtd %xmm1, %xmm3
; SSE2-NEXT:    pshufd {{.*#+}} xmm4 = xmm3[0,0,2,2]
; SSE2-NEXT:    pcmpeqd %xmm2, %xmm1
; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
; SSE2-NEXT:    pand %xmm4, %xmm1
; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm3[1,1,3,3]
; SSE2-NEXT:    por %xmm1, %xmm2
; SSE2-NEXT:    pand %xmm2, %xmm0
; SSE2-NEXT:    pandn {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2
; SSE2-NEXT:    por %xmm2, %xmm0
; SSE2-NEXT:    paddq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE2-NEXT:    retq
;
; SSE41-LABEL: unsigned_sat_constant_v2i64_using_min:
; SSE41:       # %bb.0:
; SSE41-NEXT:    movdqa %xmm0, %xmm1
; SSE41-NEXT:    movapd {{.*#+}} xmm2 = [18446744073709551573,18446744073709551573]
; SSE41-NEXT:    movdqa {{.*#+}} xmm0 = [9223372039002259456,9223372039002259456]
; SSE41-NEXT:    pxor %xmm1, %xmm0
; SSE41-NEXT:    movdqa {{.*#+}} xmm3 = [9223372034707292117,9223372034707292117]
; SSE41-NEXT:    movdqa %xmm3, %xmm4
; SSE41-NEXT:    pcmpeqd %xmm0, %xmm4
; SSE41-NEXT:    pcmpgtd %xmm0, %xmm3
; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm3[0,0,2,2]
; SSE41-NEXT:    pand %xmm4, %xmm0
; SSE41-NEXT:    por %xmm3, %xmm0
; SSE41-NEXT:    blendvpd %xmm0, %xmm1, %xmm2
; SSE41-NEXT:    paddq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2
; SSE41-NEXT:    movdqa %xmm2, %xmm0
; SSE41-NEXT:    retq
;
; SSE42-LABEL: unsigned_sat_constant_v2i64_using_min:
; SSE42:       # %bb.0:
; SSE42-NEXT:    movdqa %xmm0, %xmm1
; SSE42-NEXT:    movapd {{.*#+}} xmm2 = [18446744073709551573,18446744073709551573]
; SSE42-NEXT:    movdqa {{.*#+}} xmm3 = [9223372036854775808,9223372036854775808]
; SSE42-NEXT:    pxor %xmm0, %xmm3
; SSE42-NEXT:    movdqa {{.*#+}} xmm0 = [9223372036854775765,9223372036854775765]
; SSE42-NEXT:    pcmpgtq %xmm3, %xmm0
; SSE42-NEXT:    blendvpd %xmm0, %xmm1, %xmm2
; SSE42-NEXT:    paddq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2
; SSE42-NEXT:    movdqa %xmm2, %xmm0
; SSE42-NEXT:    retq
;
; AVX2-LABEL: unsigned_sat_constant_v2i64_using_min:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vmovapd {{.*#+}} xmm1 = [18446744073709551573,18446744073709551573]
; AVX2-NEXT:    vpxor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm2
; AVX2-NEXT:    vmovdqa {{.*#+}} xmm3 = [9223372036854775765,9223372036854775765]
; AVX2-NEXT:    vpcmpgtq %xmm2, %xmm3, %xmm2
; AVX2-NEXT:    vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
; AVX2-NEXT:    vpaddq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; AVX2-NEXT:    retq
;
; AVX512-LABEL: unsigned_sat_constant_v2i64_using_min:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vpminuq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; AVX512-NEXT:    vpaddq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; AVX512-NEXT:    retq
  %c = icmp ult <2 x i64> %x, <i64 -43, i64 -43>
  %s = select <2 x i1> %c, <2 x i64> %x, <2 x i64> <i64 -43, i64 -43>
  %r = add <2 x i64> %s, <i64 42, i64 42>
  ret <2 x i64> %r
}

define <2 x i64> @unsigned_sat_constant_v2i64_using_cmp_sum(<2 x i64> %x) {
; SSE2-LABEL: unsigned_sat_constant_v2i64_using_cmp_sum:
; SSE2:       # %bb.0:
; SSE2-NEXT:    movdqa {{.*#+}} xmm1 = [42,42]
; SSE2-NEXT:    paddq %xmm0, %xmm1
; SSE2-NEXT:    movdqa {{.*#+}} xmm2 = [9223372039002259456,9223372039002259456]
; SSE2-NEXT:    pxor %xmm2, %xmm0
; SSE2-NEXT:    pxor %xmm1, %xmm2
; SSE2-NEXT:    movdqa %xmm0, %xmm3
; SSE2-NEXT:    pcmpgtd %xmm2, %xmm3
; SSE2-NEXT:    pshufd {{.*#+}} xmm4 = xmm3[0,0,2,2]
; SSE2-NEXT:    pcmpeqd %xmm0, %xmm2
; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
; SSE2-NEXT:    pand %xmm4, %xmm2
; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm3[1,1,3,3]
; SSE2-NEXT:    por %xmm1, %xmm0
; SSE2-NEXT:    por %xmm2, %xmm0
; SSE2-NEXT:    retq
;
; SSE41-LABEL: unsigned_sat_constant_v2i64_using_cmp_sum:
; SSE41:       # %bb.0:
; SSE41-NEXT:    movdqa {{.*#+}} xmm1 = [42,42]
; SSE41-NEXT:    paddq %xmm0, %xmm1
; SSE41-NEXT:    movdqa {{.*#+}} xmm2 = [9223372039002259456,9223372039002259456]
; SSE41-NEXT:    pxor %xmm2, %xmm0
; SSE41-NEXT:    pxor %xmm1, %xmm2
; SSE41-NEXT:    movdqa %xmm0, %xmm3
; SSE41-NEXT:    pcmpgtd %xmm2, %xmm3
; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm3[0,0,2,2]
; SSE41-NEXT:    pcmpeqd %xmm0, %xmm2
; SSE41-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
; SSE41-NEXT:    pand %xmm4, %xmm2
; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm3[1,1,3,3]
; SSE41-NEXT:    por %xmm1, %xmm0
; SSE41-NEXT:    por %xmm2, %xmm0
; SSE41-NEXT:    retq
;
; SSE42-LABEL: unsigned_sat_constant_v2i64_using_cmp_sum:
; SSE42:       # %bb.0:
; SSE42-NEXT:    movdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
; SSE42-NEXT:    movdqa %xmm0, %xmm1
; SSE42-NEXT:    pxor %xmm2, %xmm1
; SSE42-NEXT:    paddq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE42-NEXT:    pxor %xmm0, %xmm2
; SSE42-NEXT:    pcmpgtq %xmm2, %xmm1
; SSE42-NEXT:    por %xmm0, %xmm1
; SSE42-NEXT:    movdqa %xmm1, %xmm0
; SSE42-NEXT:    retq
;
; AVX2-LABEL: unsigned_sat_constant_v2i64_using_cmp_sum:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vmovdqa {{.*#+}} xmm1 = [9223372036854775808,9223372036854775808]
; AVX2-NEXT:    vpxor %xmm1, %xmm0, %xmm2
; AVX2-NEXT:    vpaddq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; AVX2-NEXT:    vpxor %xmm1, %xmm0, %xmm1
; AVX2-NEXT:    vpcmpgtq %xmm1, %xmm2, %xmm1
; AVX2-NEXT:    vpor %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    retq
;
; AVX512-LABEL: unsigned_sat_constant_v2i64_using_cmp_sum:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vpminuq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; AVX512-NEXT:    vpaddq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; AVX512-NEXT:    retq
  %a = add <2 x i64> %x, <i64 42, i64 42>
  %c = icmp ugt <2 x i64> %x, %a
  %r = select <2 x i1> %c, <2 x i64> <i64 -1, i64 -1>, <2 x i64> %a
  ret <2 x i64> %r
}

define <2 x i64> @unsigned_sat_constant_v2i64_using_cmp_notval(<2 x i64> %x) {
; SSE2-LABEL: unsigned_sat_constant_v2i64_using_cmp_notval:
; SSE2:       # %bb.0:
; SSE2-NEXT:    movdqa {{.*#+}} xmm1 = [42,42]
; SSE2-NEXT:    paddq %xmm0, %xmm1
; SSE2-NEXT:    pxor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE2-NEXT:    movdqa {{.*#+}} xmm2 = [9223372034707292117,9223372034707292117]
; SSE2-NEXT:    movdqa %xmm0, %xmm3
; SSE2-NEXT:    pcmpgtd %xmm2, %xmm3
; SSE2-NEXT:    pshufd {{.*#+}} xmm4 = xmm3[0,0,2,2]
; SSE2-NEXT:    pcmpeqd %xmm2, %xmm0
; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; SSE2-NEXT:    pand %xmm4, %xmm2
; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm3[1,1,3,3]
; SSE2-NEXT:    por %xmm1, %xmm0
; SSE2-NEXT:    por %xmm2, %xmm0
; SSE2-NEXT:    retq
;
; SSE41-LABEL: unsigned_sat_constant_v2i64_using_cmp_notval:
; SSE41:       # %bb.0:
; SSE41-NEXT:    movdqa {{.*#+}} xmm1 = [42,42]
; SSE41-NEXT:    paddq %xmm0, %xmm1
; SSE41-NEXT:    pxor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE41-NEXT:    movdqa {{.*#+}} xmm2 = [9223372034707292117,9223372034707292117]
; SSE41-NEXT:    movdqa %xmm0, %xmm3
; SSE41-NEXT:    pcmpgtd %xmm2, %xmm3
; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm3[0,0,2,2]
; SSE41-NEXT:    pcmpeqd %xmm2, %xmm0
; SSE41-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
; SSE41-NEXT:    pand %xmm4, %xmm1
; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm3[1,1,3,3]
; SSE41-NEXT:    por %xmm2, %xmm0
; SSE41-NEXT:    por %xmm1, %xmm0
; SSE41-NEXT:    retq
;
; SSE42-LABEL: unsigned_sat_constant_v2i64_using_cmp_notval:
; SSE42:       # %bb.0:
; SSE42-NEXT:    movdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
; SSE42-NEXT:    movdqa %xmm0, %xmm1
; SSE42-NEXT:    pxor %xmm2, %xmm1
; SSE42-NEXT:    paddq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE42-NEXT:    pxor %xmm0, %xmm2
; SSE42-NEXT:    pcmpgtq %xmm2, %xmm1
; SSE42-NEXT:    por %xmm0, %xmm1
; SSE42-NEXT:    movdqa %xmm1, %xmm0
; SSE42-NEXT:    retq
;
; AVX2-LABEL: unsigned_sat_constant_v2i64_using_cmp_notval:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vmovdqa {{.*#+}} xmm1 = [9223372036854775808,9223372036854775808]
; AVX2-NEXT:    vpxor %xmm1, %xmm0, %xmm2
; AVX2-NEXT:    vpaddq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; AVX2-NEXT:    vpxor %xmm1, %xmm0, %xmm1
; AVX2-NEXT:    vpcmpgtq %xmm1, %xmm2, %xmm1
; AVX2-NEXT:    vpor %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    retq
;
; AVX512-LABEL: unsigned_sat_constant_v2i64_using_cmp_notval:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vpminuq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; AVX512-NEXT:    vpaddq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; AVX512-NEXT:    retq
  %a = add <2 x i64> %x, <i64 42, i64 42>
  %c = icmp ugt <2 x i64> %x, <i64 -43, i64 -43>
  %r = select <2 x i1> %c, <2 x i64> <i64 -1, i64 -1>, <2 x i64> %a
  ret <2 x i64> %r
}
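
; In the variable-operand vector tests below, ~y is materialized with
; pcmpeqd (all-ones) + pxor on SSE/AVX2, while AVX512 uses vpternlogq $15,
; which computes a bitwise NOT in one instruction.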

define <16 x i8> @unsigned_sat_variable_v16i8_using_min(<16 x i8> %x, <16 x i8> %y) {
; SSE-LABEL: unsigned_sat_variable_v16i8_using_min:
; SSE:       # %bb.0:
; SSE-NEXT:    pcmpeqd %xmm2, %xmm2
; SSE-NEXT:    pxor %xmm1, %xmm2
; SSE-NEXT:    pminub %xmm2, %xmm0
; SSE-NEXT:    paddb %xmm1, %xmm0
; SSE-NEXT:    retq
;
; AVX2-LABEL: unsigned_sat_variable_v16i8_using_min:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
; AVX2-NEXT:    vpxor %xmm2, %xmm1, %xmm2
; AVX2-NEXT:    vpminub %xmm2, %xmm0, %xmm0
; AVX2-NEXT:    vpaddb %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    retq
;
; AVX512-LABEL: unsigned_sat_variable_v16i8_using_min:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vmovdqa %xmm1, %xmm2
; AVX512-NEXT:    vpternlogq $15, %xmm1, %xmm1, %xmm2
; AVX512-NEXT:    vpminub %xmm2, %xmm0, %xmm0
; AVX512-NEXT:    vpaddb %xmm1, %xmm0, %xmm0
; AVX512-NEXT:    retq
  %noty = xor <16 x i8> %y, <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
  %c = icmp ult <16 x i8> %x, %noty
  %s = select <16 x i1> %c, <16 x i8> %x, <16 x i8> %noty
  %r = add <16 x i8> %s, %y
  ret <16 x i8> %r
}

define <16 x i8> @unsigned_sat_variable_v16i8_using_cmp_sum(<16 x i8> %x, <16 x i8> %y) {
; SSE-LABEL: unsigned_sat_variable_v16i8_using_cmp_sum:
; SSE:       # %bb.0:
; SSE-NEXT:    paddusb %xmm1, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: unsigned_sat_variable_v16i8_using_cmp_sum:
; AVX:       # %bb.0:
; AVX-NEXT:    vpaddusb %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq
  %a = add <16 x i8> %x, %y
  %c = icmp ugt <16 x i8> %x, %a
  %r = select <16 x i1> %c, <16 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>, <16 x i8> %a
  ret <16 x i8> %r
}

define <16 x i8> @unsigned_sat_variable_v16i8_using_cmp_notval(<16 x i8> %x, <16 x i8> %y) {
; SSE-LABEL: unsigned_sat_variable_v16i8_using_cmp_notval:
; SSE:       # %bb.0:
; SSE-NEXT:    pcmpeqd %xmm2, %xmm2
; SSE-NEXT:    movdqa %xmm0, %xmm3
; SSE-NEXT:    paddb %xmm1, %xmm3
; SSE-NEXT:    pxor %xmm2, %xmm1
; SSE-NEXT:    pminub %xmm0, %xmm1
; SSE-NEXT:    pcmpeqb %xmm1, %xmm0
; SSE-NEXT:    pxor %xmm2, %xmm0
; SSE-NEXT:    por %xmm3, %xmm0
; SSE-NEXT:    retq
;
; AVX2-LABEL: unsigned_sat_variable_v16i8_using_cmp_notval:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
; AVX2-NEXT:    vpxor %xmm2, %xmm1, %xmm3
; AVX2-NEXT:    vpaddb %xmm1, %xmm0, %xmm1
; AVX2-NEXT:    vpminub %xmm3, %xmm0, %xmm3
; AVX2-NEXT:    vpcmpeqb %xmm3, %xmm0, %xmm0
; AVX2-NEXT:    vpxor %xmm2, %xmm0, %xmm0
; AVX2-NEXT:    vpor %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    retq
;
; AVX512-LABEL: unsigned_sat_variable_v16i8_using_cmp_notval:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
; AVX512-NEXT:    vpaddb %xmm1, %xmm0, %xmm3
; AVX512-NEXT:    vpternlogq $15, %xmm1, %xmm1, %xmm1
; AVX512-NEXT:    vpminub %xmm1, %xmm0, %xmm1
; AVX512-NEXT:    vpcmpeqb %xmm1, %xmm0, %xmm0
; AVX512-NEXT:    vpternlogq $222, %xmm2, %xmm3, %xmm0
; AVX512-NEXT:    retq
  %noty = xor <16 x i8> %y, <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
  %a = add <16 x i8> %x, %y
  %c = icmp ugt <16 x i8> %x, %noty
  %r = select <16 x i1> %c, <16 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>, <16 x i8> %a
  ret <16 x i8> %r
}

define <8 x i16> @unsigned_sat_variable_v8i16_using_min(<8 x i16> %x, <8 x i16> %y) {
; SSE2-LABEL: unsigned_sat_variable_v8i16_using_min:
; SSE2:       # %bb.0:
; SSE2-NEXT:    pcmpeqd %xmm2, %xmm2
; SSE2-NEXT:    pxor %xmm1, %xmm2
; SSE2-NEXT:    movdqa %xmm0, %xmm3
; SSE2-NEXT:    psubusw %xmm2, %xmm3
; SSE2-NEXT:    psubw %xmm3, %xmm0
; SSE2-NEXT:    paddw %xmm1, %xmm0
; SSE2-NEXT:    retq
;
; SSE4-LABEL: unsigned_sat_variable_v8i16_using_min:
; SSE4:       # %bb.0:
; SSE4-NEXT:    pcmpeqd %xmm2, %xmm2
; SSE4-NEXT:    pxor %xmm1, %xmm2
; SSE4-NEXT:    pminuw %xmm2, %xmm0
; SSE4-NEXT:    paddw %xmm1, %xmm0
; SSE4-NEXT:    retq
;
; AVX2-LABEL: unsigned_sat_variable_v8i16_using_min:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
; AVX2-NEXT:    vpxor %xmm2, %xmm1, %xmm2
; AVX2-NEXT:    vpminuw %xmm2, %xmm0, %xmm0
; AVX2-NEXT:    vpaddw %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    retq
;
; AVX512-LABEL: unsigned_sat_variable_v8i16_using_min:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vmovdqa %xmm1, %xmm2
; AVX512-NEXT:    vpternlogq $15, %xmm1, %xmm1, %xmm2
; AVX512-NEXT:    vpminuw %xmm2, %xmm0, %xmm0
; AVX512-NEXT:    vpaddw %xmm1, %xmm0, %xmm0
; AVX512-NEXT:    retq
  %noty = xor <8 x i16> %y, <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
  %c = icmp ult <8 x i16> %x, %noty
  %s = select <8 x i1> %c, <8 x i16> %x, <8 x i16> %noty
  %r = add <8 x i16> %s, %y
  ret <8 x i16> %r
}

define <8 x i16> @unsigned_sat_variable_v8i16_using_cmp_sum(<8 x i16> %x, <8 x i16> %y) {
; SSE-LABEL: unsigned_sat_variable_v8i16_using_cmp_sum:
; SSE:       # %bb.0:
; SSE-NEXT:    paddusw %xmm1, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: unsigned_sat_variable_v8i16_using_cmp_sum:
; AVX:       # %bb.0:
; AVX-NEXT:    vpaddusw %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq
  %a = add <8 x i16> %x, %y
  %c = icmp ugt <8 x i16> %x, %a
  %r = select <8 x i1> %c, <8 x i16> <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>, <8 x i16> %a
  ret <8 x i16> %r
}

define <8 x i16> @unsigned_sat_variable_v8i16_using_cmp_notval(<8 x i16> %x, <8 x i16> %y) {
; SSE2-LABEL: unsigned_sat_variable_v8i16_using_cmp_notval:
; SSE2:       # %bb.0:
; SSE2-NEXT:    movdqa %xmm0, %xmm2
; SSE2-NEXT:    paddw %xmm1, %xmm2
; SSE2-NEXT:    pxor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
; SSE2-NEXT:    pxor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE2-NEXT:    pcmpgtw %xmm1, %xmm0
; SSE2-NEXT:    por %xmm2, %xmm0
; SSE2-NEXT:    retq
;
; SSE4-LABEL: unsigned_sat_variable_v8i16_using_cmp_notval:
; SSE4:       # %bb.0:
; SSE4-NEXT:    pcmpeqd %xmm2, %xmm2
; SSE4-NEXT:    movdqa %xmm0, %xmm3
; SSE4-NEXT:    paddw %xmm1, %xmm3
; SSE4-NEXT:    pxor %xmm2, %xmm1
; SSE4-NEXT:    pminuw %xmm0, %xmm1
; SSE4-NEXT:    pcmpeqw %xmm1, %xmm0
; SSE4-NEXT:    pxor %xmm2, %xmm0
; SSE4-NEXT:    por %xmm3, %xmm0
; SSE4-NEXT:    retq
;
; AVX2-LABEL: unsigned_sat_variable_v8i16_using_cmp_notval:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
; AVX2-NEXT:    vpxor %xmm2, %xmm1, %xmm3
; AVX2-NEXT:    vpaddw %xmm1, %xmm0, %xmm1
; AVX2-NEXT:    vpminuw %xmm3, %xmm0, %xmm3
; AVX2-NEXT:    vpcmpeqw %xmm3, %xmm0, %xmm0
; AVX2-NEXT:    vpxor %xmm2, %xmm0, %xmm0
; AVX2-NEXT:    vpor %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    retq
;
; AVX512-LABEL: unsigned_sat_variable_v8i16_using_cmp_notval:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
; AVX512-NEXT:    vpaddw %xmm1, %xmm0, %xmm3
; AVX512-NEXT:    vpternlogq $15, %xmm1, %xmm1, %xmm1
; AVX512-NEXT:    vpminuw %xmm1, %xmm0, %xmm1
; AVX512-NEXT:    vpcmpeqw %xmm1, %xmm0, %xmm0
; AVX512-NEXT:    vpternlogq $222, %xmm2, %xmm3, %xmm0
; AVX512-NEXT:    retq
  %noty = xor <8 x i16> %y, <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
  %a = add <8 x i16> %x, %y
  %c = icmp ugt <8 x i16> %x, %noty
  %r = select <8 x i1> %c, <8 x i16> <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>, <8 x i16> %a
  ret <8 x i16> %r
}

define <4 x i32> @unsigned_sat_variable_v4i32_using_min(<4 x i32> %x, <4 x i32> %y) {
; SSE2-LABEL: unsigned_sat_variable_v4i32_using_min:
; SSE2:       # %bb.0:
; SSE2-NEXT:    pcmpeqd %xmm2, %xmm2
; SSE2-NEXT:    movdqa {{.*#+}} xmm3 = [2147483648,2147483648,2147483648,2147483648]
; SSE2-NEXT:    pxor %xmm0, %xmm3
; SSE2-NEXT:    movdqa {{.*#+}} xmm4 = [2147483647,2147483647,2147483647,2147483647]
; SSE2-NEXT:    pxor %xmm1, %xmm4
; SSE2-NEXT:    pcmpgtd %xmm3, %xmm4
; SSE2-NEXT:    pand %xmm4, %xmm0
; SSE2-NEXT:    pxor %xmm2, %xmm4
; SSE2-NEXT:    movdqa %xmm1, %xmm2
; SSE2-NEXT:    pandn %xmm4, %xmm2
; SSE2-NEXT:    por %xmm2, %xmm0
; SSE2-NEXT:    paddd %xmm1, %xmm0
; SSE2-NEXT:    retq
;
; SSE4-LABEL: unsigned_sat_variable_v4i32_using_min:
; SSE4:       # %bb.0:
; SSE4-NEXT:    pcmpeqd %xmm2, %xmm2
; SSE4-NEXT:    pxor %xmm1, %xmm2
; SSE4-NEXT:    pminud %xmm2, %xmm0
; SSE4-NEXT:    paddd %xmm1, %xmm0
; SSE4-NEXT:    retq
;
; AVX2-LABEL: unsigned_sat_variable_v4i32_using_min:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
; AVX2-NEXT:    vpxor %xmm2, %xmm1, %xmm2
; AVX2-NEXT:    vpminud %xmm2, %xmm0, %xmm0
; AVX2-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    retq
;
; AVX512-LABEL: unsigned_sat_variable_v4i32_using_min:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vmovdqa %xmm1, %xmm2
; AVX512-NEXT:    vpternlogq $15, %xmm1, %xmm1, %xmm2
; AVX512-NEXT:    vpminud %xmm2, %xmm0, %xmm0
; AVX512-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
; AVX512-NEXT:    retq
  %noty = xor <4 x i32> %y, <i32 -1, i32 -1, i32 -1, i32 -1>
  %c = icmp ult <4 x i32> %x, %noty
  %s = select <4 x i1> %c, <4 x i32> %x, <4 x i32> %noty
  %r = add <4 x i32> %s, %y
  ret <4 x i32> %r
}

define <4 x i32> @unsigned_sat_variable_v4i32_using_cmp_sum(<4 x i32> %x, <4 x i32> %y) {
; SSE2-LABEL: unsigned_sat_variable_v4i32_using_cmp_sum:
; SSE2:       # %bb.0:
; SSE2-NEXT:    paddd %xmm0, %xmm1
; SSE2-NEXT:    movdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
; SSE2-NEXT:    pxor %xmm2, %xmm0
; SSE2-NEXT:    pxor %xmm1, %xmm2
; SSE2-NEXT:    pcmpgtd %xmm2, %xmm0
; SSE2-NEXT:    por %xmm1, %xmm0
; SSE2-NEXT:    retq
;
; SSE4-LABEL: unsigned_sat_variable_v4i32_using_cmp_sum:
; SSE4:       # %bb.0:
; SSE4-NEXT:    pcmpeqd %xmm2, %xmm2
; SSE4-NEXT:    pxor %xmm1, %xmm2
; SSE4-NEXT:    pminud %xmm2, %xmm0
; SSE4-NEXT:    paddd %xmm1, %xmm0
; SSE4-NEXT:    retq
;
; AVX2-LABEL: unsigned_sat_variable_v4i32_using_cmp_sum:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
; AVX2-NEXT:    vpxor %xmm2, %xmm1, %xmm2
; AVX2-NEXT:    vpminud %xmm2, %xmm0, %xmm0
; AVX2-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    retq
;
; AVX512-LABEL: unsigned_sat_variable_v4i32_using_cmp_sum:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vmovdqa %xmm1, %xmm2
; AVX512-NEXT:    vpternlogq $15, %xmm1, %xmm1, %xmm2
; AVX512-NEXT:    vpminud %xmm2, %xmm0, %xmm0
; AVX512-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
; AVX512-NEXT:    retq
  %a = add <4 x i32> %x, %y
  %c = icmp ugt <4 x i32> %x, %a
  %r = select <4 x i1> %c, <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, <4 x i32> %a
  ret <4 x i32> %r
}

define <4 x i32> @unsigned_sat_variable_v4i32_using_cmp_notval(<4 x i32> %x, <4 x i32> %y) {
; SSE2-LABEL: unsigned_sat_variable_v4i32_using_cmp_notval:
; SSE2:       # %bb.0:
; SSE2-NEXT:    movdqa %xmm0, %xmm2
; SSE2-NEXT:    paddd %xmm1, %xmm2
; SSE2-NEXT:    pxor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
; SSE2-NEXT:    pxor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE2-NEXT:    pcmpgtd %xmm1, %xmm0
; SSE2-NEXT:    por %xmm2, %xmm0
; SSE2-NEXT:    retq
;
; SSE4-LABEL: unsigned_sat_variable_v4i32_using_cmp_notval:
; SSE4:       # %bb.0:
; SSE4-NEXT:    pcmpeqd %xmm2, %xmm2
; SSE4-NEXT:    movdqa %xmm0, %xmm3
; SSE4-NEXT:    paddd %xmm1, %xmm3
; SSE4-NEXT:    pxor %xmm2, %xmm1
; SSE4-NEXT:    pminud %xmm0, %xmm1
; SSE4-NEXT:    pcmpeqd %xmm1, %xmm0
; SSE4-NEXT:    pxor %xmm2, %xmm0
; SSE4-NEXT:    por %xmm3, %xmm0
; SSE4-NEXT:    retq
;
; AVX2-LABEL: unsigned_sat_variable_v4i32_using_cmp_notval:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
; AVX2-NEXT:    vpxor %xmm2, %xmm1, %xmm3
; AVX2-NEXT:    vpaddd %xmm1, %xmm0, %xmm1
; AVX2-NEXT:    vpminud %xmm3, %xmm0, %xmm3
; AVX2-NEXT:    vpcmpeqd %xmm3, %xmm0, %xmm0
; AVX2-NEXT:    vpxor %xmm2, %xmm0, %xmm0
; AVX2-NEXT:    vpor %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    retq
;
; AVX512-LABEL: unsigned_sat_variable_v4i32_using_cmp_notval:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vpcmpeqd %xmm3, %xmm3, %xmm3
; AVX512-NEXT:    vpaddd %xmm1, %xmm0, %xmm2
; AVX512-NEXT:    vpternlogq $15, %xmm1, %xmm1, %xmm1
; AVX512-NEXT:    vpcmpnleud %xmm1, %xmm0, %k1
; AVX512-NEXT:    vmovdqa32 %xmm3, %xmm2 {%k1}
; AVX512-NEXT:    vmovdqa %xmm2, %xmm0
; AVX512-NEXT:    retq
  %noty = xor <4 x i32> %y, <i32 -1, i32 -1, i32 -1, i32 -1>
  %a = add <4 x i32> %x, %y
  %c = icmp ugt <4 x i32> %x, %noty
  %r = select <4 x i1> %c, <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, <4 x i32> %a
  ret <4 x i32> %r
}

define <2 x i64> @unsigned_sat_variable_v2i64_using_min(<2 x i64> %x, <2 x i64> %y) {
; SSE2-LABEL: unsigned_sat_variable_v2i64_using_min:
; SSE2:       # %bb.0:
; SSE2-NEXT:    pcmpeqd %xmm2, %xmm2
; SSE2-NEXT:    movdqa {{.*#+}} xmm3 = [9223372039002259456,9223372039002259456]
; SSE2-NEXT:    pxor %xmm0, %xmm3
; SSE2-NEXT:    movdqa {{.*#+}} xmm4 = [9223372034707292159,9223372034707292159]
; SSE2-NEXT:    pxor %xmm1, %xmm4
; SSE2-NEXT:    movdqa %xmm4, %xmm5
; SSE2-NEXT:    pcmpgtd %xmm3, %xmm5
; SSE2-NEXT:    pshufd {{.*#+}} xmm6 = xmm5[0,0,2,2]
; SSE2-NEXT:    pcmpeqd %xmm3, %xmm4
; SSE2-NEXT:    pshufd {{.*#+}} xmm3 = xmm4[1,1,3,3]
; SSE2-NEXT:    pand %xmm6, %xmm3
; SSE2-NEXT:    pshufd {{.*#+}} xmm4 = xmm5[1,1,3,3]
; SSE2-NEXT:    por %xmm3, %xmm4
; SSE2-NEXT:    pand %xmm4, %xmm0
; SSE2-NEXT:    pxor %xmm2, %xmm4
; SSE2-NEXT:    movdqa %xmm1, %xmm2
; SSE2-NEXT:    pandn %xmm4, %xmm2
; SSE2-NEXT:    por %xmm2, %xmm0
; SSE2-NEXT:    paddq %xmm1, %xmm0
; SSE2-NEXT:    retq
;
; SSE41-LABEL: unsigned_sat_variable_v2i64_using_min:
; SSE41:       # %bb.0:
; SSE41-NEXT:    movdqa %xmm0, %xmm2
; SSE41-NEXT:    pcmpeqd %xmm3, %xmm3
; SSE41-NEXT:    pxor %xmm1, %xmm3
; SSE41-NEXT:    movdqa {{.*#+}} xmm0 = [9223372039002259456,9223372039002259456]
; SSE41-NEXT:    pxor %xmm2, %xmm0
; SSE41-NEXT:    movdqa {{.*#+}} xmm4 = [9223372034707292159,9223372034707292159]
; SSE41-NEXT:    pxor %xmm1, %xmm4
; SSE41-NEXT:    movdqa %xmm4, %xmm5
; SSE41-NEXT:    pcmpeqd %xmm0, %xmm5
; SSE41-NEXT:    pcmpgtd %xmm0, %xmm4
; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm4[0,0,2,2]
; SSE41-NEXT:    pand %xmm5, %xmm0
; SSE41-NEXT:    por %xmm4, %xmm0
; SSE41-NEXT:    blendvpd %xmm0, %xmm2, %xmm3
; SSE41-NEXT:    paddq %xmm1, %xmm3
; SSE41-NEXT:    movdqa %xmm3, %xmm0
; SSE41-NEXT:    retq
;
; SSE42-LABEL: unsigned_sat_variable_v2i64_using_min:
; SSE42:       # %bb.0:
; SSE42-NEXT:    movdqa %xmm0, %xmm2
; SSE42-NEXT:    pcmpeqd %xmm3, %xmm3
; SSE42-NEXT:    pxor %xmm1, %xmm3
; SSE42-NEXT:    movdqa {{.*#+}} xmm4 = [9223372036854775808,9223372036854775808]
; SSE42-NEXT:    pxor %xmm0, %xmm4
; SSE42-NEXT:    movdqa {{.*#+}} xmm0 = [9223372036854775807,9223372036854775807]
; SSE42-NEXT:    pxor %xmm1, %xmm0
; SSE42-NEXT:    pcmpgtq %xmm4, %xmm0
; SSE42-NEXT:    blendvpd %xmm0, %xmm2, %xmm3
; SSE42-NEXT:    paddq %xmm1, %xmm3
; SSE42-NEXT:    movdqa %xmm3, %xmm0
; SSE42-NEXT:    retq
;
; AVX2-LABEL: unsigned_sat_variable_v2i64_using_min:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
; AVX2-NEXT:    vpxor %xmm2, %xmm1, %xmm2
; AVX2-NEXT:    vpxor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm3
; AVX2-NEXT:    vpxor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm4
; AVX2-NEXT:    vpcmpgtq %xmm3, %xmm4, %xmm3
; AVX2-NEXT:    vblendvpd %xmm3, %xmm0, %xmm2, %xmm0
; AVX2-NEXT:    vpaddq %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    retq
;
; AVX512-LABEL: unsigned_sat_variable_v2i64_using_min:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vmovdqa %xmm1, %xmm2
; AVX512-NEXT:    vpternlogq $15, %xmm1, %xmm1, %xmm2
; AVX512-NEXT:    vpminuq %xmm2, %xmm0, %xmm0
; AVX512-NEXT:    vpaddq %xmm1, %xmm0, %xmm0
; AVX512-NEXT:    retq
  %noty = xor <2 x i64> %y, <i64 -1, i64 -1>
  %c = icmp ult <2 x i64> %x, %noty
  %s = select <2 x i1> %c, <2 x i64> %x, <2 x i64> %noty
  %r = add <2 x i64> %s, %y
  ret <2 x i64> %r
}

define <2 x i64> @unsigned_sat_variable_v2i64_using_cmp_sum(<2 x i64> %x, <2 x i64> %y) {
; SSE2-LABEL: unsigned_sat_variable_v2i64_using_cmp_sum:
; SSE2:       # %bb.0:
; SSE2-NEXT:    paddq %xmm0, %xmm1
; SSE2-NEXT:    movdqa {{.*#+}} xmm2 = [9223372039002259456,9223372039002259456]
; SSE2-NEXT:    pxor %xmm2, %xmm0
; SSE2-NEXT:    pxor %xmm1, %xmm2
; SSE2-NEXT:    movdqa %xmm0, %xmm3
; SSE2-NEXT:    pcmpgtd %xmm2, %xmm3
; SSE2-NEXT:    pshufd {{.*#+}} xmm4 = xmm3[0,0,2,2]
; SSE2-NEXT:    pcmpeqd %xmm0, %xmm2
; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
; SSE2-NEXT:    pand %xmm4, %xmm2
; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm3[1,1,3,3]
; SSE2-NEXT:    por %xmm1, %xmm0
; SSE2-NEXT:    por %xmm2, %xmm0
; SSE2-NEXT:    retq
;
; SSE41-LABEL: unsigned_sat_variable_v2i64_using_cmp_sum:
; SSE41:       # %bb.0:
; SSE41-NEXT:    paddq %xmm0, %xmm1
; SSE41-NEXT:    movdqa {{.*#+}} xmm2 = [9223372039002259456,9223372039002259456]
; SSE41-NEXT:    pxor %xmm2, %xmm0
; SSE41-NEXT:    pxor %xmm1, %xmm2
; SSE41-NEXT:    movdqa %xmm0, %xmm3
; SSE41-NEXT:    pcmpgtd %xmm2, %xmm3
; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm3[0,0,2,2]
; SSE41-NEXT:    pcmpeqd %xmm0, %xmm2
; SSE41-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
; SSE41-NEXT:    pand %xmm4, %xmm2
; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm3[1,1,3,3]
; SSE41-NEXT:    por %xmm1, %xmm0
; SSE41-NEXT:    por %xmm2, %xmm0
; SSE41-NEXT:    retq
;
; SSE42-LABEL: unsigned_sat_variable_v2i64_using_cmp_sum:
; SSE42:       # %bb.0:
; SSE42-NEXT:    movdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
; SSE42-NEXT:    paddq %xmm0, %xmm1
; SSE42-NEXT:    pxor %xmm2, %xmm0
; SSE42-NEXT:    pxor %xmm1, %xmm2
; SSE42-NEXT:    pcmpgtq %xmm2, %xmm0
; SSE42-NEXT:    por %xmm1, %xmm0
; SSE42-NEXT:    retq
;
; AVX2-LABEL: unsigned_sat_variable_v2i64_using_cmp_sum:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
; AVX2-NEXT:    vpxor %xmm2, %xmm0, %xmm3
; AVX2-NEXT:    vpaddq %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    vpxor %xmm2, %xmm0, %xmm1
; AVX2-NEXT:    vpcmpgtq %xmm1, %xmm3, %xmm1
; AVX2-NEXT:    vpor %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    retq
;
; AVX512-LABEL: unsigned_sat_variable_v2i64_using_cmp_sum:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vmovdqa %xmm1, %xmm2
; AVX512-NEXT:    vpternlogq $15, %xmm1, %xmm1, %xmm2
; AVX512-NEXT:    vpminuq %xmm2, %xmm0, %xmm0
; AVX512-NEXT:    vpaddq %xmm1, %xmm0, %xmm0
; AVX512-NEXT:    retq
  %a = add <2 x i64> %x, %y
  %c = icmp ugt <2 x i64> %x, %a
  %r = select <2 x i1> %c, <2 x i64> <i64 -1, i64 -1>, <2 x i64> %a
  ret <2 x i64> %r
}

define <2 x i64> @unsigned_sat_variable_v2i64_using_cmp_notval(<2 x i64> %x, <2 x i64> %y) {
; SSE2-LABEL: unsigned_sat_variable_v2i64_using_cmp_notval:
; SSE2:       # %bb.0:
; SSE2-NEXT:    movdqa %xmm0, %xmm2
; SSE2-NEXT:    paddq %xmm1, %xmm2
; SSE2-NEXT:    pxor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
; SSE2-NEXT:    pxor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE2-NEXT:    movdqa %xmm0, %xmm3
; SSE2-NEXT:    pcmpgtd %xmm1, %xmm3
; SSE2-NEXT:    pshufd {{.*#+}} xmm4 = xmm3[0,0,2,2]
; SSE2-NEXT:    pcmpeqd %xmm1, %xmm0
; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
; SSE2-NEXT:    pand %xmm4, %xmm1
; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm3[1,1,3,3]
; SSE2-NEXT:    por %xmm2, %xmm0
; SSE2-NEXT:    por %xmm1, %xmm0
; SSE2-NEXT:    retq
;
; SSE41-LABEL: unsigned_sat_variable_v2i64_using_cmp_notval:
; SSE41:       # %bb.0:
; SSE41-NEXT:    movdqa %xmm0, %xmm2
; SSE41-NEXT:    paddq %xmm1, %xmm2
; SSE41-NEXT:    pxor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
; SSE41-NEXT:    pxor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE41-NEXT:    movdqa %xmm0, %xmm3
; SSE41-NEXT:    pcmpgtd %xmm1, %xmm3
; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm3[0,0,2,2]
; SSE41-NEXT:    pcmpeqd %xmm1, %xmm0
; SSE41-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
; SSE41-NEXT:    pand %xmm4, %xmm1
; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm3[1,1,3,3]
; SSE41-NEXT:    por %xmm2, %xmm0
; SSE41-NEXT:    por %xmm1, %xmm0
; SSE41-NEXT:    retq
;
; SSE42-LABEL: unsigned_sat_variable_v2i64_using_cmp_notval:
; SSE42:       # %bb.0:
; SSE42-NEXT:    movdqa %xmm0, %xmm2
; SSE42-NEXT:    paddq %xmm1, %xmm2
; SSE42-NEXT:    pxor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
; SSE42-NEXT:    pxor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE42-NEXT:    pcmpgtq %xmm1, %xmm0
; SSE42-NEXT:    por %xmm2, %xmm0
; SSE42-NEXT:    retq
;
; AVX2-LABEL: unsigned_sat_variable_v2i64_using_cmp_notval:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vpaddq %xmm1, %xmm0, %xmm2
; AVX2-NEXT:    vpxor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
; AVX2-NEXT:    vpxor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; AVX2-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    vpor %xmm2, %xmm0, %xmm0
; AVX2-NEXT:    retq
;
; AVX512-LABEL: unsigned_sat_variable_v2i64_using_cmp_notval:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vpcmpeqd %xmm3, %xmm3, %xmm3
; AVX512-NEXT:    vpaddq %xmm1, %xmm0, %xmm2
; AVX512-NEXT:    vpternlogq $15, %xmm1, %xmm1, %xmm1
; AVX512-NEXT:    vpcmpnleuq %xmm1, %xmm0, %k1
; AVX512-NEXT:    vmovdqa64 %xmm3, %xmm2 {%k1}
; AVX512-NEXT:    vmovdqa %xmm2, %xmm0
; AVX512-NEXT:    retq
  %noty = xor <2 x i64> %y, <i64 -1, i64 -1>
  %a = add <2 x i64> %x, %y
  %c = icmp ugt <2 x i64> %x, %noty
  %r = select <2 x i1> %c, <2 x i64> <i64 -1, i64 -1>, <2 x i64> %a
  ret <2 x i64> %r
}