1 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2 ; RUN: llc < %s -mtriple=x86_64-linux | FileCheck %s --check-prefix=X64
3 ; RUN: llc < %s -mtriple=i686 -mattr=cmov | FileCheck %s --check-prefix=X86
; Declarations of the signed saturating fixed-point division intrinsic at each
; bit width exercised by the tests below (scalar i4/i15/i16/i18/i64 plus a
; <4 x i32> vector form). The third i32 operand is the fixed-point scale.
5 declare i4 @llvm.sdiv.fix.sat.i4 (i4, i4, i32)
6 declare i15 @llvm.sdiv.fix.sat.i15 (i15, i15, i32)
7 declare i16 @llvm.sdiv.fix.sat.i16 (i16, i16, i32)
8 declare i18 @llvm.sdiv.fix.sat.i18 (i18, i18, i32)
9 declare i64 @llvm.sdiv.fix.sat.i64 (i64, i64, i32)
10 declare <4 x i32> @llvm.sdiv.fix.sat.v4i32(<4 x i32>, <4 x i32>, i32)
; i16 sdiv.fix.sat with scale 7: lowered as a sign-extended 32-bit idivl,
; a cmovnel quotient adjustment keyed on a nonzero remainder (testl %edx)
; combined with the operands' sign bits, then saturation clamps against
; 0xFFFF / 0xFFFF0000.
; NOTE(review): CHECK lines are autogenerated by update_llc_test_checks.py —
; regenerate them rather than editing by hand.
12 define i16 @func(i16 %x, i16 %y) nounwind {
16 ; X64-NEXT: movswl %si, %esi
17 ; X64-NEXT: movswl %di, %ecx
18 ; X64-NEXT: shll $8, %ecx
19 ; X64-NEXT: movl %ecx, %eax
21 ; X64-NEXT: idivl %esi
22 ; X64-NEXT: # kill: def $eax killed $eax def $rax
23 ; X64-NEXT: leal -1(%rax), %edi
24 ; X64-NEXT: testl %esi, %esi
26 ; X64-NEXT: testl %ecx, %ecx
28 ; X64-NEXT: xorb %sil, %cl
29 ; X64-NEXT: testl %edx, %edx
31 ; X64-NEXT: testb %cl, %dl
32 ; X64-NEXT: cmovnel %edi, %eax
33 ; X64-NEXT: cmpl $65535, %eax # imm = 0xFFFF
34 ; X64-NEXT: movl $65535, %ecx # imm = 0xFFFF
35 ; X64-NEXT: cmovgel %ecx, %eax
36 ; X64-NEXT: cmpl $-65535, %eax # imm = 0xFFFF0001
37 ; X64-NEXT: movl $-65536, %ecx # imm = 0xFFFF0000
38 ; X64-NEXT: cmovll %ecx, %eax
40 ; X64-NEXT: # kill: def $ax killed $ax killed $rax
45 ; X86-NEXT: pushl %ebx
46 ; X86-NEXT: pushl %edi
47 ; X86-NEXT: pushl %esi
48 ; X86-NEXT: movswl {{[0-9]+}}(%esp), %esi
49 ; X86-NEXT: movswl {{[0-9]+}}(%esp), %ecx
50 ; X86-NEXT: shll $8, %ecx
51 ; X86-NEXT: movl %ecx, %eax
53 ; X86-NEXT: idivl %esi
54 ; X86-NEXT: leal -1(%eax), %edi
55 ; X86-NEXT: testl %esi, %esi
57 ; X86-NEXT: testl %ecx, %ecx
59 ; X86-NEXT: xorb %bl, %cl
60 ; X86-NEXT: testl %edx, %edx
62 ; X86-NEXT: testb %cl, %dl
63 ; X86-NEXT: cmovnel %edi, %eax
64 ; X86-NEXT: cmpl $65535, %eax # imm = 0xFFFF
65 ; X86-NEXT: movl $65535, %ecx # imm = 0xFFFF
66 ; X86-NEXT: cmovgel %ecx, %eax
67 ; X86-NEXT: cmpl $-65535, %eax # imm = 0xFFFF0001
68 ; X86-NEXT: movl $-65536, %ecx # imm = 0xFFFF0000
69 ; X86-NEXT: cmovll %ecx, %eax
71 ; X86-NEXT: # kill: def $ax killed $ax killed $eax
76 %tmp = call i16 @llvm.sdiv.fix.sat.i16(i16 %x, i16 %y, i32 7)
; i15 sdiv.fix.sat with scale 14, operands sign-extended from i8 and the
; result sign-extended back to i16. Same widened-idivl + cmovnel remainder
; adjustment shape as @func, with saturation bounds 0x3FFF / 0xC000
; (the i15 signed max/min at this scale).
; NOTE(review): CHECK lines are autogenerated by update_llc_test_checks.py.
80 define i16 @func2(i8 %x, i8 %y) nounwind {
84 ; X64-NEXT: movsbl %sil, %esi
85 ; X64-NEXT: movsbl %dil, %ecx
86 ; X64-NEXT: shll $14, %ecx
87 ; X64-NEXT: movl %ecx, %eax
89 ; X64-NEXT: idivl %esi
90 ; X64-NEXT: # kill: def $eax killed $eax def $rax
91 ; X64-NEXT: leal -1(%rax), %edi
92 ; X64-NEXT: testl %esi, %esi
94 ; X64-NEXT: testl %ecx, %ecx
96 ; X64-NEXT: xorb %sil, %cl
97 ; X64-NEXT: testl %edx, %edx
99 ; X64-NEXT: testb %cl, %dl
100 ; X64-NEXT: cmovnel %edi, %eax
101 ; X64-NEXT: cmpl $16383, %eax # imm = 0x3FFF
102 ; X64-NEXT: movl $16383, %ecx # imm = 0x3FFF
103 ; X64-NEXT: cmovgel %ecx, %eax
104 ; X64-NEXT: cmpl $-16383, %eax # imm = 0xC001
105 ; X64-NEXT: movl $-16384, %ecx # imm = 0xC000
106 ; X64-NEXT: cmovll %ecx, %eax
107 ; X64-NEXT: # kill: def $ax killed $ax killed $rax
112 ; X86-NEXT: pushl %ebx
113 ; X86-NEXT: pushl %edi
114 ; X86-NEXT: pushl %esi
115 ; X86-NEXT: movsbl {{[0-9]+}}(%esp), %esi
116 ; X86-NEXT: movsbl {{[0-9]+}}(%esp), %ecx
117 ; X86-NEXT: shll $14, %ecx
118 ; X86-NEXT: movl %ecx, %eax
120 ; X86-NEXT: idivl %esi
121 ; X86-NEXT: leal -1(%eax), %edi
122 ; X86-NEXT: testl %esi, %esi
124 ; X86-NEXT: testl %ecx, %ecx
126 ; X86-NEXT: xorb %bl, %cl
127 ; X86-NEXT: testl %edx, %edx
128 ; X86-NEXT: setne %dl
129 ; X86-NEXT: testb %cl, %dl
130 ; X86-NEXT: cmovnel %edi, %eax
131 ; X86-NEXT: cmpl $16383, %eax # imm = 0x3FFF
132 ; X86-NEXT: movl $16383, %ecx # imm = 0x3FFF
133 ; X86-NEXT: cmovgel %ecx, %eax
134 ; X86-NEXT: cmpl $-16383, %eax # imm = 0xC001
135 ; X86-NEXT: movl $-16384, %ecx # imm = 0xC000
136 ; X86-NEXT: cmovll %ecx, %eax
137 ; X86-NEXT: # kill: def $ax killed $ax killed $eax
138 ; X86-NEXT: popl %esi
139 ; X86-NEXT: popl %edi
140 ; X86-NEXT: popl %ebx
142 %x2 = sext i8 %x to i15
143 %y2 = sext i8 %y to i15
144 %tmp = call i15 @llvm.sdiv.fix.sat.i15(i15 %x2, i15 %y2, i32 14)
145 %tmp2 = sext i15 %tmp to i16
; i15 sdiv.fix.sat with scale 4, mixed operand widths (i15 %x, i8 %y sext'd).
; This variant lowers to a 16-bit idivw rather than a 32-bit divide, with the
; saturation comparisons done on a movswl sign-extension of the 16-bit result
; (bounds 0x3FFF / 0xC000).
; NOTE(review): CHECK lines are autogenerated by update_llc_test_checks.py.
149 define i16 @func3(i15 %x, i8 %y) nounwind {
153 ; X64-NEXT: shll $8, %esi
154 ; X64-NEXT: movswl %si, %ecx
155 ; X64-NEXT: addl %edi, %edi
156 ; X64-NEXT: shrl $4, %ecx
157 ; X64-NEXT: movl %edi, %eax
159 ; X64-NEXT: idivw %cx
160 ; X64-NEXT: # kill: def $ax killed $ax def $rax
161 ; X64-NEXT: leal -1(%rax), %esi
162 ; X64-NEXT: testw %di, %di
163 ; X64-NEXT: sets %dil
164 ; X64-NEXT: testw %cx, %cx
166 ; X64-NEXT: xorb %dil, %cl
167 ; X64-NEXT: testw %dx, %dx
168 ; X64-NEXT: setne %dl
169 ; X64-NEXT: testb %cl, %dl
170 ; X64-NEXT: cmovnel %esi, %eax
171 ; X64-NEXT: movswl %ax, %ecx
172 ; X64-NEXT: cmpl $16383, %ecx # imm = 0x3FFF
173 ; X64-NEXT: movl $16383, %ecx # imm = 0x3FFF
174 ; X64-NEXT: cmovgel %ecx, %eax
175 ; X64-NEXT: movswl %ax, %ecx
176 ; X64-NEXT: cmpl $-16383, %ecx # imm = 0xC001
177 ; X64-NEXT: movl $49152, %ecx # imm = 0xC000
178 ; X64-NEXT: cmovll %ecx, %eax
179 ; X64-NEXT: # kill: def $ax killed $ax killed $rax
184 ; X86-NEXT: pushl %edi
185 ; X86-NEXT: pushl %esi
186 ; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
187 ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
188 ; X86-NEXT: shll $8, %eax
189 ; X86-NEXT: movswl %ax, %esi
190 ; X86-NEXT: addl %ecx, %ecx
191 ; X86-NEXT: shrl $4, %esi
192 ; X86-NEXT: movl %ecx, %eax
194 ; X86-NEXT: idivw %si
195 ; X86-NEXT: # kill: def $ax killed $ax def $eax
196 ; X86-NEXT: leal -1(%eax), %edi
197 ; X86-NEXT: testw %cx, %cx
199 ; X86-NEXT: testw %si, %si
201 ; X86-NEXT: xorb %cl, %ch
202 ; X86-NEXT: testw %dx, %dx
203 ; X86-NEXT: setne %cl
204 ; X86-NEXT: testb %ch, %cl
205 ; X86-NEXT: cmovnel %edi, %eax
206 ; X86-NEXT: movswl %ax, %ecx
207 ; X86-NEXT: cmpl $16383, %ecx # imm = 0x3FFF
208 ; X86-NEXT: movl $16383, %ecx # imm = 0x3FFF
209 ; X86-NEXT: cmovgel %ecx, %eax
210 ; X86-NEXT: movswl %ax, %ecx
211 ; X86-NEXT: cmpl $-16383, %ecx # imm = 0xC001
212 ; X86-NEXT: movl $49152, %ecx # imm = 0xC000
213 ; X86-NEXT: cmovll %ecx, %eax
214 ; X86-NEXT: # kill: def $ax killed $ax killed $eax
215 ; X86-NEXT: popl %esi
216 ; X86-NEXT: popl %edi
218 %y2 = sext i8 %y to i15
220 %tmp = call i15 @llvm.sdiv.fix.sat.i15(i15 %x, i15 %y3, i32 4)
221 %tmp2 = sext i15 %tmp to i16
; i4 sdiv.fix.sat with scale 2: operands are sign-extended in-register via
; shlb/sarb $4 pairs, divided with the 8-bit idivb (remainder read from %ah),
; then saturated against the i4 bounds 7 / -8 (0xF8) using byte compares and
; cmov on the widened registers.
; NOTE(review): CHECK lines are autogenerated by update_llc_test_checks.py.
225 define i4 @func4(i4 %x, i4 %y) nounwind {
229 ; X64-NEXT: shlb $4, %sil
230 ; X64-NEXT: sarb $4, %sil
231 ; X64-NEXT: shlb $4, %dil
232 ; X64-NEXT: sarb $4, %dil
233 ; X64-NEXT: shlb $2, %dil
234 ; X64-NEXT: movsbl %dil, %ecx
235 ; X64-NEXT: movl %ecx, %eax
236 ; X64-NEXT: idivb %sil
237 ; X64-NEXT: movsbl %ah, %edx
238 ; X64-NEXT: movzbl %al, %eax
239 ; X64-NEXT: leal -1(%rax), %edi
240 ; X64-NEXT: movzbl %dil, %edi
241 ; X64-NEXT: testb %sil, %sil
242 ; X64-NEXT: sets %sil
243 ; X64-NEXT: testb %cl, %cl
245 ; X64-NEXT: xorb %sil, %cl
246 ; X64-NEXT: testb %dl, %dl
247 ; X64-NEXT: setne %dl
248 ; X64-NEXT: testb %cl, %dl
249 ; X64-NEXT: cmovel %eax, %edi
250 ; X64-NEXT: cmpb $7, %dil
251 ; X64-NEXT: movl $7, %ecx
252 ; X64-NEXT: cmovll %edi, %ecx
253 ; X64-NEXT: cmpb $-7, %cl
254 ; X64-NEXT: movl $248, %eax
255 ; X64-NEXT: cmovgel %ecx, %eax
256 ; X64-NEXT: # kill: def $al killed $al killed $eax
261 ; X86-NEXT: pushl %ebx
262 ; X86-NEXT: pushl %esi
263 ; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
264 ; X86-NEXT: shlb $4, %cl
265 ; X86-NEXT: sarb $4, %cl
266 ; X86-NEXT: movzbl {{[0-9]+}}(%esp), %edx
267 ; X86-NEXT: shlb $4, %dl
268 ; X86-NEXT: sarb $4, %dl
269 ; X86-NEXT: shlb $2, %dl
270 ; X86-NEXT: movsbl %dl, %eax
271 ; X86-NEXT: idivb %cl
272 ; X86-NEXT: movsbl %ah, %ebx
273 ; X86-NEXT: movzbl %al, %esi
275 ; X86-NEXT: movzbl %al, %eax
276 ; X86-NEXT: testb %cl, %cl
278 ; X86-NEXT: testb %dl, %dl
280 ; X86-NEXT: xorb %cl, %dl
281 ; X86-NEXT: testb %bl, %bl
282 ; X86-NEXT: setne %cl
283 ; X86-NEXT: testb %dl, %cl
284 ; X86-NEXT: cmovel %esi, %eax
285 ; X86-NEXT: cmpb $7, %al
286 ; X86-NEXT: movl $7, %ecx
287 ; X86-NEXT: cmovll %eax, %ecx
288 ; X86-NEXT: cmpb $-7, %cl
289 ; X86-NEXT: movl $248, %eax
290 ; X86-NEXT: cmovgel %ecx, %eax
291 ; X86-NEXT: # kill: def $al killed $al killed $eax
292 ; X86-NEXT: popl %esi
293 ; X86-NEXT: popl %ebx
295 %tmp = call i4 @llvm.sdiv.fix.sat.i4(i4 %x, i4 %y, i32 2)
; i64 sdiv.fix.sat with scale 31: too wide for a hardware divide, so codegen
; emits 128-bit libcalls — __divti3 for the quotient and __modti3 for the
; remainder test that drives the cmov quotient adjustment — followed by
; saturation of the 65-bit intermediate. The X86 side does the same through
; sret-style stack calls with four-word arguments.
; NOTE(review): CHECK lines are autogenerated by update_llc_test_checks.py.
303 ; X64-NEXT: pushq %rbp
304 ; X64-NEXT: pushq %r15
305 ; X64-NEXT: pushq %r14
306 ; X64-NEXT: pushq %r13
307 ; X64-NEXT: pushq %r12
308 ; X64-NEXT: pushq %rbx
309 ; X64-NEXT: subq $24, %rsp
310 ; X64-NEXT: movq %rsi, %rdx
311 ; X64-NEXT: movq %rsi, (%rsp) # 8-byte Spill
312 ; X64-NEXT: movq %rdi, %r14
313 ; X64-NEXT: leaq (%rdi,%rdi), %rax
314 ; X64-NEXT: movq %rdi, %r15
315 ; X64-NEXT: sarq $63, %r15
316 ; X64-NEXT: shldq $31, %rax, %r15
317 ; X64-NEXT: shlq $32, %r14
318 ; X64-NEXT: movq %rsi, %r12
319 ; X64-NEXT: sarq $63, %r12
320 ; X64-NEXT: movq %r14, %rdi
321 ; X64-NEXT: movq %r15, %rsi
322 ; X64-NEXT: movq %r12, %rcx
323 ; X64-NEXT: callq __divti3@PLT
324 ; X64-NEXT: movq %rax, %r13
325 ; X64-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
326 ; X64-NEXT: movq %rdx, %rbp
327 ; X64-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
328 ; X64-NEXT: subq $1, %r13
329 ; X64-NEXT: sbbq $0, %rbp
330 ; X64-NEXT: testq %r15, %r15
332 ; X64-NEXT: testq %r12, %r12
334 ; X64-NEXT: xorb %al, %bl
335 ; X64-NEXT: movq %r14, %rdi
336 ; X64-NEXT: movq %r15, %rsi
337 ; X64-NEXT: movq (%rsp), %rdx # 8-byte Reload
338 ; X64-NEXT: movq %r12, %rcx
339 ; X64-NEXT: callq __modti3@PLT
340 ; X64-NEXT: orq %rax, %rdx
341 ; X64-NEXT: setne %al
342 ; X64-NEXT: testb %bl, %al
343 ; X64-NEXT: cmoveq {{[-0-9]+}}(%r{{[sb]}}p), %rbp # 8-byte Folded Reload
344 ; X64-NEXT: cmoveq {{[-0-9]+}}(%r{{[sb]}}p), %r13 # 8-byte Folded Reload
345 ; X64-NEXT: movq %rbp, %rcx
346 ; X64-NEXT: sarq $63, %rcx
347 ; X64-NEXT: andq %rbp, %rcx
348 ; X64-NEXT: testq %rbp, %rbp
349 ; X64-NEXT: movq $-1, %rdx
350 ; X64-NEXT: cmovgq %rdx, %r13
351 ; X64-NEXT: xorl %eax, %eax
352 ; X64-NEXT: cmpq $-1, %rcx
353 ; X64-NEXT: cmovlq %rdx, %rcx
354 ; X64-NEXT: cmovgeq %r13, %rax
355 ; X64-NEXT: shrdq $1, %rcx, %rax
356 ; X64-NEXT: addq $24, %rsp
357 ; X64-NEXT: popq %rbx
358 ; X64-NEXT: popq %r12
359 ; X64-NEXT: popq %r13
360 ; X64-NEXT: popq %r14
361 ; X64-NEXT: popq %r15
362 ; X64-NEXT: popq %rbp
367 ; X86-NEXT: pushl %ebp
368 ; X86-NEXT: movl %esp, %ebp
369 ; X86-NEXT: pushl %ebx
370 ; X86-NEXT: pushl %edi
371 ; X86-NEXT: pushl %esi
372 ; X86-NEXT: andl $-16, %esp
373 ; X86-NEXT: subl $96, %esp
374 ; X86-NEXT: movl 8(%ebp), %ecx
375 ; X86-NEXT: movl 12(%ebp), %eax
376 ; X86-NEXT: movl 20(%ebp), %esi
377 ; X86-NEXT: movl %esi, %ebx
378 ; X86-NEXT: sarl $31, %ebx
379 ; X86-NEXT: movl %eax, %edx
380 ; X86-NEXT: sarl $31, %edx
381 ; X86-NEXT: movl %edx, %edi
382 ; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
383 ; X86-NEXT: shldl $31, %eax, %edi
384 ; X86-NEXT: shldl $31, %ecx, %eax
385 ; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
386 ; X86-NEXT: shll $31, %ecx
387 ; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
388 ; X86-NEXT: pushl %ebx
389 ; X86-NEXT: pushl %ebx
390 ; X86-NEXT: pushl %esi
391 ; X86-NEXT: pushl 16(%ebp)
392 ; X86-NEXT: pushl %edx
393 ; X86-NEXT: pushl %edi
394 ; X86-NEXT: pushl %eax
395 ; X86-NEXT: pushl %ecx
396 ; X86-NEXT: leal {{[0-9]+}}(%esp), %eax
397 ; X86-NEXT: pushl %eax
398 ; X86-NEXT: calll __divti3
399 ; X86-NEXT: addl $32, %esp
400 ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
401 ; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
402 ; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
403 ; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
404 ; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
405 ; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
406 ; X86-NEXT: subl $1, %esi
407 ; X86-NEXT: sbbl $0, %ecx
408 ; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
409 ; X86-NEXT: sbbl $0, %eax
410 ; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
411 ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
412 ; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
413 ; X86-NEXT: sbbl $0, %eax
414 ; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
415 ; X86-NEXT: testl %ebx, %ebx
417 ; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
418 ; X86-NEXT: testl %ecx, %ecx
420 ; X86-NEXT: xorb %al, %dl
421 ; X86-NEXT: movb %dl, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Spill
422 ; X86-NEXT: leal {{[0-9]+}}(%esp), %eax
423 ; X86-NEXT: pushl %ebx
424 ; X86-NEXT: pushl %ebx
425 ; X86-NEXT: pushl 20(%ebp)
426 ; X86-NEXT: pushl 16(%ebp)
427 ; X86-NEXT: pushl %ecx
428 ; X86-NEXT: pushl %edi
429 ; X86-NEXT: pushl {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Reload
430 ; X86-NEXT: pushl {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Reload
431 ; X86-NEXT: pushl %eax
432 ; X86-NEXT: calll __modti3
433 ; X86-NEXT: addl $32, %esp
434 ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
435 ; X86-NEXT: orl {{[0-9]+}}(%esp), %eax
436 ; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
437 ; X86-NEXT: orl {{[0-9]+}}(%esp), %ecx
438 ; X86-NEXT: orl %eax, %ecx
439 ; X86-NEXT: setne %al
440 ; X86-NEXT: testb %al, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Reload
441 ; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
442 ; X86-NEXT: cmovel {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
443 ; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
444 ; X86-NEXT: cmovel {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
445 ; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
446 ; X86-NEXT: cmovel {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
447 ; X86-NEXT: cmovel {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
448 ; X86-NEXT: cmpl $-1, %esi
449 ; X86-NEXT: movl %eax, %ecx
450 ; X86-NEXT: sbbl $2147483647, %ecx # imm = 0x7FFFFFFF
451 ; X86-NEXT: movl %edx, %ecx
452 ; X86-NEXT: movl %edx, %ebx
453 ; X86-NEXT: sbbl $0, %ecx
454 ; X86-NEXT: movl %edi, %ecx
455 ; X86-NEXT: sbbl $0, %ecx
456 ; X86-NEXT: movl $2147483647, %edx # imm = 0x7FFFFFFF
457 ; X86-NEXT: cmovll %eax, %edx
458 ; X86-NEXT: movl $0, %ecx
459 ; X86-NEXT: cmovgel %ecx, %edi
460 ; X86-NEXT: movl %edi, %eax
461 ; X86-NEXT: cmovgel %ecx, %ebx
462 ; X86-NEXT: movl $-1, %ecx
463 ; X86-NEXT: cmovgel %ecx, %esi
464 ; X86-NEXT: movl %esi, %edi
465 ; X86-NEXT: negl %edi
466 ; X86-NEXT: movl $-2147483648, %edi # imm = 0x80000000
467 ; X86-NEXT: sbbl %edx, %edi
468 ; X86-NEXT: movl $-1, %edi
469 ; X86-NEXT: sbbl %ebx, %edi
470 ; X86-NEXT: sbbl %eax, %ecx
471 ; X86-NEXT: movl $0, %eax
472 ; X86-NEXT: cmovgel %eax, %esi
473 ; X86-NEXT: movl $-2147483648, %eax # imm = 0x80000000
474 ; X86-NEXT: cmovgel %eax, %edx
475 ; X86-NEXT: movl %esi, %eax
476 ; X86-NEXT: leal -12(%ebp), %esp
477 ; X86-NEXT: popl %esi
478 ; X86-NEXT: popl %edi
479 ; X86-NEXT: popl %ebx
480 ; X86-NEXT: popl %ebp
482 %tmp = call i64 @llvm.sdiv.fix.sat.i64(i64 %x, i64 %y, i32 31)
; i18 sdiv.fix.sat with scale 7, operands sign-extended from i16. Same
; widened idivl + cmovnel remainder-adjustment pattern as @func, saturating
; against the i18 bounds 0x1FFFF / 0xFFFE0000.
; NOTE(review): CHECK lines are autogenerated by update_llc_test_checks.py.
486 define i18 @func6(i16 %x, i16 %y) nounwind {
490 ; X64-NEXT: movswl %di, %ecx
491 ; X64-NEXT: movswl %si, %esi
492 ; X64-NEXT: shll $7, %ecx
493 ; X64-NEXT: movl %ecx, %eax
495 ; X64-NEXT: idivl %esi
496 ; X64-NEXT: # kill: def $eax killed $eax def $rax
497 ; X64-NEXT: leal -1(%rax), %edi
498 ; X64-NEXT: testl %esi, %esi
499 ; X64-NEXT: sets %sil
500 ; X64-NEXT: testl %ecx, %ecx
502 ; X64-NEXT: xorb %sil, %cl
503 ; X64-NEXT: testl %edx, %edx
504 ; X64-NEXT: setne %dl
505 ; X64-NEXT: testb %cl, %dl
506 ; X64-NEXT: cmovnel %edi, %eax
507 ; X64-NEXT: cmpl $131071, %eax # imm = 0x1FFFF
508 ; X64-NEXT: movl $131071, %ecx # imm = 0x1FFFF
509 ; X64-NEXT: cmovgel %ecx, %eax
510 ; X64-NEXT: cmpl $-131071, %eax # imm = 0xFFFE0001
511 ; X64-NEXT: movl $-131072, %ecx # imm = 0xFFFE0000
512 ; X64-NEXT: cmovll %ecx, %eax
513 ; X64-NEXT: # kill: def $eax killed $eax killed $rax
518 ; X86-NEXT: pushl %ebx
519 ; X86-NEXT: pushl %edi
520 ; X86-NEXT: pushl %esi
521 ; X86-NEXT: movswl {{[0-9]+}}(%esp), %esi
522 ; X86-NEXT: movswl {{[0-9]+}}(%esp), %ecx
523 ; X86-NEXT: shll $7, %ecx
524 ; X86-NEXT: movl %ecx, %eax
526 ; X86-NEXT: idivl %esi
527 ; X86-NEXT: leal -1(%eax), %edi
528 ; X86-NEXT: testl %esi, %esi
530 ; X86-NEXT: testl %ecx, %ecx
532 ; X86-NEXT: xorb %bl, %cl
533 ; X86-NEXT: testl %edx, %edx
534 ; X86-NEXT: setne %dl
535 ; X86-NEXT: testb %cl, %dl
536 ; X86-NEXT: cmovnel %edi, %eax
537 ; X86-NEXT: cmpl $131071, %eax # imm = 0x1FFFF
538 ; X86-NEXT: movl $131071, %ecx # imm = 0x1FFFF
539 ; X86-NEXT: cmovgel %ecx, %eax
540 ; X86-NEXT: cmpl $-131071, %eax # imm = 0xFFFE0001
541 ; X86-NEXT: movl $-131072, %ecx # imm = 0xFFFE0000
542 ; X86-NEXT: cmovll %ecx, %eax
543 ; X86-NEXT: popl %esi
544 ; X86-NEXT: popl %edi
545 ; X86-NEXT: popl %ebx
547 %x2 = sext i16 %x to i18
548 %y2 = sext i16 %y to i18
549 %tmp = call i18 @llvm.sdiv.fix.sat.i18(i18 %x2, i18 %y2, i32 7)
553 define <4 x i32> @vec(<4 x i32> %x, <4 x i32> %y) nounwind {
557 ; X64-NEXT: pushq %rbp
558 ; X64-NEXT: pushq %r15
559 ; X64-NEXT: pushq %r14
560 ; X64-NEXT: pushq %r13
561 ; X64-NEXT: pushq %r12
562 ; X64-NEXT: pushq %rbx
563 ; X64-NEXT: subq $120, %rsp
564 ; X64-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
565 ; X64-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
566 ; X64-NEXT: pshufd {{.*#+}} xmm3 = xmm0[2,1,3,3]
567 ; X64-NEXT: psllq $32, %xmm3
568 ; X64-NEXT: pshufd {{.*#+}} xmm2 = xmm3[1,3,2,3]
569 ; X64-NEXT: psrad $31, %xmm2
570 ; X64-NEXT: psrlq $31, %xmm3
571 ; X64-NEXT: pshufd {{.*#+}} xmm0 = xmm3[0,2,2,3]
572 ; X64-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
573 ; X64-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
574 ; X64-NEXT: movq %xmm0, %rbx
575 ; X64-NEXT: movq %rbx, %r13
576 ; X64-NEXT: sarq $63, %r13
577 ; X64-NEXT: shldq $31, %rbx, %r13
578 ; X64-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,2,3]
579 ; X64-NEXT: pxor %xmm0, %xmm0
580 ; X64-NEXT: pcmpgtd %xmm1, %xmm0
581 ; X64-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
582 ; X64-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
583 ; X64-NEXT: movq %xmm1, %rdx
584 ; X64-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
585 ; X64-NEXT: movq %rdx, %r15
586 ; X64-NEXT: sarq $63, %r15
587 ; X64-NEXT: movq %rbx, %r12
588 ; X64-NEXT: shlq $31, %r12
589 ; X64-NEXT: movq %r12, %rdi
590 ; X64-NEXT: movq %r13, %rsi
591 ; X64-NEXT: movq %r15, %rcx
592 ; X64-NEXT: callq __divti3@PLT
593 ; X64-NEXT: movq %rax, %rbp
594 ; X64-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
595 ; X64-NEXT: movq %rdx, %r14
596 ; X64-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
597 ; X64-NEXT: subq $1, %rbp
598 ; X64-NEXT: sbbq $0, %r14
599 ; X64-NEXT: shrq $63, %rbx
600 ; X64-NEXT: xorl %r15d, %ebx
601 ; X64-NEXT: movq %r12, %rdi
602 ; X64-NEXT: movq %r13, %rsi
603 ; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdx # 8-byte Reload
604 ; X64-NEXT: movq %r15, %rcx
605 ; X64-NEXT: callq __modti3@PLT
606 ; X64-NEXT: orq %rax, %rdx
607 ; X64-NEXT: setne %al
608 ; X64-NEXT: testb %bl, %al
609 ; X64-NEXT: cmoveq {{[-0-9]+}}(%r{{[sb]}}p), %r14 # 8-byte Folded Reload
610 ; X64-NEXT: cmoveq {{[-0-9]+}}(%r{{[sb]}}p), %rbp # 8-byte Folded Reload
611 ; X64-NEXT: xorl %ecx, %ecx
612 ; X64-NEXT: movl $4294967295, %edx # imm = 0xFFFFFFFF
613 ; X64-NEXT: cmpq %rdx, %rbp
614 ; X64-NEXT: movq %r14, %rax
615 ; X64-NEXT: sbbq $0, %rax
616 ; X64-NEXT: cmovgeq %rcx, %r14
617 ; X64-NEXT: cmovgeq %rdx, %rbp
618 ; X64-NEXT: movabsq $-4294967296, %rcx # imm = 0xFFFFFFFF00000000
619 ; X64-NEXT: cmpq %rbp, %rcx
620 ; X64-NEXT: movq $-1, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
621 ; X64-NEXT: movq $-1, %rax
622 ; X64-NEXT: sbbq %r14, %rax
623 ; X64-NEXT: cmovgeq %rcx, %rbp
624 ; X64-NEXT: movq %rbp, %xmm0
625 ; X64-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
626 ; X64-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
627 ; X64-NEXT: # xmm0 = mem[2,3,2,3]
628 ; X64-NEXT: movq %xmm0, %rbx
629 ; X64-NEXT: movq %rbx, %r13
630 ; X64-NEXT: sarq $63, %r13
631 ; X64-NEXT: shldq $31, %rbx, %r13
632 ; X64-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
633 ; X64-NEXT: # xmm0 = mem[2,3,2,3]
634 ; X64-NEXT: movq %xmm0, %rdx
635 ; X64-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
636 ; X64-NEXT: movq %rdx, %r15
637 ; X64-NEXT: sarq $63, %r15
638 ; X64-NEXT: movq %rbx, %r12
639 ; X64-NEXT: shlq $31, %r12
640 ; X64-NEXT: movq %r12, %rdi
641 ; X64-NEXT: movq %r13, %rsi
642 ; X64-NEXT: movq %r15, %rcx
643 ; X64-NEXT: callq __divti3@PLT
644 ; X64-NEXT: movq %rax, %rbp
645 ; X64-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
646 ; X64-NEXT: movq %rdx, %r14
647 ; X64-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
648 ; X64-NEXT: subq $1, %rbp
649 ; X64-NEXT: sbbq $0, %r14
650 ; X64-NEXT: shrq $63, %rbx
651 ; X64-NEXT: xorl %r15d, %ebx
652 ; X64-NEXT: movq %r12, %rdi
653 ; X64-NEXT: movq %r13, %rsi
654 ; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdx # 8-byte Reload
655 ; X64-NEXT: movq %r15, %rcx
656 ; X64-NEXT: callq __modti3@PLT
657 ; X64-NEXT: orq %rax, %rdx
658 ; X64-NEXT: setne %al
659 ; X64-NEXT: testb %bl, %al
660 ; X64-NEXT: cmoveq {{[-0-9]+}}(%r{{[sb]}}p), %r14 # 8-byte Folded Reload
661 ; X64-NEXT: cmoveq {{[-0-9]+}}(%r{{[sb]}}p), %rbp # 8-byte Folded Reload
662 ; X64-NEXT: movl $4294967295, %ecx # imm = 0xFFFFFFFF
663 ; X64-NEXT: cmpq %rcx, %rbp
664 ; X64-NEXT: movq %r14, %rax
665 ; X64-NEXT: sbbq $0, %rax
666 ; X64-NEXT: movl $0, %eax
667 ; X64-NEXT: cmovgeq %rax, %r14
668 ; X64-NEXT: cmovgeq %rcx, %rbp
669 ; X64-NEXT: movabsq $-4294967296, %rcx # imm = 0xFFFFFFFF00000000
670 ; X64-NEXT: cmpq %rbp, %rcx
671 ; X64-NEXT: movq $-1, %rax
672 ; X64-NEXT: sbbq %r14, %rax
673 ; X64-NEXT: cmovgeq %rcx, %rbp
674 ; X64-NEXT: movq %rbp, %xmm0
675 ; X64-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
676 ; X64-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
677 ; X64-NEXT: psrlq $1, %xmm1
678 ; X64-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
679 ; X64-NEXT: pshufd $212, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
680 ; X64-NEXT: # xmm0 = mem[0,1,1,3]
681 ; X64-NEXT: psllq $32, %xmm0
682 ; X64-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,3,2,3]
683 ; X64-NEXT: psrad $31, %xmm1
684 ; X64-NEXT: psrlq $31, %xmm0
685 ; X64-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
686 ; X64-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
687 ; X64-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
688 ; X64-NEXT: movq %xmm0, %rbx
689 ; X64-NEXT: movq %rbx, %r13
690 ; X64-NEXT: sarq $63, %r13
691 ; X64-NEXT: shldq $31, %rbx, %r13
692 ; X64-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
693 ; X64-NEXT: pxor %xmm1, %xmm1
694 ; X64-NEXT: pcmpgtd %xmm0, %xmm1
695 ; X64-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
696 ; X64-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
697 ; X64-NEXT: movq %xmm0, %rdx
698 ; X64-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
699 ; X64-NEXT: movq %rdx, %r15
700 ; X64-NEXT: sarq $63, %r15
701 ; X64-NEXT: movq %rbx, %r12
702 ; X64-NEXT: shlq $31, %r12
703 ; X64-NEXT: movq %r12, %rdi
704 ; X64-NEXT: movq %r13, %rsi
705 ; X64-NEXT: movq %r15, %rcx
706 ; X64-NEXT: callq __divti3@PLT
707 ; X64-NEXT: movq %rax, %rbp
708 ; X64-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
709 ; X64-NEXT: movq %rdx, %r14
710 ; X64-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
711 ; X64-NEXT: subq $1, %rbp
712 ; X64-NEXT: sbbq $0, %r14
713 ; X64-NEXT: shrq $63, %rbx
714 ; X64-NEXT: xorl %r15d, %ebx
715 ; X64-NEXT: movq %r12, %rdi
716 ; X64-NEXT: movq %r13, %rsi
717 ; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdx # 8-byte Reload
718 ; X64-NEXT: movq %r15, %rcx
719 ; X64-NEXT: callq __modti3@PLT
720 ; X64-NEXT: orq %rax, %rdx
721 ; X64-NEXT: setne %al
722 ; X64-NEXT: testb %bl, %al
723 ; X64-NEXT: cmoveq {{[-0-9]+}}(%r{{[sb]}}p), %r14 # 8-byte Folded Reload
724 ; X64-NEXT: cmoveq {{[-0-9]+}}(%r{{[sb]}}p), %rbp # 8-byte Folded Reload
725 ; X64-NEXT: movl $4294967295, %ecx # imm = 0xFFFFFFFF
726 ; X64-NEXT: cmpq %rcx, %rbp
727 ; X64-NEXT: movq %r14, %rax
728 ; X64-NEXT: sbbq $0, %rax
729 ; X64-NEXT: movl $0, %eax
730 ; X64-NEXT: cmovgeq %rax, %r14
731 ; X64-NEXT: cmovgeq %rcx, %rbp
732 ; X64-NEXT: movabsq $-4294967296, %rcx # imm = 0xFFFFFFFF00000000
733 ; X64-NEXT: cmpq %rbp, %rcx
734 ; X64-NEXT: movq $-1, %rax
735 ; X64-NEXT: sbbq %r14, %rax
736 ; X64-NEXT: cmovgeq %rcx, %rbp
737 ; X64-NEXT: movq %rbp, %xmm0
738 ; X64-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
739 ; X64-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
740 ; X64-NEXT: # xmm0 = mem[2,3,2,3]
741 ; X64-NEXT: movq %xmm0, %rbx
742 ; X64-NEXT: movq %rbx, %r13
743 ; X64-NEXT: sarq $63, %r13
744 ; X64-NEXT: shldq $31, %rbx, %r13
745 ; X64-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
746 ; X64-NEXT: # xmm0 = mem[2,3,2,3]
747 ; X64-NEXT: movq %xmm0, %rdx
748 ; X64-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
749 ; X64-NEXT: movq %rdx, %r15
750 ; X64-NEXT: sarq $63, %r15
751 ; X64-NEXT: movq %rbx, %r12
752 ; X64-NEXT: shlq $31, %r12
753 ; X64-NEXT: movq %r12, %rdi
754 ; X64-NEXT: movq %r13, %rsi
755 ; X64-NEXT: movq %r15, %rcx
756 ; X64-NEXT: callq __divti3@PLT
757 ; X64-NEXT: movq %rax, %rbp
758 ; X64-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
759 ; X64-NEXT: movq %rdx, %r14
760 ; X64-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
761 ; X64-NEXT: subq $1, %rbp
762 ; X64-NEXT: sbbq $0, %r14
763 ; X64-NEXT: shrq $63, %rbx
764 ; X64-NEXT: xorl %r15d, %ebx
765 ; X64-NEXT: movq %r12, %rdi
766 ; X64-NEXT: movq %r13, %rsi
767 ; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdx # 8-byte Reload
768 ; X64-NEXT: movq %r15, %rcx
769 ; X64-NEXT: callq __modti3@PLT
770 ; X64-NEXT: orq %rax, %rdx
771 ; X64-NEXT: setne %al
772 ; X64-NEXT: testb %bl, %al
773 ; X64-NEXT: cmoveq {{[-0-9]+}}(%r{{[sb]}}p), %r14 # 8-byte Folded Reload
774 ; X64-NEXT: cmoveq {{[-0-9]+}}(%r{{[sb]}}p), %rbp # 8-byte Folded Reload
775 ; X64-NEXT: movl $4294967295, %ecx # imm = 0xFFFFFFFF
776 ; X64-NEXT: cmpq %rcx, %rbp
777 ; X64-NEXT: movq %r14, %rax
778 ; X64-NEXT: sbbq $0, %rax
779 ; X64-NEXT: movl $0, %eax
780 ; X64-NEXT: cmovgeq %rax, %r14
781 ; X64-NEXT: cmovgeq %rcx, %rbp
782 ; X64-NEXT: movabsq $-4294967296, %rax # imm = 0xFFFFFFFF00000000
783 ; X64-NEXT: cmpq %rbp, %rax
784 ; X64-NEXT: sbbq %r14, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
785 ; X64-NEXT: cmovgeq %rax, %rbp
786 ; X64-NEXT: movq %rbp, %xmm1
787 ; X64-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
788 ; X64-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
789 ; X64-NEXT: psrlq $1, %xmm0
790 ; X64-NEXT: shufps $136, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
791 ; X64-NEXT: # xmm0 = xmm0[0,2],mem[0,2]
792 ; X64-NEXT: addq $120, %rsp
793 ; X64-NEXT: popq %rbx
794 ; X64-NEXT: popq %r12
795 ; X64-NEXT: popq %r13
796 ; X64-NEXT: popq %r14
797 ; X64-NEXT: popq %r15
798 ; X64-NEXT: popq %rbp
803 ; X86-NEXT: pushl %ebp
804 ; X86-NEXT: movl %esp, %ebp
805 ; X86-NEXT: pushl %ebx
806 ; X86-NEXT: pushl %edi
807 ; X86-NEXT: pushl %esi
808 ; X86-NEXT: andl $-16, %esp
809 ; X86-NEXT: subl $208, %esp
810 ; X86-NEXT: movl 36(%ebp), %esi
811 ; X86-NEXT: movl 16(%ebp), %ebx
812 ; X86-NEXT: movl 32(%ebp), %eax
813 ; X86-NEXT: movl %eax, %edi
814 ; X86-NEXT: movl %eax, %ecx
815 ; X86-NEXT: sarl $31, %edi
816 ; X86-NEXT: movl %ebx, %edx
817 ; X86-NEXT: sarl $31, %edx
818 ; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
819 ; X86-NEXT: leal (%ebx,%ebx), %eax
820 ; X86-NEXT: shrl $31, %ebx
821 ; X86-NEXT: shldl $31, %eax, %ebx
822 ; X86-NEXT: leal {{[0-9]+}}(%esp), %eax
823 ; X86-NEXT: pushl %edi
824 ; X86-NEXT: pushl %edi
825 ; X86-NEXT: pushl %edi
826 ; X86-NEXT: pushl %ecx
827 ; X86-NEXT: pushl %edx
828 ; X86-NEXT: pushl %edx
829 ; X86-NEXT: pushl %ebx
831 ; X86-NEXT: pushl %eax
832 ; X86-NEXT: calll __modti3
833 ; X86-NEXT: addl $32, %esp
834 ; X86-NEXT: sarl $31, %esi
835 ; X86-NEXT: movl 20(%ebp), %ecx
836 ; X86-NEXT: movl %ecx, %eax
837 ; X86-NEXT: sarl $31, %eax
838 ; X86-NEXT: leal (%ecx,%ecx), %edx
839 ; X86-NEXT: shrl $31, %ecx
840 ; X86-NEXT: shldl $31, %edx, %ecx
841 ; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
842 ; X86-NEXT: leal {{[0-9]+}}(%esp), %edx
843 ; X86-NEXT: pushl %esi
844 ; X86-NEXT: pushl %esi
845 ; X86-NEXT: pushl %esi
846 ; X86-NEXT: pushl 36(%ebp)
847 ; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
848 ; X86-NEXT: pushl %eax
849 ; X86-NEXT: pushl %eax
850 ; X86-NEXT: pushl %ecx
852 ; X86-NEXT: pushl %edx
853 ; X86-NEXT: calll __divti3
854 ; X86-NEXT: addl $32, %esp
855 ; X86-NEXT: leal {{[0-9]+}}(%esp), %eax
856 ; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
857 ; X86-NEXT: pushl %edi
858 ; X86-NEXT: pushl %edi
859 ; X86-NEXT: pushl %edi
860 ; X86-NEXT: pushl 32(%ebp)
861 ; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
862 ; X86-NEXT: pushl %ecx
863 ; X86-NEXT: pushl %ecx
864 ; X86-NEXT: pushl %ebx
866 ; X86-NEXT: pushl %eax
867 ; X86-NEXT: calll __divti3
868 ; X86-NEXT: addl $32, %esp
869 ; X86-NEXT: leal {{[0-9]+}}(%esp), %eax
870 ; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
871 ; X86-NEXT: pushl %esi
872 ; X86-NEXT: pushl %esi
873 ; X86-NEXT: pushl %esi
874 ; X86-NEXT: pushl 36(%ebp)
875 ; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
876 ; X86-NEXT: pushl %edi
877 ; X86-NEXT: pushl %edi
878 ; X86-NEXT: pushl {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Reload
880 ; X86-NEXT: pushl %eax
881 ; X86-NEXT: calll __modti3
882 ; X86-NEXT: addl $32, %esp
883 ; X86-NEXT: movl 28(%ebp), %edx
884 ; X86-NEXT: movl %edx, %ebx
885 ; X86-NEXT: sarl $31, %ebx
886 ; X86-NEXT: movl 12(%ebp), %ecx
887 ; X86-NEXT: movl %ecx, %edi
888 ; X86-NEXT: sarl $31, %edi
889 ; X86-NEXT: leal (%ecx,%ecx), %eax
890 ; X86-NEXT: shrl $31, %ecx
891 ; X86-NEXT: shldl $31, %eax, %ecx
892 ; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
893 ; X86-NEXT: leal {{[0-9]+}}(%esp), %eax
894 ; X86-NEXT: pushl %ebx
895 ; X86-NEXT: pushl %ebx
896 ; X86-NEXT: pushl %ebx
897 ; X86-NEXT: pushl %edx
898 ; X86-NEXT: pushl %edi
899 ; X86-NEXT: pushl %edi
900 ; X86-NEXT: pushl %ecx
902 ; X86-NEXT: pushl %eax
903 ; X86-NEXT: calll __modti3
904 ; X86-NEXT: addl $32, %esp
905 ; X86-NEXT: movl 40(%ebp), %esi
906 ; X86-NEXT: sarl $31, %esi
907 ; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
908 ; X86-NEXT: movl 24(%ebp), %ecx
909 ; X86-NEXT: movl %ecx, %eax
910 ; X86-NEXT: sarl $31, %eax
911 ; X86-NEXT: leal (%ecx,%ecx), %edx
912 ; X86-NEXT: shrl $31, %ecx
913 ; X86-NEXT: shldl $31, %edx, %ecx
914 ; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
915 ; X86-NEXT: leal {{[0-9]+}}(%esp), %edx
916 ; X86-NEXT: pushl %esi
917 ; X86-NEXT: pushl %esi
918 ; X86-NEXT: pushl %esi
919 ; X86-NEXT: pushl 40(%ebp)
920 ; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
921 ; X86-NEXT: pushl %eax
922 ; X86-NEXT: pushl %eax
923 ; X86-NEXT: pushl %ecx
925 ; X86-NEXT: pushl %edx
926 ; X86-NEXT: calll __divti3
927 ; X86-NEXT: addl $32, %esp
928 ; X86-NEXT: leal {{[0-9]+}}(%esp), %eax
929 ; X86-NEXT: pushl %ebx
930 ; X86-NEXT: pushl %ebx
931 ; X86-NEXT: pushl %ebx
932 ; X86-NEXT: pushl 28(%ebp)
933 ; X86-NEXT: pushl %edi
934 ; X86-NEXT: pushl %edi
935 ; X86-NEXT: pushl {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Reload
937 ; X86-NEXT: pushl %eax
938 ; X86-NEXT: calll __divti3
939 ; X86-NEXT: addl $32, %esp
940 ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
941 ; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
942 ; X86-NEXT: subl $1, %eax
943 ; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
944 ; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
945 ; X86-NEXT: sbbl $0, %ecx
946 ; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
947 ; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
948 ; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
949 ; X86-NEXT: sbbl $0, %ecx
950 ; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
951 ; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
952 ; X86-NEXT: sbbl $0, %edx
953 ; X86-NEXT: testl %ebx, %ebx
955 ; X86-NEXT: testl %edi, %edi
957 ; X86-NEXT: xorb %bl, %bh
958 ; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
959 ; X86-NEXT: orl {{[0-9]+}}(%esp), %edi
960 ; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
961 ; X86-NEXT: orl {{[0-9]+}}(%esp), %esi
962 ; X86-NEXT: orl %edi, %esi
963 ; X86-NEXT: setne %bl
964 ; X86-NEXT: testb %bh, %bl
965 ; X86-NEXT: cmovel {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
966 ; X86-NEXT: cmovel {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
967 ; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
968 ; X86-NEXT: cmovel {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
969 ; X86-NEXT: cmovel {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
970 ; X86-NEXT: xorl %ebx, %ebx
971 ; X86-NEXT: cmpl $-1, %eax
972 ; X86-NEXT: movl %edi, %esi
973 ; X86-NEXT: sbbl $0, %esi
974 ; X86-NEXT: movl %ecx, %esi
975 ; X86-NEXT: sbbl $0, %esi
976 ; X86-NEXT: movl %edx, %esi
977 ; X86-NEXT: sbbl $0, %esi
978 ; X86-NEXT: cmovgel %ebx, %edx
979 ; X86-NEXT: cmovgel %ebx, %ecx
980 ; X86-NEXT: cmovgel %ebx, %edi
981 ; X86-NEXT: movl $-1, %esi
982 ; X86-NEXT: cmovgel %esi, %eax
983 ; X86-NEXT: movl %eax, %esi
984 ; X86-NEXT: negl %esi
985 ; X86-NEXT: movl $-1, %esi
986 ; X86-NEXT: sbbl %edi, %esi
987 ; X86-NEXT: movl $-1, %esi
988 ; X86-NEXT: sbbl %ecx, %esi
989 ; X86-NEXT: movl $-1, %ecx
990 ; X86-NEXT: sbbl %edx, %ecx
991 ; X86-NEXT: cmovgel %ebx, %eax
992 ; X86-NEXT: movl $-1, %edx
993 ; X86-NEXT: cmovgel %edx, %edi
994 ; X86-NEXT: shldl $31, %eax, %edi
995 ; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
996 ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
997 ; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
998 ; X86-NEXT: subl $1, %eax
999 ; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
1000 ; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
1001 ; X86-NEXT: sbbl $0, %ecx
1002 ; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
1003 ; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
1004 ; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
1005 ; X86-NEXT: sbbl $0, %edi
1006 ; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
1007 ; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
1008 ; X86-NEXT: sbbl $0, %edx
1009 ; X86-NEXT: cmpl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Reload
1010 ; X86-NEXT: sets %bl
1011 ; X86-NEXT: cmpl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Reload
1012 ; X86-NEXT: sets %bh
1013 ; X86-NEXT: xorb %bl, %bh
1014 ; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
1015 ; X86-NEXT: orl {{[0-9]+}}(%esp), %ecx
1016 ; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
1017 ; X86-NEXT: orl {{[0-9]+}}(%esp), %esi
1018 ; X86-NEXT: orl %ecx, %esi
1019 ; X86-NEXT: setne %cl
1020 ; X86-NEXT: testb %bh, %cl
1021 ; X86-NEXT: cmovel {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
1022 ; X86-NEXT: cmovel {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
1023 ; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
1024 ; X86-NEXT: cmovel {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
1025 ; X86-NEXT: cmovel {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
1026 ; X86-NEXT: cmpl $-1, %eax
1027 ; X86-NEXT: movl %esi, %ecx
1028 ; X86-NEXT: sbbl $0, %ecx
1029 ; X86-NEXT: movl %edi, %ecx
1030 ; X86-NEXT: sbbl $0, %ecx
1031 ; X86-NEXT: movl %edx, %ecx
1032 ; X86-NEXT: sbbl $0, %ecx
1033 ; X86-NEXT: movl $0, %ecx
1034 ; X86-NEXT: cmovgel %ecx, %edx
1035 ; X86-NEXT: cmovgel %ecx, %edi
1036 ; X86-NEXT: cmovgel %ecx, %esi
1037 ; X86-NEXT: movl $-1, %ebx
1038 ; X86-NEXT: cmovgel %ebx, %eax
1039 ; X86-NEXT: movl %eax, %ecx
1040 ; X86-NEXT: negl %ecx
1041 ; X86-NEXT: movl $-1, %ecx
1042 ; X86-NEXT: sbbl %esi, %ecx
1043 ; X86-NEXT: movl $-1, %ecx
1044 ; X86-NEXT: sbbl %edi, %ecx
1045 ; X86-NEXT: movl $-1, %ecx
1046 ; X86-NEXT: sbbl %edx, %ecx
1047 ; X86-NEXT: movl $0, %ecx
1048 ; X86-NEXT: cmovgel %ecx, %eax
1049 ; X86-NEXT: cmovgel %ebx, %esi
1050 ; X86-NEXT: shldl $31, %eax, %esi
1051 ; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
1052 ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
1053 ; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
1054 ; X86-NEXT: subl $1, %eax
1055 ; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
1056 ; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
1057 ; X86-NEXT: sbbl $0, %ecx
1058 ; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
1059 ; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
1060 ; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
1061 ; X86-NEXT: sbbl $0, %edi
1062 ; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
1063 ; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
1064 ; X86-NEXT: sbbl $0, %edx
1065 ; X86-NEXT: cmpl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Reload
1066 ; X86-NEXT: sets %bl
1067 ; X86-NEXT: cmpl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Reload
1068 ; X86-NEXT: sets %bh
1069 ; X86-NEXT: xorb %bl, %bh
1070 ; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
1071 ; X86-NEXT: orl {{[0-9]+}}(%esp), %ecx
1072 ; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
1073 ; X86-NEXT: orl {{[0-9]+}}(%esp), %esi
1074 ; X86-NEXT: orl %ecx, %esi
1075 ; X86-NEXT: setne %cl
1076 ; X86-NEXT: testb %bh, %cl
1077 ; X86-NEXT: cmovel {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
1078 ; X86-NEXT: cmovel {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
1079 ; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
1080 ; X86-NEXT: cmovel {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
1081 ; X86-NEXT: cmovel {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
1082 ; X86-NEXT: cmpl $-1, %eax
1083 ; X86-NEXT: movl %ebx, %ecx
1084 ; X86-NEXT: sbbl $0, %ecx
1085 ; X86-NEXT: movl %edi, %ecx
1086 ; X86-NEXT: sbbl $0, %ecx
1087 ; X86-NEXT: movl %edx, %ecx
1088 ; X86-NEXT: sbbl $0, %ecx
1089 ; X86-NEXT: movl $0, %ecx
1090 ; X86-NEXT: cmovgel %ecx, %edx
1091 ; X86-NEXT: cmovgel %ecx, %edi
1092 ; X86-NEXT: cmovgel %ecx, %ebx
1093 ; X86-NEXT: movl $-1, %esi
1094 ; X86-NEXT: cmovgel %esi, %eax
1095 ; X86-NEXT: movl %eax, %ecx
1096 ; X86-NEXT: negl %ecx
1097 ; X86-NEXT: movl $-1, %ecx
1098 ; X86-NEXT: sbbl %ebx, %ecx
1099 ; X86-NEXT: movl $-1, %ecx
1100 ; X86-NEXT: sbbl %edi, %ecx
1101 ; X86-NEXT: movl $-1, %ecx
1102 ; X86-NEXT: sbbl %edx, %ecx
1103 ; X86-NEXT: movl $0, %ecx
1104 ; X86-NEXT: cmovgel %ecx, %eax
1105 ; X86-NEXT: cmovgel %esi, %ebx
1106 ; X86-NEXT: shldl $31, %eax, %ebx
1107 ; X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
1108 ; X86-NEXT: movl {{[0-9]+}}(%esp), %ebx
1109 ; X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
1110 ; X86-NEXT: subl $1, %ebx
1111 ; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
1112 ; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
1113 ; X86-NEXT: sbbl $0, %edi
1114 ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
1115 ; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
1116 ; X86-NEXT: sbbl $0, %eax
1117 ; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
1118 ; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
1119 ; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
1120 ; X86-NEXT: sbbl $0, %esi
1121 ; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
1122 ; X86-NEXT: testl %ecx, %ecx
1123 ; X86-NEXT: sets %al
1124 ; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
1125 ; X86-NEXT: testl %edx, %edx
1126 ; X86-NEXT: sets %ah
1127 ; X86-NEXT: xorb %al, %ah
1128 ; X86-NEXT: movb %ah, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Spill
1129 ; X86-NEXT: leal {{[0-9]+}}(%esp), %eax
1130 ; X86-NEXT: pushl %ecx
1131 ; X86-NEXT: pushl %ecx
1132 ; X86-NEXT: pushl %ecx
1133 ; X86-NEXT: pushl 40(%ebp)
1134 ; X86-NEXT: pushl %edx
1135 ; X86-NEXT: pushl %edx
1136 ; X86-NEXT: pushl {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Reload
1137 ; X86-NEXT: pushl $0
1138 ; X86-NEXT: pushl %eax
1139 ; X86-NEXT: calll __modti3
1140 ; X86-NEXT: addl $32, %esp
1141 ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
1142 ; X86-NEXT: orl {{[0-9]+}}(%esp), %eax
1143 ; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
1144 ; X86-NEXT: orl {{[0-9]+}}(%esp), %ecx
1145 ; X86-NEXT: orl %eax, %ecx
1146 ; X86-NEXT: setne %al
1147 ; X86-NEXT: testb %al, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Reload
1148 ; X86-NEXT: cmovel {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
1149 ; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
1150 ; X86-NEXT: cmovel {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
1151 ; X86-NEXT: cmovel {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
1152 ; X86-NEXT: cmovel {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
1153 ; X86-NEXT: cmpl $-1, %ebx
1154 ; X86-NEXT: movl %edi, %eax
1155 ; X86-NEXT: sbbl $0, %eax
1156 ; X86-NEXT: movl %ecx, %eax
1157 ; X86-NEXT: sbbl $0, %eax
1158 ; X86-NEXT: movl %esi, %eax
1159 ; X86-NEXT: sbbl $0, %eax
1160 ; X86-NEXT: movl $0, %eax
1161 ; X86-NEXT: cmovgel %eax, %esi
1162 ; X86-NEXT: cmovgel %eax, %ecx
1163 ; X86-NEXT: cmovgel %eax, %edi
1164 ; X86-NEXT: movl $-1, %edx
1165 ; X86-NEXT: cmovgel %edx, %ebx
1166 ; X86-NEXT: movl %ebx, %eax
1167 ; X86-NEXT: negl %eax
1168 ; X86-NEXT: movl $-1, %eax
1169 ; X86-NEXT: sbbl %edi, %eax
1170 ; X86-NEXT: movl $-1, %eax
1171 ; X86-NEXT: sbbl %ecx, %eax
1172 ; X86-NEXT: movl $-1, %eax
1173 ; X86-NEXT: sbbl %esi, %eax
1174 ; X86-NEXT: movl $0, %eax
1175 ; X86-NEXT: cmovgel %eax, %ebx
1176 ; X86-NEXT: cmovgel %edx, %edi
1177 ; X86-NEXT: shldl $31, %ebx, %edi
1178 ; X86-NEXT: movl 8(%ebp), %eax
1179 ; X86-NEXT: movl %edi, 12(%eax)
1180 ; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
1181 ; X86-NEXT: movl %ecx, 8(%eax)
1182 ; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
1183 ; X86-NEXT: movl %ecx, 4(%eax)
1184 ; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
1185 ; X86-NEXT: movl %ecx, (%eax)
1186 ; X86-NEXT: leal -12(%ebp), %esp
1187 ; X86-NEXT: popl %esi
1188 ; X86-NEXT: popl %edi
1189 ; X86-NEXT: popl %ebx
1190 ; X86-NEXT: popl %ebp
1192 %tmp = call <4 x i32> @llvm.sdiv.fix.sat.v4i32(<4 x i32> %x, <4 x i32> %y, i32 31)