1 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2 ; RUN: llc -O3 -mtriple=i686-pc-linux -mattr=+cmov < %s | FileCheck %s --check-prefix=X87
3 ; RUN: llc -O3 -mtriple=i686-pc-linux -mattr=sse2 < %s | FileCheck %s --check-prefix=X86-SSE
4 ; RUN: llc -O3 -mtriple=x86_64-pc-linux < %s | FileCheck %s --check-prefix=SSE
5 ; RUN: llc -O3 -mtriple=x86_64-pc-linux -mattr=+avx < %s | FileCheck %s --check-prefixes=AVX,AVX1
6 ; RUN: llc -O3 -mtriple=x86_64-pc-linux -mattr=+avx512f < %s | FileCheck %s --check-prefixes=AVX,AVX512
7 ; RUN: llc -O3 -mtriple=x86_64-pc-linux -mattr=+avx512dq < %s | FileCheck %s --check-prefixes=AVX,AVX512
9 ; Verify that constants aren't folded to inexact results when the rounding mode
13 ; // Because 0.1 cannot be represented exactly, this shouldn't be folded.
17 define double @f1() #0 {
19 ; X87: # %bb.0: # %entry
21 ; X87-NEXT: fdivs {{\.?LCPI[0-9]+_[0-9]+}}
26 ; X86-SSE: # %bb.0: # %entry
27 ; X86-SSE-NEXT: subl $12, %esp
28 ; X86-SSE-NEXT: .cfi_def_cfa_offset 16
29 ; X86-SSE-NEXT: movsd {{.*#+}} xmm0 = [1.0E+0,0.0E+0]
30 ; X86-SSE-NEXT: divsd {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
31 ; X86-SSE-NEXT: movsd %xmm0, (%esp)
32 ; X86-SSE-NEXT: fldl (%esp)
34 ; X86-SSE-NEXT: addl $12, %esp
35 ; X86-SSE-NEXT: .cfi_def_cfa_offset 4
39 ; SSE: # %bb.0: # %entry
40 ; SSE-NEXT: movsd {{.*#+}} xmm0 = [1.0E+0,0.0E+0]
41 ; SSE-NEXT: divsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
45 ; AVX: # %bb.0: # %entry
46 ; AVX-NEXT: vmovsd {{.*#+}} xmm0 = [1.0E+0,0.0E+0]
47 ; AVX-NEXT: vdivsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
50 %div = call double @llvm.experimental.constrained.fdiv.f64(
53 metadata !"round.dynamic",
54 metadata !"fpexcept.strict") #0
58 ; Verify that 'a - 0' isn't simplified to 'a' when the rounding mode is unknown.
60 ; double f2(double a) {
61 ; // Because the result of '0 - 0' is negative zero if rounding mode is
62 ; // downward, this shouldn't be simplified.
66 define double @f2(double %a) #0 {
68 ; X87: # %bb.0: # %entry
70 ; X87-NEXT: fsubrl {{[0-9]+}}(%esp)
75 ; X86-SSE: # %bb.0: # %entry
76 ; X86-SSE-NEXT: subl $12, %esp
77 ; X86-SSE-NEXT: .cfi_def_cfa_offset 16
78 ; X86-SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
79 ; X86-SSE-NEXT: xorpd %xmm1, %xmm1
80 ; X86-SSE-NEXT: subsd %xmm1, %xmm0
81 ; X86-SSE-NEXT: movsd %xmm0, (%esp)
82 ; X86-SSE-NEXT: fldl (%esp)
84 ; X86-SSE-NEXT: addl $12, %esp
85 ; X86-SSE-NEXT: .cfi_def_cfa_offset 4
89 ; SSE: # %bb.0: # %entry
90 ; SSE-NEXT: xorpd %xmm1, %xmm1
91 ; SSE-NEXT: subsd %xmm1, %xmm0
95 ; AVX: # %bb.0: # %entry
96 ; AVX-NEXT: vxorpd %xmm1, %xmm1, %xmm1
97 ; AVX-NEXT: vsubsd %xmm1, %xmm0, %xmm0
100 %sub = call double @llvm.experimental.constrained.fsub.f64(
103 metadata !"round.dynamic",
104 metadata !"fpexcept.strict") #0
108 ; Verify that '-((-a)*b)' isn't simplified to 'a*b' when the rounding mode is
111 ; double f3(double a, double b) {
112 ; // Because the intermediate value involved in this calculation may require
113 ; // rounding, this shouldn't be simplified.
117 define double @f3(double %a, double %b) #0 {
119 ; X87: # %bb.0: # %entry
122 ; X87-NEXT: fld %st(0)
123 ; X87-NEXT: fsubl {{[0-9]+}}(%esp)
124 ; X87-NEXT: fmull {{[0-9]+}}(%esp)
125 ; X87-NEXT: fsubrp %st, %st(1)
130 ; X86-SSE: # %bb.0: # %entry
131 ; X86-SSE-NEXT: subl $12, %esp
132 ; X86-SSE-NEXT: .cfi_def_cfa_offset 16
133 ; X86-SSE-NEXT: movsd {{.*#+}} xmm0 = [-0.0E+0,0.0E+0]
134 ; X86-SSE-NEXT: movapd %xmm0, %xmm1
135 ; X86-SSE-NEXT: subsd {{[0-9]+}}(%esp), %xmm1
136 ; X86-SSE-NEXT: mulsd {{[0-9]+}}(%esp), %xmm1
137 ; X86-SSE-NEXT: subsd %xmm1, %xmm0
138 ; X86-SSE-NEXT: movsd %xmm0, (%esp)
139 ; X86-SSE-NEXT: fldl (%esp)
141 ; X86-SSE-NEXT: addl $12, %esp
142 ; X86-SSE-NEXT: .cfi_def_cfa_offset 4
146 ; SSE: # %bb.0: # %entry
147 ; SSE-NEXT: movsd {{.*#+}} xmm2 = [-0.0E+0,0.0E+0]
148 ; SSE-NEXT: movapd %xmm2, %xmm3
149 ; SSE-NEXT: subsd %xmm0, %xmm3
150 ; SSE-NEXT: mulsd %xmm1, %xmm3
151 ; SSE-NEXT: subsd %xmm3, %xmm2
152 ; SSE-NEXT: movapd %xmm2, %xmm0
156 ; AVX: # %bb.0: # %entry
157 ; AVX-NEXT: vmovsd {{.*#+}} xmm2 = [-0.0E+0,0.0E+0]
158 ; AVX-NEXT: vsubsd %xmm0, %xmm2, %xmm0
159 ; AVX-NEXT: vmulsd %xmm1, %xmm0, %xmm0
160 ; AVX-NEXT: vsubsd %xmm0, %xmm2, %xmm0
163 %sub = call double @llvm.experimental.constrained.fsub.f64(
164 double -0.000000e+00, double %a,
165 metadata !"round.dynamic",
166 metadata !"fpexcept.strict") #0
167 %mul = call double @llvm.experimental.constrained.fmul.f64(
168 double %sub, double %b,
169 metadata !"round.dynamic",
170 metadata !"fpexcept.strict") #0
171 %ret = call double @llvm.experimental.constrained.fsub.f64(
172 double -0.000000e+00,
174 metadata !"round.dynamic",
175 metadata !"fpexcept.strict") #0
179 ; Verify that FP operations are not performed speculatively when FP exceptions
180 ; are not being ignored.
182 ; double f4(int n, double a) {
183 ; // Because a + 1 may overflow, this should not be simplified.
190 define double @f4(i32 %n, double %a) #0 {
192 ; X87: # %bb.0: # %entry
193 ; X87-NEXT: fldl {{[0-9]+}}(%esp)
195 ; X87-NEXT: cmpl $0, {{[0-9]+}}(%esp)
196 ; X87-NEXT: jle .LBB3_2
197 ; X87-NEXT: # %bb.1: # %if.then
199 ; X87-NEXT: faddp %st, %st(1)
201 ; X87-NEXT: .LBB3_2: # %if.end
205 ; X86-SSE: # %bb.0: # %entry
206 ; X86-SSE-NEXT: subl $12, %esp
207 ; X86-SSE-NEXT: .cfi_def_cfa_offset 16
208 ; X86-SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
209 ; X86-SSE-NEXT: cmpl $0, {{[0-9]+}}(%esp)
210 ; X86-SSE-NEXT: jle .LBB3_2
211 ; X86-SSE-NEXT: # %bb.1: # %if.then
212 ; X86-SSE-NEXT: addsd {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
213 ; X86-SSE-NEXT: .LBB3_2: # %if.end
214 ; X86-SSE-NEXT: movsd %xmm0, (%esp)
215 ; X86-SSE-NEXT: fldl (%esp)
217 ; X86-SSE-NEXT: addl $12, %esp
218 ; X86-SSE-NEXT: .cfi_def_cfa_offset 4
222 ; SSE: # %bb.0: # %entry
223 ; SSE-NEXT: testl %edi, %edi
224 ; SSE-NEXT: jle .LBB3_2
225 ; SSE-NEXT: # %bb.1: # %if.then
226 ; SSE-NEXT: addsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
227 ; SSE-NEXT: .LBB3_2: # %if.end
231 ; AVX: # %bb.0: # %entry
232 ; AVX-NEXT: testl %edi, %edi
233 ; AVX-NEXT: jle .LBB3_2
234 ; AVX-NEXT: # %bb.1: # %if.then
235 ; AVX-NEXT: vaddsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
236 ; AVX-NEXT: .LBB3_2: # %if.end
239 %cmp = icmp sgt i32 %n, 0
240 br i1 %cmp, label %if.then, label %if.end
243 %add = call double @llvm.experimental.constrained.fadd.f64(
244 double 1.000000e+00, double %a,
245 metadata !"round.dynamic",
246 metadata !"fpexcept.strict") #0
250 %a.0 = phi double [%add, %if.then], [ %a, %entry ]
254 ; Verify that sqrt(42.0) isn't simplified when the rounding mode is unknown.
255 define double @f5() #0 {
257 ; X87: # %bb.0: # %entry
258 ; X87-NEXT: flds {{\.?LCPI[0-9]+_[0-9]+}}
264 ; X86-SSE: # %bb.0: # %entry
265 ; X86-SSE-NEXT: subl $12, %esp
266 ; X86-SSE-NEXT: .cfi_def_cfa_offset 16
267 ; X86-SSE-NEXT: movsd {{.*#+}} xmm0 = [4.2E+1,0.0E+0]
268 ; X86-SSE-NEXT: sqrtsd %xmm0, %xmm0
269 ; X86-SSE-NEXT: movsd %xmm0, (%esp)
270 ; X86-SSE-NEXT: fldl (%esp)
272 ; X86-SSE-NEXT: addl $12, %esp
273 ; X86-SSE-NEXT: .cfi_def_cfa_offset 4
277 ; SSE: # %bb.0: # %entry
278 ; SSE-NEXT: movsd {{.*#+}} xmm0 = [4.2E+1,0.0E+0]
279 ; SSE-NEXT: sqrtsd %xmm0, %xmm0
283 ; AVX: # %bb.0: # %entry
284 ; AVX-NEXT: vmovsd {{.*#+}} xmm0 = [4.2E+1,0.0E+0]
285 ; AVX-NEXT: vsqrtsd %xmm0, %xmm0, %xmm0
288 %result = call double @llvm.experimental.constrained.sqrt.f64(double 42.0,
289 metadata !"round.dynamic",
290 metadata !"fpexcept.strict") #0
294 ; Verify that pow(42.1, 3.0) isn't simplified when the rounding mode is unknown.
295 define double @f6() #0 {
297 ; X87: # %bb.0: # %entry
298 ; X87-NEXT: subl $28, %esp
299 ; X87-NEXT: .cfi_def_cfa_offset 32
300 ; X87-NEXT: flds {{\.?LCPI[0-9]+_[0-9]+}}
301 ; X87-NEXT: fstpl {{[0-9]+}}(%esp)
302 ; X87-NEXT: fldl {{\.?LCPI[0-9]+_[0-9]+}}
303 ; X87-NEXT: fstpl (%esp)
305 ; X87-NEXT: calll pow
306 ; X87-NEXT: addl $28, %esp
307 ; X87-NEXT: .cfi_def_cfa_offset 4
311 ; X86-SSE: # %bb.0: # %entry
312 ; X86-SSE-NEXT: subl $28, %esp
313 ; X86-SSE-NEXT: .cfi_def_cfa_offset 32
314 ; X86-SSE-NEXT: movsd {{.*#+}} xmm0 = [3.0E+0,0.0E+0]
315 ; X86-SSE-NEXT: movsd %xmm0, {{[0-9]+}}(%esp)
316 ; X86-SSE-NEXT: movsd {{.*#+}} xmm0 = [4.2100000000000001E+1,0.0E+0]
317 ; X86-SSE-NEXT: movsd %xmm0, (%esp)
318 ; X86-SSE-NEXT: calll pow
319 ; X86-SSE-NEXT: addl $28, %esp
320 ; X86-SSE-NEXT: .cfi_def_cfa_offset 4
324 ; SSE: # %bb.0: # %entry
325 ; SSE-NEXT: pushq %rax
326 ; SSE-NEXT: .cfi_def_cfa_offset 16
327 ; SSE-NEXT: movsd {{.*#+}} xmm0 = [4.2100000000000001E+1,0.0E+0]
328 ; SSE-NEXT: movsd {{.*#+}} xmm1 = [3.0E+0,0.0E+0]
329 ; SSE-NEXT: callq pow@PLT
330 ; SSE-NEXT: popq %rax
331 ; SSE-NEXT: .cfi_def_cfa_offset 8
335 ; AVX: # %bb.0: # %entry
336 ; AVX-NEXT: pushq %rax
337 ; AVX-NEXT: .cfi_def_cfa_offset 16
338 ; AVX-NEXT: vmovsd {{.*#+}} xmm0 = [4.2100000000000001E+1,0.0E+0]
339 ; AVX-NEXT: vmovsd {{.*#+}} xmm1 = [3.0E+0,0.0E+0]
340 ; AVX-NEXT: callq pow@PLT
341 ; AVX-NEXT: popq %rax
342 ; AVX-NEXT: .cfi_def_cfa_offset 8
345 %result = call double @llvm.experimental.constrained.pow.f64(double 42.1,
347 metadata !"round.dynamic",
348 metadata !"fpexcept.strict") #0
352 ; Verify that powi(42.1, 3) isn't simplified when the rounding mode is unknown.
353 define double @f7() #0 {
355 ; X87: # %bb.0: # %entry
356 ; X87-NEXT: subl $12, %esp
357 ; X87-NEXT: .cfi_def_cfa_offset 16
358 ; X87-NEXT: fldl {{\.?LCPI[0-9]+_[0-9]+}}
359 ; X87-NEXT: fstpl (%esp)
361 ; X87-NEXT: movl $3, {{[0-9]+}}(%esp)
362 ; X87-NEXT: calll __powidf2
363 ; X87-NEXT: addl $12, %esp
364 ; X87-NEXT: .cfi_def_cfa_offset 4
368 ; X86-SSE: # %bb.0: # %entry
369 ; X86-SSE-NEXT: subl $12, %esp
370 ; X86-SSE-NEXT: .cfi_def_cfa_offset 16
371 ; X86-SSE-NEXT: movsd {{.*#+}} xmm0 = [4.2100000000000001E+1,0.0E+0]
372 ; X86-SSE-NEXT: movsd %xmm0, (%esp)
373 ; X86-SSE-NEXT: movl $3, {{[0-9]+}}(%esp)
374 ; X86-SSE-NEXT: calll __powidf2
375 ; X86-SSE-NEXT: addl $12, %esp
376 ; X86-SSE-NEXT: .cfi_def_cfa_offset 4
380 ; SSE: # %bb.0: # %entry
381 ; SSE-NEXT: pushq %rax
382 ; SSE-NEXT: .cfi_def_cfa_offset 16
383 ; SSE-NEXT: movsd {{.*#+}} xmm0 = [4.2100000000000001E+1,0.0E+0]
384 ; SSE-NEXT: movl $3, %edi
385 ; SSE-NEXT: callq __powidf2@PLT
386 ; SSE-NEXT: popq %rax
387 ; SSE-NEXT: .cfi_def_cfa_offset 8
391 ; AVX: # %bb.0: # %entry
392 ; AVX-NEXT: pushq %rax
393 ; AVX-NEXT: .cfi_def_cfa_offset 16
394 ; AVX-NEXT: vmovsd {{.*#+}} xmm0 = [4.2100000000000001E+1,0.0E+0]
395 ; AVX-NEXT: movl $3, %edi
396 ; AVX-NEXT: callq __powidf2@PLT
397 ; AVX-NEXT: popq %rax
398 ; AVX-NEXT: .cfi_def_cfa_offset 8
401 %result = call double @llvm.experimental.constrained.powi.f64(double 42.1,
403 metadata !"round.dynamic",
404 metadata !"fpexcept.strict") #0
408 ; Verify that sin(42.0) isn't simplified when the rounding mode is unknown.
409 define double @f8() #0 {
411 ; X87: # %bb.0: # %entry
412 ; X87-NEXT: subl $12, %esp
413 ; X87-NEXT: .cfi_def_cfa_offset 16
414 ; X87-NEXT: flds {{\.?LCPI[0-9]+_[0-9]+}}
415 ; X87-NEXT: fstpl (%esp)
417 ; X87-NEXT: calll sin
418 ; X87-NEXT: addl $12, %esp
419 ; X87-NEXT: .cfi_def_cfa_offset 4
423 ; X86-SSE: # %bb.0: # %entry
424 ; X86-SSE-NEXT: subl $12, %esp
425 ; X86-SSE-NEXT: .cfi_def_cfa_offset 16
426 ; X86-SSE-NEXT: movsd {{.*#+}} xmm0 = [4.2E+1,0.0E+0]
427 ; X86-SSE-NEXT: movsd %xmm0, (%esp)
428 ; X86-SSE-NEXT: calll sin
429 ; X86-SSE-NEXT: addl $12, %esp
430 ; X86-SSE-NEXT: .cfi_def_cfa_offset 4
434 ; SSE: # %bb.0: # %entry
435 ; SSE-NEXT: pushq %rax
436 ; SSE-NEXT: .cfi_def_cfa_offset 16
437 ; SSE-NEXT: movsd {{.*#+}} xmm0 = [4.2E+1,0.0E+0]
438 ; SSE-NEXT: callq sin@PLT
439 ; SSE-NEXT: popq %rax
440 ; SSE-NEXT: .cfi_def_cfa_offset 8
444 ; AVX: # %bb.0: # %entry
445 ; AVX-NEXT: pushq %rax
446 ; AVX-NEXT: .cfi_def_cfa_offset 16
447 ; AVX-NEXT: vmovsd {{.*#+}} xmm0 = [4.2E+1,0.0E+0]
448 ; AVX-NEXT: callq sin@PLT
449 ; AVX-NEXT: popq %rax
450 ; AVX-NEXT: .cfi_def_cfa_offset 8
453 %result = call double @llvm.experimental.constrained.sin.f64(double 42.0,
454 metadata !"round.dynamic",
455 metadata !"fpexcept.strict") #0
459 ; Verify that cos(42.0) isn't simplified when the rounding mode is unknown.
460 define double @f9() #0 {
462 ; X87: # %bb.0: # %entry
463 ; X87-NEXT: subl $12, %esp
464 ; X87-NEXT: .cfi_def_cfa_offset 16
465 ; X87-NEXT: flds {{\.?LCPI[0-9]+_[0-9]+}}
466 ; X87-NEXT: fstpl (%esp)
468 ; X87-NEXT: calll cos
469 ; X87-NEXT: addl $12, %esp
470 ; X87-NEXT: .cfi_def_cfa_offset 4
474 ; X86-SSE: # %bb.0: # %entry
475 ; X86-SSE-NEXT: subl $12, %esp
476 ; X86-SSE-NEXT: .cfi_def_cfa_offset 16
477 ; X86-SSE-NEXT: movsd {{.*#+}} xmm0 = [4.2E+1,0.0E+0]
478 ; X86-SSE-NEXT: movsd %xmm0, (%esp)
479 ; X86-SSE-NEXT: calll cos
480 ; X86-SSE-NEXT: addl $12, %esp
481 ; X86-SSE-NEXT: .cfi_def_cfa_offset 4
485 ; SSE: # %bb.0: # %entry
486 ; SSE-NEXT: pushq %rax
487 ; SSE-NEXT: .cfi_def_cfa_offset 16
488 ; SSE-NEXT: movsd {{.*#+}} xmm0 = [4.2E+1,0.0E+0]
489 ; SSE-NEXT: callq cos@PLT
490 ; SSE-NEXT: popq %rax
491 ; SSE-NEXT: .cfi_def_cfa_offset 8
495 ; AVX: # %bb.0: # %entry
496 ; AVX-NEXT: pushq %rax
497 ; AVX-NEXT: .cfi_def_cfa_offset 16
498 ; AVX-NEXT: vmovsd {{.*#+}} xmm0 = [4.2E+1,0.0E+0]
499 ; AVX-NEXT: callq cos@PLT
500 ; AVX-NEXT: popq %rax
501 ; AVX-NEXT: .cfi_def_cfa_offset 8
504 %result = call double @llvm.experimental.constrained.cos.f64(double 42.0,
505 metadata !"round.dynamic",
506 metadata !"fpexcept.strict") #0
510 ; Verify that exp(42.0) isn't simplified when the rounding mode is unknown.
511 define double @f10() #0 {
513 ; X87: # %bb.0: # %entry
514 ; X87-NEXT: subl $12, %esp
515 ; X87-NEXT: .cfi_def_cfa_offset 16
516 ; X87-NEXT: flds {{\.?LCPI[0-9]+_[0-9]+}}
517 ; X87-NEXT: fstpl (%esp)
519 ; X87-NEXT: calll exp
520 ; X87-NEXT: addl $12, %esp
521 ; X87-NEXT: .cfi_def_cfa_offset 4
524 ; X86-SSE-LABEL: f10:
525 ; X86-SSE: # %bb.0: # %entry
526 ; X86-SSE-NEXT: subl $12, %esp
527 ; X86-SSE-NEXT: .cfi_def_cfa_offset 16
528 ; X86-SSE-NEXT: movsd {{.*#+}} xmm0 = [4.2E+1,0.0E+0]
529 ; X86-SSE-NEXT: movsd %xmm0, (%esp)
530 ; X86-SSE-NEXT: calll exp
531 ; X86-SSE-NEXT: addl $12, %esp
532 ; X86-SSE-NEXT: .cfi_def_cfa_offset 4
536 ; SSE: # %bb.0: # %entry
537 ; SSE-NEXT: pushq %rax
538 ; SSE-NEXT: .cfi_def_cfa_offset 16
539 ; SSE-NEXT: movsd {{.*#+}} xmm0 = [4.2E+1,0.0E+0]
540 ; SSE-NEXT: callq exp@PLT
541 ; SSE-NEXT: popq %rax
542 ; SSE-NEXT: .cfi_def_cfa_offset 8
546 ; AVX: # %bb.0: # %entry
547 ; AVX-NEXT: pushq %rax
548 ; AVX-NEXT: .cfi_def_cfa_offset 16
549 ; AVX-NEXT: vmovsd {{.*#+}} xmm0 = [4.2E+1,0.0E+0]
550 ; AVX-NEXT: callq exp@PLT
551 ; AVX-NEXT: popq %rax
552 ; AVX-NEXT: .cfi_def_cfa_offset 8
555 %result = call double @llvm.experimental.constrained.exp.f64(double 42.0,
556 metadata !"round.dynamic",
557 metadata !"fpexcept.strict") #0
561 ; Verify that exp2(42.1) isn't simplified when the rounding mode is unknown.
562 define double @f11() #0 {
564 ; X87: # %bb.0: # %entry
565 ; X87-NEXT: subl $12, %esp
566 ; X87-NEXT: .cfi_def_cfa_offset 16
567 ; X87-NEXT: fldl {{\.?LCPI[0-9]+_[0-9]+}}
568 ; X87-NEXT: fstpl (%esp)
570 ; X87-NEXT: calll exp2
571 ; X87-NEXT: addl $12, %esp
572 ; X87-NEXT: .cfi_def_cfa_offset 4
575 ; X86-SSE-LABEL: f11:
576 ; X86-SSE: # %bb.0: # %entry
577 ; X86-SSE-NEXT: subl $12, %esp
578 ; X86-SSE-NEXT: .cfi_def_cfa_offset 16
579 ; X86-SSE-NEXT: movsd {{.*#+}} xmm0 = [4.2100000000000001E+1,0.0E+0]
580 ; X86-SSE-NEXT: movsd %xmm0, (%esp)
581 ; X86-SSE-NEXT: calll exp2
582 ; X86-SSE-NEXT: addl $12, %esp
583 ; X86-SSE-NEXT: .cfi_def_cfa_offset 4
587 ; SSE: # %bb.0: # %entry
588 ; SSE-NEXT: pushq %rax
589 ; SSE-NEXT: .cfi_def_cfa_offset 16
590 ; SSE-NEXT: movsd {{.*#+}} xmm0 = [4.2100000000000001E+1,0.0E+0]
591 ; SSE-NEXT: callq exp2@PLT
592 ; SSE-NEXT: popq %rax
593 ; SSE-NEXT: .cfi_def_cfa_offset 8
597 ; AVX: # %bb.0: # %entry
598 ; AVX-NEXT: pushq %rax
599 ; AVX-NEXT: .cfi_def_cfa_offset 16
600 ; AVX-NEXT: vmovsd {{.*#+}} xmm0 = [4.2100000000000001E+1,0.0E+0]
601 ; AVX-NEXT: callq exp2@PLT
602 ; AVX-NEXT: popq %rax
603 ; AVX-NEXT: .cfi_def_cfa_offset 8
606 %result = call double @llvm.experimental.constrained.exp2.f64(double 42.1,
607 metadata !"round.dynamic",
608 metadata !"fpexcept.strict") #0
612 ; Verify that log(42.0) isn't simplified when the rounding mode is unknown.
613 define double @f12() #0 {
615 ; X87: # %bb.0: # %entry
616 ; X87-NEXT: subl $12, %esp
617 ; X87-NEXT: .cfi_def_cfa_offset 16
618 ; X87-NEXT: flds {{\.?LCPI[0-9]+_[0-9]+}}
619 ; X87-NEXT: fstpl (%esp)
621 ; X87-NEXT: calll log
622 ; X87-NEXT: addl $12, %esp
623 ; X87-NEXT: .cfi_def_cfa_offset 4
626 ; X86-SSE-LABEL: f12:
627 ; X86-SSE: # %bb.0: # %entry
628 ; X86-SSE-NEXT: subl $12, %esp
629 ; X86-SSE-NEXT: .cfi_def_cfa_offset 16
630 ; X86-SSE-NEXT: movsd {{.*#+}} xmm0 = [4.2E+1,0.0E+0]
631 ; X86-SSE-NEXT: movsd %xmm0, (%esp)
632 ; X86-SSE-NEXT: calll log
633 ; X86-SSE-NEXT: addl $12, %esp
634 ; X86-SSE-NEXT: .cfi_def_cfa_offset 4
638 ; SSE: # %bb.0: # %entry
639 ; SSE-NEXT: pushq %rax
640 ; SSE-NEXT: .cfi_def_cfa_offset 16
641 ; SSE-NEXT: movsd {{.*#+}} xmm0 = [4.2E+1,0.0E+0]
642 ; SSE-NEXT: callq log@PLT
643 ; SSE-NEXT: popq %rax
644 ; SSE-NEXT: .cfi_def_cfa_offset 8
648 ; AVX: # %bb.0: # %entry
649 ; AVX-NEXT: pushq %rax
650 ; AVX-NEXT: .cfi_def_cfa_offset 16
651 ; AVX-NEXT: vmovsd {{.*#+}} xmm0 = [4.2E+1,0.0E+0]
652 ; AVX-NEXT: callq log@PLT
653 ; AVX-NEXT: popq %rax
654 ; AVX-NEXT: .cfi_def_cfa_offset 8
657 %result = call double @llvm.experimental.constrained.log.f64(double 42.0,
658 metadata !"round.dynamic",
659 metadata !"fpexcept.strict") #0
663 ; Verify that log10(42.0) isn't simplified when the rounding mode is unknown.
664 define double @f13() #0 {
666 ; X87: # %bb.0: # %entry
667 ; X87-NEXT: subl $12, %esp
668 ; X87-NEXT: .cfi_def_cfa_offset 16
669 ; X87-NEXT: flds {{\.?LCPI[0-9]+_[0-9]+}}
670 ; X87-NEXT: fstpl (%esp)
672 ; X87-NEXT: calll log10
673 ; X87-NEXT: addl $12, %esp
674 ; X87-NEXT: .cfi_def_cfa_offset 4
677 ; X86-SSE-LABEL: f13:
678 ; X86-SSE: # %bb.0: # %entry
679 ; X86-SSE-NEXT: subl $12, %esp
680 ; X86-SSE-NEXT: .cfi_def_cfa_offset 16
681 ; X86-SSE-NEXT: movsd {{.*#+}} xmm0 = [4.2E+1,0.0E+0]
682 ; X86-SSE-NEXT: movsd %xmm0, (%esp)
683 ; X86-SSE-NEXT: calll log10
684 ; X86-SSE-NEXT: addl $12, %esp
685 ; X86-SSE-NEXT: .cfi_def_cfa_offset 4
689 ; SSE: # %bb.0: # %entry
690 ; SSE-NEXT: pushq %rax
691 ; SSE-NEXT: .cfi_def_cfa_offset 16
692 ; SSE-NEXT: movsd {{.*#+}} xmm0 = [4.2E+1,0.0E+0]
693 ; SSE-NEXT: callq log10@PLT
694 ; SSE-NEXT: popq %rax
695 ; SSE-NEXT: .cfi_def_cfa_offset 8
699 ; AVX: # %bb.0: # %entry
700 ; AVX-NEXT: pushq %rax
701 ; AVX-NEXT: .cfi_def_cfa_offset 16
702 ; AVX-NEXT: vmovsd {{.*#+}} xmm0 = [4.2E+1,0.0E+0]
703 ; AVX-NEXT: callq log10@PLT
704 ; AVX-NEXT: popq %rax
705 ; AVX-NEXT: .cfi_def_cfa_offset 8
708 %result = call double @llvm.experimental.constrained.log10.f64(double 42.0,
709 metadata !"round.dynamic",
710 metadata !"fpexcept.strict") #0
714 ; Verify that log2(42.0) isn't simplified when the rounding mode is unknown.
715 define double @f14() #0 {
717 ; X87: # %bb.0: # %entry
718 ; X87-NEXT: subl $12, %esp
719 ; X87-NEXT: .cfi_def_cfa_offset 16
720 ; X87-NEXT: flds {{\.?LCPI[0-9]+_[0-9]+}}
721 ; X87-NEXT: fstpl (%esp)
723 ; X87-NEXT: calll log2
724 ; X87-NEXT: addl $12, %esp
725 ; X87-NEXT: .cfi_def_cfa_offset 4
728 ; X86-SSE-LABEL: f14:
729 ; X86-SSE: # %bb.0: # %entry
730 ; X86-SSE-NEXT: subl $12, %esp
731 ; X86-SSE-NEXT: .cfi_def_cfa_offset 16
732 ; X86-SSE-NEXT: movsd {{.*#+}} xmm0 = [4.2E+1,0.0E+0]
733 ; X86-SSE-NEXT: movsd %xmm0, (%esp)
734 ; X86-SSE-NEXT: calll log2
735 ; X86-SSE-NEXT: addl $12, %esp
736 ; X86-SSE-NEXT: .cfi_def_cfa_offset 4
740 ; SSE: # %bb.0: # %entry
741 ; SSE-NEXT: pushq %rax
742 ; SSE-NEXT: .cfi_def_cfa_offset 16
743 ; SSE-NEXT: movsd {{.*#+}} xmm0 = [4.2E+1,0.0E+0]
744 ; SSE-NEXT: callq log2@PLT
745 ; SSE-NEXT: popq %rax
746 ; SSE-NEXT: .cfi_def_cfa_offset 8
750 ; AVX: # %bb.0: # %entry
751 ; AVX-NEXT: pushq %rax
752 ; AVX-NEXT: .cfi_def_cfa_offset 16
753 ; AVX-NEXT: vmovsd {{.*#+}} xmm0 = [4.2E+1,0.0E+0]
754 ; AVX-NEXT: callq log2@PLT
755 ; AVX-NEXT: popq %rax
756 ; AVX-NEXT: .cfi_def_cfa_offset 8
759 %result = call double @llvm.experimental.constrained.log2.f64(double 42.0,
760 metadata !"round.dynamic",
761 metadata !"fpexcept.strict") #0
765 ; Verify that rint(42.1) isn't simplified when the rounding mode is unknown.
766 define double @f15() #0 {
768 ; X87: # %bb.0: # %entry
769 ; X87-NEXT: subl $12, %esp
770 ; X87-NEXT: .cfi_def_cfa_offset 16
771 ; X87-NEXT: fldl {{\.?LCPI[0-9]+_[0-9]+}}
772 ; X87-NEXT: fstpl (%esp)
774 ; X87-NEXT: calll rint
775 ; X87-NEXT: addl $12, %esp
776 ; X87-NEXT: .cfi_def_cfa_offset 4
779 ; X86-SSE-LABEL: f15:
780 ; X86-SSE: # %bb.0: # %entry
781 ; X86-SSE-NEXT: subl $12, %esp
782 ; X86-SSE-NEXT: .cfi_def_cfa_offset 16
783 ; X86-SSE-NEXT: movsd {{.*#+}} xmm0 = [4.2100000000000001E+1,0.0E+0]
784 ; X86-SSE-NEXT: movsd %xmm0, (%esp)
785 ; X86-SSE-NEXT: calll rint
786 ; X86-SSE-NEXT: addl $12, %esp
787 ; X86-SSE-NEXT: .cfi_def_cfa_offset 4
791 ; SSE: # %bb.0: # %entry
792 ; SSE-NEXT: pushq %rax
793 ; SSE-NEXT: .cfi_def_cfa_offset 16
794 ; SSE-NEXT: movsd {{.*#+}} xmm0 = [4.2100000000000001E+1,0.0E+0]
795 ; SSE-NEXT: callq rint@PLT
796 ; SSE-NEXT: popq %rax
797 ; SSE-NEXT: .cfi_def_cfa_offset 8
801 ; AVX: # %bb.0: # %entry
802 ; AVX-NEXT: vmovsd {{.*#+}} xmm0 = [4.2100000000000001E+1,0.0E+0]
803 ; AVX-NEXT: vroundsd $4, %xmm0, %xmm0, %xmm0
806 %result = call double @llvm.experimental.constrained.rint.f64(double 42.1,
807 metadata !"round.dynamic",
808 metadata !"fpexcept.strict") #0
812 ; Verify that nearbyint(42.1) isn't simplified when the rounding mode is
814 define double @f16() #0 {
816 ; X87: # %bb.0: # %entry
817 ; X87-NEXT: subl $12, %esp
818 ; X87-NEXT: .cfi_def_cfa_offset 16
819 ; X87-NEXT: fldl {{\.?LCPI[0-9]+_[0-9]+}}
820 ; X87-NEXT: fstpl (%esp)
822 ; X87-NEXT: calll nearbyint
823 ; X87-NEXT: addl $12, %esp
824 ; X87-NEXT: .cfi_def_cfa_offset 4
827 ; X86-SSE-LABEL: f16:
828 ; X86-SSE: # %bb.0: # %entry
829 ; X86-SSE-NEXT: subl $12, %esp
830 ; X86-SSE-NEXT: .cfi_def_cfa_offset 16
831 ; X86-SSE-NEXT: movsd {{.*#+}} xmm0 = [4.2100000000000001E+1,0.0E+0]
832 ; X86-SSE-NEXT: movsd %xmm0, (%esp)
833 ; X86-SSE-NEXT: calll nearbyint
834 ; X86-SSE-NEXT: addl $12, %esp
835 ; X86-SSE-NEXT: .cfi_def_cfa_offset 4
839 ; SSE: # %bb.0: # %entry
840 ; SSE-NEXT: pushq %rax
841 ; SSE-NEXT: .cfi_def_cfa_offset 16
842 ; SSE-NEXT: movsd {{.*#+}} xmm0 = [4.2100000000000001E+1,0.0E+0]
843 ; SSE-NEXT: callq nearbyint@PLT
844 ; SSE-NEXT: popq %rax
845 ; SSE-NEXT: .cfi_def_cfa_offset 8
849 ; AVX: # %bb.0: # %entry
850 ; AVX-NEXT: vmovsd {{.*#+}} xmm0 = [4.2100000000000001E+1,0.0E+0]
851 ; AVX-NEXT: vroundsd $12, %xmm0, %xmm0, %xmm0
854 %result = call double @llvm.experimental.constrained.nearbyint.f64(
856 metadata !"round.dynamic",
857 metadata !"fpexcept.strict") #0
861 define double @f19() #0 {
863 ; X87: # %bb.0: # %entry
864 ; X87-NEXT: subl $28, %esp
865 ; X87-NEXT: .cfi_def_cfa_offset 32
866 ; X87-NEXT: flds {{\.?LCPI[0-9]+_[0-9]+}}
867 ; X87-NEXT: fstpl {{[0-9]+}}(%esp)
869 ; X87-NEXT: fstpl (%esp)
871 ; X87-NEXT: calll fmod
872 ; X87-NEXT: addl $28, %esp
873 ; X87-NEXT: .cfi_def_cfa_offset 4
876 ; X86-SSE-LABEL: f19:
877 ; X86-SSE: # %bb.0: # %entry
878 ; X86-SSE-NEXT: subl $28, %esp
879 ; X86-SSE-NEXT: .cfi_def_cfa_offset 32
880 ; X86-SSE-NEXT: movsd {{.*#+}} xmm0 = [1.0E+1,0.0E+0]
881 ; X86-SSE-NEXT: movsd %xmm0, {{[0-9]+}}(%esp)
882 ; X86-SSE-NEXT: movsd {{.*#+}} xmm0 = [1.0E+0,0.0E+0]
883 ; X86-SSE-NEXT: movsd %xmm0, (%esp)
884 ; X86-SSE-NEXT: calll fmod
885 ; X86-SSE-NEXT: addl $28, %esp
886 ; X86-SSE-NEXT: .cfi_def_cfa_offset 4
890 ; SSE: # %bb.0: # %entry
891 ; SSE-NEXT: pushq %rax
892 ; SSE-NEXT: .cfi_def_cfa_offset 16
893 ; SSE-NEXT: movsd {{.*#+}} xmm0 = [1.0E+0,0.0E+0]
894 ; SSE-NEXT: movsd {{.*#+}} xmm1 = [1.0E+1,0.0E+0]
895 ; SSE-NEXT: callq fmod@PLT
896 ; SSE-NEXT: popq %rax
897 ; SSE-NEXT: .cfi_def_cfa_offset 8
901 ; AVX: # %bb.0: # %entry
902 ; AVX-NEXT: pushq %rax
903 ; AVX-NEXT: .cfi_def_cfa_offset 16
904 ; AVX-NEXT: vmovsd {{.*#+}} xmm0 = [1.0E+0,0.0E+0]
905 ; AVX-NEXT: vmovsd {{.*#+}} xmm1 = [1.0E+1,0.0E+0]
906 ; AVX-NEXT: callq fmod@PLT
907 ; AVX-NEXT: popq %rax
908 ; AVX-NEXT: .cfi_def_cfa_offset 8
911 %rem = call double @llvm.experimental.constrained.frem.f64(
914 metadata !"round.dynamic",
915 metadata !"fpexcept.strict") #0
919 ; Verify that fptosi(%x) isn't simplified when the rounding mode is
921 ; Verify that no gross errors happen.
922 ; FIXME: The SSE/AVX code does not raise an invalid exception for all values
923 ; that don't fit in i8.
924 define i8 @f20s8(double %x) #0 {
926 ; X87: # %bb.0: # %entry
927 ; X87-NEXT: subl $8, %esp
928 ; X87-NEXT: .cfi_def_cfa_offset 12
929 ; X87-NEXT: fldl {{[0-9]+}}(%esp)
931 ; X87-NEXT: fnstcw {{[0-9]+}}(%esp)
932 ; X87-NEXT: movzwl {{[0-9]+}}(%esp), %eax
933 ; X87-NEXT: orl $3072, %eax # imm = 0xC00
934 ; X87-NEXT: movw %ax, {{[0-9]+}}(%esp)
935 ; X87-NEXT: fldcw {{[0-9]+}}(%esp)
936 ; X87-NEXT: fistps {{[0-9]+}}(%esp)
937 ; X87-NEXT: fldcw {{[0-9]+}}(%esp)
938 ; X87-NEXT: movzbl {{[0-9]+}}(%esp), %eax
939 ; X87-NEXT: addl $8, %esp
940 ; X87-NEXT: .cfi_def_cfa_offset 4
943 ; X86-SSE-LABEL: f20s8:
944 ; X86-SSE: # %bb.0: # %entry
945 ; X86-SSE-NEXT: cvttsd2si {{[0-9]+}}(%esp), %eax
946 ; X86-SSE-NEXT: # kill: def $al killed $al killed $eax
950 ; SSE: # %bb.0: # %entry
951 ; SSE-NEXT: cvttsd2si %xmm0, %eax
952 ; SSE-NEXT: # kill: def $al killed $al killed $eax
956 ; AVX: # %bb.0: # %entry
957 ; AVX-NEXT: vcvttsd2si %xmm0, %eax
958 ; AVX-NEXT: # kill: def $al killed $al killed $eax
961 %result = call i8 @llvm.experimental.constrained.fptosi.i8.f64(double %x,
962 metadata !"fpexcept.strict") #0
966 ; Verify that fptosi(%x) isn't simplified when the rounding mode is
968 ; Verify that no gross errors happen.
969 ; FIXME: The SSE/AVX code does not raise an invalid exception for all values
970 ; that don't fit in i16.
971 define i16 @f20s16(double %x) #0 {
973 ; X87: # %bb.0: # %entry
974 ; X87-NEXT: subl $8, %esp
975 ; X87-NEXT: .cfi_def_cfa_offset 12
976 ; X87-NEXT: fldl {{[0-9]+}}(%esp)
978 ; X87-NEXT: fnstcw {{[0-9]+}}(%esp)
979 ; X87-NEXT: movzwl {{[0-9]+}}(%esp), %eax
980 ; X87-NEXT: orl $3072, %eax # imm = 0xC00
981 ; X87-NEXT: movw %ax, {{[0-9]+}}(%esp)
982 ; X87-NEXT: fldcw {{[0-9]+}}(%esp)
983 ; X87-NEXT: fistps {{[0-9]+}}(%esp)
984 ; X87-NEXT: fldcw {{[0-9]+}}(%esp)
985 ; X87-NEXT: movzwl {{[0-9]+}}(%esp), %eax
986 ; X87-NEXT: addl $8, %esp
987 ; X87-NEXT: .cfi_def_cfa_offset 4
990 ; X86-SSE-LABEL: f20s16:
991 ; X86-SSE: # %bb.0: # %entry
992 ; X86-SSE-NEXT: cvttsd2si {{[0-9]+}}(%esp), %eax
993 ; X86-SSE-NEXT: # kill: def $ax killed $ax killed $eax
997 ; SSE: # %bb.0: # %entry
998 ; SSE-NEXT: cvttsd2si %xmm0, %eax
999 ; SSE-NEXT: # kill: def $ax killed $ax killed $eax
1002 ; AVX-LABEL: f20s16:
1003 ; AVX: # %bb.0: # %entry
1004 ; AVX-NEXT: vcvttsd2si %xmm0, %eax
1005 ; AVX-NEXT: # kill: def $ax killed $ax killed $eax
1008 %result = call i16 @llvm.experimental.constrained.fptosi.i16.f64(double %x,
1009 metadata !"fpexcept.strict") #0
1013 ; Verify that fptosi(%x) isn't simplified when the rounding mode is
1015 ; Verify that no gross errors happen.
1016 define i32 @f20s(double %x) #0 {
1018 ; X87: # %bb.0: # %entry
1019 ; X87-NEXT: subl $8, %esp
1020 ; X87-NEXT: .cfi_def_cfa_offset 12
1021 ; X87-NEXT: fldl {{[0-9]+}}(%esp)
1023 ; X87-NEXT: fnstcw (%esp)
1024 ; X87-NEXT: movzwl (%esp), %eax
1025 ; X87-NEXT: orl $3072, %eax # imm = 0xC00
1026 ; X87-NEXT: movw %ax, {{[0-9]+}}(%esp)
1027 ; X87-NEXT: fldcw {{[0-9]+}}(%esp)
1028 ; X87-NEXT: fistpl {{[0-9]+}}(%esp)
1029 ; X87-NEXT: fldcw (%esp)
1030 ; X87-NEXT: movl {{[0-9]+}}(%esp), %eax
1031 ; X87-NEXT: addl $8, %esp
1032 ; X87-NEXT: .cfi_def_cfa_offset 4
1035 ; X86-SSE-LABEL: f20s:
1036 ; X86-SSE: # %bb.0: # %entry
1037 ; X86-SSE-NEXT: cvttsd2si {{[0-9]+}}(%esp), %eax
1038 ; X86-SSE-NEXT: retl
1041 ; SSE: # %bb.0: # %entry
1042 ; SSE-NEXT: cvttsd2si %xmm0, %eax
1046 ; AVX: # %bb.0: # %entry
1047 ; AVX-NEXT: vcvttsd2si %xmm0, %eax
1050 %result = call i32 @llvm.experimental.constrained.fptosi.i32.f64(double %x,
1051 metadata !"fpexcept.strict") #0
1055 ; Verify that fptoui(%x) isn't simplified when the rounding mode is
1057 ; Verify that no gross errors happen.
1058 ; FIXME: This code generates spurious inexact exceptions.
1059 define i64 @f20s64(double %x) #0 {
1060 ; X87-LABEL: f20s64:
1061 ; X87: # %bb.0: # %entry
1062 ; X87-NEXT: subl $20, %esp
1063 ; X87-NEXT: .cfi_def_cfa_offset 24
1064 ; X87-NEXT: fldl {{[0-9]+}}(%esp)
1066 ; X87-NEXT: fnstcw {{[0-9]+}}(%esp)
1067 ; X87-NEXT: movzwl {{[0-9]+}}(%esp), %eax
1068 ; X87-NEXT: orl $3072, %eax # imm = 0xC00
1069 ; X87-NEXT: movw %ax, {{[0-9]+}}(%esp)
1070 ; X87-NEXT: fldcw {{[0-9]+}}(%esp)
1071 ; X87-NEXT: fistpll {{[0-9]+}}(%esp)
1072 ; X87-NEXT: fldcw {{[0-9]+}}(%esp)
1073 ; X87-NEXT: movl {{[0-9]+}}(%esp), %eax
1074 ; X87-NEXT: movl {{[0-9]+}}(%esp), %edx
1075 ; X87-NEXT: addl $20, %esp
1076 ; X87-NEXT: .cfi_def_cfa_offset 4
1079 ; X86-SSE-LABEL: f20s64:
1080 ; X86-SSE: # %bb.0: # %entry
1081 ; X86-SSE-NEXT: subl $20, %esp
1082 ; X86-SSE-NEXT: .cfi_def_cfa_offset 24
1083 ; X86-SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
1084 ; X86-SSE-NEXT: movsd %xmm0, {{[0-9]+}}(%esp)
1085 ; X86-SSE-NEXT: fldl {{[0-9]+}}(%esp)
1086 ; X86-SSE-NEXT: wait
1087 ; X86-SSE-NEXT: fnstcw {{[0-9]+}}(%esp)
1088 ; X86-SSE-NEXT: movzwl {{[0-9]+}}(%esp), %eax
1089 ; X86-SSE-NEXT: orl $3072, %eax # imm = 0xC00
1090 ; X86-SSE-NEXT: movw %ax, {{[0-9]+}}(%esp)
1091 ; X86-SSE-NEXT: fldcw {{[0-9]+}}(%esp)
1092 ; X86-SSE-NEXT: fistpll {{[0-9]+}}(%esp)
1093 ; X86-SSE-NEXT: fldcw {{[0-9]+}}(%esp)
1094 ; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax
1095 ; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %edx
1096 ; X86-SSE-NEXT: addl $20, %esp
1097 ; X86-SSE-NEXT: .cfi_def_cfa_offset 4
1098 ; X86-SSE-NEXT: retl
1100 ; SSE-LABEL: f20s64:
1101 ; SSE: # %bb.0: # %entry
1102 ; SSE-NEXT: cvttsd2si %xmm0, %rax
1105 ; AVX-LABEL: f20s64:
1106 ; AVX: # %bb.0: # %entry
1107 ; AVX-NEXT: vcvttsd2si %xmm0, %rax
1110 %result = call i64 @llvm.experimental.constrained.fptosi.i64.f64(double %x,
1111 metadata !"fpexcept.strict") #0
1115 ; Verify that fptoui(%x) isn't simplified when the rounding mode is
1117 ; Verify that no gross errors happen.
1118 define i128 @f20s128(double %x) nounwind strictfp {
1119 ; X87-LABEL: f20s128:
1120 ; X87: # %bb.0: # %entry
1121 ; X87-NEXT: pushl %edi
1122 ; X87-NEXT: pushl %esi
1123 ; X87-NEXT: subl $36, %esp
1124 ; X87-NEXT: movl {{[0-9]+}}(%esp), %esi
1125 ; X87-NEXT: fldl {{[0-9]+}}(%esp)
1126 ; X87-NEXT: fstpl {{[0-9]+}}(%esp)
1128 ; X87-NEXT: leal {{[0-9]+}}(%esp), %eax
1129 ; X87-NEXT: movl %eax, (%esp)
1130 ; X87-NEXT: calll __fixdfti
1131 ; X87-NEXT: subl $4, %esp
1132 ; X87-NEXT: movl {{[0-9]+}}(%esp), %eax
1133 ; X87-NEXT: movl {{[0-9]+}}(%esp), %ecx
1134 ; X87-NEXT: movl {{[0-9]+}}(%esp), %edx
1135 ; X87-NEXT: movl {{[0-9]+}}(%esp), %edi
1136 ; X87-NEXT: movl %edi, 8(%esi)
1137 ; X87-NEXT: movl %edx, 12(%esi)
1138 ; X87-NEXT: movl %eax, (%esi)
1139 ; X87-NEXT: movl %ecx, 4(%esi)
1140 ; X87-NEXT: movl %esi, %eax
1141 ; X87-NEXT: addl $36, %esp
1142 ; X87-NEXT: popl %esi
1143 ; X87-NEXT: popl %edi
1146 ; X86-SSE-LABEL: f20s128:
1147 ; X86-SSE: # %bb.0: # %entry
1148 ; X86-SSE-NEXT: pushl %esi
1149 ; X86-SSE-NEXT: subl $40, %esp
1150 ; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %esi
1151 ; X86-SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
1152 ; X86-SSE-NEXT: movsd %xmm0, {{[0-9]+}}(%esp)
1153 ; X86-SSE-NEXT: leal {{[0-9]+}}(%esp), %eax
1154 ; X86-SSE-NEXT: movl %eax, (%esp)
1155 ; X86-SSE-NEXT: calll __fixdfti
1156 ; X86-SSE-NEXT: subl $4, %esp
1157 ; X86-SSE-NEXT: movaps {{[0-9]+}}(%esp), %xmm0
1158 ; X86-SSE-NEXT: movaps %xmm0, (%esi)
1159 ; X86-SSE-NEXT: movl %esi, %eax
1160 ; X86-SSE-NEXT: addl $40, %esp
1161 ; X86-SSE-NEXT: popl %esi
1162 ; X86-SSE-NEXT: retl $4
1164 ; SSE-LABEL: f20s128:
1165 ; SSE: # %bb.0: # %entry
1166 ; SSE-NEXT: pushq %rax
1167 ; SSE-NEXT: callq __fixdfti@PLT
1168 ; SSE-NEXT: popq %rcx
1171 ; AVX-LABEL: f20s128:
1172 ; AVX: # %bb.0: # %entry
1173 ; AVX-NEXT: pushq %rax
1174 ; AVX-NEXT: callq __fixdfti@PLT
1175 ; AVX-NEXT: popq %rcx
1178 %result = call i128 @llvm.experimental.constrained.fptosi.i128.f64(double %x,
1179 metadata !"fpexcept.strict") #0
1183 ; Verify that fptoui(%x) isn't simplified when the rounding mode is
1185 ; Verify that no gross errors happen.
1186 ; FIXME: The SSE/AVX code does not raise an invalid exception for all values
1187 ; that don't fit in i8.
1188 define i8 @f20u8(double %x) #0 {
1190 ; X87: # %bb.0: # %entry
1191 ; X87-NEXT: subl $8, %esp
1192 ; X87-NEXT: .cfi_def_cfa_offset 12
1193 ; X87-NEXT: fldl {{[0-9]+}}(%esp)
1195 ; X87-NEXT: fnstcw {{[0-9]+}}(%esp)
1196 ; X87-NEXT: movzwl {{[0-9]+}}(%esp), %eax
1197 ; X87-NEXT: orl $3072, %eax # imm = 0xC00
1198 ; X87-NEXT: movw %ax, {{[0-9]+}}(%esp)
1199 ; X87-NEXT: fldcw {{[0-9]+}}(%esp)
1200 ; X87-NEXT: fistps {{[0-9]+}}(%esp)
1201 ; X87-NEXT: fldcw {{[0-9]+}}(%esp)
1202 ; X87-NEXT: movzbl {{[0-9]+}}(%esp), %eax
1203 ; X87-NEXT: addl $8, %esp
1204 ; X87-NEXT: .cfi_def_cfa_offset 4
1207 ; X86-SSE-LABEL: f20u8:
1208 ; X86-SSE: # %bb.0: # %entry
1209 ; X86-SSE-NEXT: cvttsd2si {{[0-9]+}}(%esp), %eax
1210 ; X86-SSE-NEXT: # kill: def $al killed $al killed $eax
1211 ; X86-SSE-NEXT: retl
1214 ; SSE: # %bb.0: # %entry
1215 ; SSE-NEXT: cvttsd2si %xmm0, %eax
1216 ; SSE-NEXT: # kill: def $al killed $al killed $eax
1220 ; AVX: # %bb.0: # %entry
1221 ; AVX-NEXT: vcvttsd2si %xmm0, %eax
1222 ; AVX-NEXT: # kill: def $al killed $al killed $eax
1225 %result = call i8 @llvm.experimental.constrained.fptoui.i8.f64(double %x,
1226 metadata !"fpexcept.strict") #0
1229 ; Verify that fptoui(%x) isn't simplified when the rounding mode is
1231 ; Verify that no gross errors happen.
1232 ; FIXME: The SSE/AVX code does not raise an invalid exception for all values
1233 ; that don't fit in i16.
1234 define i16 @f20u16(double %x) #0 {
1235 ; X87-LABEL: f20u16:
1236 ; X87: # %bb.0: # %entry
1237 ; X87-NEXT: subl $8, %esp
1238 ; X87-NEXT: .cfi_def_cfa_offset 12
1239 ; X87-NEXT: fldl {{[0-9]+}}(%esp)
1241 ; X87-NEXT: fnstcw (%esp)
1242 ; X87-NEXT: movzwl (%esp), %eax
1243 ; X87-NEXT: orl $3072, %eax # imm = 0xC00
1244 ; X87-NEXT: movw %ax, {{[0-9]+}}(%esp)
1245 ; X87-NEXT: fldcw {{[0-9]+}}(%esp)
1246 ; X87-NEXT: fistpl {{[0-9]+}}(%esp)
1247 ; X87-NEXT: fldcw (%esp)
1248 ; X87-NEXT: movl {{[0-9]+}}(%esp), %eax
1249 ; X87-NEXT: # kill: def $ax killed $ax killed $eax
1250 ; X87-NEXT: addl $8, %esp
1251 ; X87-NEXT: .cfi_def_cfa_offset 4
1254 ; X86-SSE-LABEL: f20u16:
1255 ; X86-SSE: # %bb.0: # %entry
1256 ; X86-SSE-NEXT: cvttsd2si {{[0-9]+}}(%esp), %eax
1257 ; X86-SSE-NEXT: # kill: def $ax killed $ax killed $eax
1258 ; X86-SSE-NEXT: retl
1260 ; SSE-LABEL: f20u16:
1261 ; SSE: # %bb.0: # %entry
1262 ; SSE-NEXT: cvttsd2si %xmm0, %eax
1263 ; SSE-NEXT: # kill: def $ax killed $ax killed $eax
1266 ; AVX-LABEL: f20u16:
1267 ; AVX: # %bb.0: # %entry
1268 ; AVX-NEXT: vcvttsd2si %xmm0, %eax
1269 ; AVX-NEXT: # kill: def $ax killed $ax killed $eax
1272 %result = call i16 @llvm.experimental.constrained.fptoui.i16.f64(double %x,
1273 metadata !"fpexcept.strict") #0
1277 ; Verify that fptoui(%x) isn't simplified when the rounding mode is
1279 ; Verify that no gross errors happen.
1280 ; FIXME: The X87/SSE/AVX1 code does not raise an invalid exception for all
1281 ; values that don't fit in i32. The AVX512 code does.
1282 define i32 @f20u(double %x) #0 {
1284 ; X87: # %bb.0: # %entry
1285 ; X87-NEXT: subl $20, %esp
1286 ; X87-NEXT: .cfi_def_cfa_offset 24
1287 ; X87-NEXT: fldl {{[0-9]+}}(%esp)
1289 ; X87-NEXT: fnstcw {{[0-9]+}}(%esp)
1290 ; X87-NEXT: movzwl {{[0-9]+}}(%esp), %eax
1291 ; X87-NEXT: orl $3072, %eax # imm = 0xC00
1292 ; X87-NEXT: movw %ax, {{[0-9]+}}(%esp)
1293 ; X87-NEXT: fldcw {{[0-9]+}}(%esp)
1294 ; X87-NEXT: fistpll {{[0-9]+}}(%esp)
1295 ; X87-NEXT: fldcw {{[0-9]+}}(%esp)
1296 ; X87-NEXT: movl {{[0-9]+}}(%esp), %eax
1297 ; X87-NEXT: addl $20, %esp
1298 ; X87-NEXT: .cfi_def_cfa_offset 4
1301 ; X86-SSE-LABEL: f20u:
1302 ; X86-SSE: # %bb.0: # %entry
1303 ; X86-SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
1304 ; X86-SSE-NEXT: movsd {{.*#+}} xmm2 = [2.147483648E+9,0.0E+0]
1305 ; X86-SSE-NEXT: comisd %xmm0, %xmm2
1306 ; X86-SSE-NEXT: xorpd %xmm1, %xmm1
1307 ; X86-SSE-NEXT: ja .LBB24_2
1308 ; X86-SSE-NEXT: # %bb.1: # %entry
1309 ; X86-SSE-NEXT: movapd %xmm2, %xmm1
1310 ; X86-SSE-NEXT: .LBB24_2: # %entry
1311 ; X86-SSE-NEXT: setbe %al
1312 ; X86-SSE-NEXT: movzbl %al, %ecx
1313 ; X86-SSE-NEXT: shll $31, %ecx
1314 ; X86-SSE-NEXT: subsd %xmm1, %xmm0
1315 ; X86-SSE-NEXT: cvttsd2si %xmm0, %eax
1316 ; X86-SSE-NEXT: xorl %ecx, %eax
1317 ; X86-SSE-NEXT: retl
1320 ; SSE: # %bb.0: # %entry
1321 ; SSE-NEXT: cvttsd2si %xmm0, %rax
1322 ; SSE-NEXT: # kill: def $eax killed $eax killed $rax
1326 ; AVX1: # %bb.0: # %entry
1327 ; AVX1-NEXT: vcvttsd2si %xmm0, %rax
1328 ; AVX1-NEXT: # kill: def $eax killed $eax killed $rax
1331 ; AVX512-LABEL: f20u:
1332 ; AVX512: # %bb.0: # %entry
1333 ; AVX512-NEXT: vcvttsd2usi %xmm0, %eax
1336 %result = call i32 @llvm.experimental.constrained.fptoui.i32.f64(double %x,
1337 metadata !"fpexcept.strict") #0
1341 ; Verify that fptoui(%x) isn't simplified when the rounding mode is
1343 ; Verify that no gross errors happen.
1344 ; FIXME: This code generates spurious inexact exceptions.
1345 define i64 @f20u64(double %x) #0 {
1346 ; X87-LABEL: f20u64:
1347 ; X87: # %bb.0: # %entry
1348 ; X87-NEXT: subl $20, %esp
1349 ; X87-NEXT: .cfi_def_cfa_offset 24
1350 ; X87-NEXT: fldl {{[0-9]+}}(%esp)
1351 ; X87-NEXT: flds {{\.?LCPI[0-9]+_[0-9]+}}
1353 ; X87-NEXT: xorl %edx, %edx
1354 ; X87-NEXT: fcomi %st(1), %st
1356 ; X87-NEXT: setbe %dl
1358 ; X87-NEXT: fcmovbe %st(1), %st
1359 ; X87-NEXT: fstp %st(1)
1360 ; X87-NEXT: fsubrp %st, %st(1)
1362 ; X87-NEXT: fnstcw {{[0-9]+}}(%esp)
1363 ; X87-NEXT: movzwl {{[0-9]+}}(%esp), %eax
1364 ; X87-NEXT: orl $3072, %eax # imm = 0xC00
1365 ; X87-NEXT: movw %ax, {{[0-9]+}}(%esp)
1366 ; X87-NEXT: fldcw {{[0-9]+}}(%esp)
1367 ; X87-NEXT: fistpll {{[0-9]+}}(%esp)
1368 ; X87-NEXT: fldcw {{[0-9]+}}(%esp)
1369 ; X87-NEXT: shll $31, %edx
1370 ; X87-NEXT: xorl {{[0-9]+}}(%esp), %edx
1371 ; X87-NEXT: movl {{[0-9]+}}(%esp), %eax
1372 ; X87-NEXT: addl $20, %esp
1373 ; X87-NEXT: .cfi_def_cfa_offset 4
1376 ; X86-SSE-LABEL: f20u64:
1377 ; X86-SSE: # %bb.0: # %entry
1378 ; X86-SSE-NEXT: subl $20, %esp
1379 ; X86-SSE-NEXT: .cfi_def_cfa_offset 24
1380 ; X86-SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
1381 ; X86-SSE-NEXT: movsd {{.*#+}} xmm1 = [9.2233720368547758E+18,0.0E+0]
1382 ; X86-SSE-NEXT: comisd %xmm0, %xmm1
1383 ; X86-SSE-NEXT: jbe .LBB25_2
1384 ; X86-SSE-NEXT: # %bb.1: # %entry
1385 ; X86-SSE-NEXT: xorpd %xmm1, %xmm1
1386 ; X86-SSE-NEXT: .LBB25_2: # %entry
1387 ; X86-SSE-NEXT: subsd %xmm1, %xmm0
1388 ; X86-SSE-NEXT: movsd %xmm0, {{[0-9]+}}(%esp)
1389 ; X86-SSE-NEXT: setbe %al
1390 ; X86-SSE-NEXT: fldl {{[0-9]+}}(%esp)
1391 ; X86-SSE-NEXT: wait
1392 ; X86-SSE-NEXT: fnstcw {{[0-9]+}}(%esp)
1393 ; X86-SSE-NEXT: movzwl {{[0-9]+}}(%esp), %ecx
1394 ; X86-SSE-NEXT: orl $3072, %ecx # imm = 0xC00
1395 ; X86-SSE-NEXT: movw %cx, {{[0-9]+}}(%esp)
1396 ; X86-SSE-NEXT: fldcw {{[0-9]+}}(%esp)
1397 ; X86-SSE-NEXT: fistpll {{[0-9]+}}(%esp)
1398 ; X86-SSE-NEXT: fldcw {{[0-9]+}}(%esp)
1399 ; X86-SSE-NEXT: movzbl %al, %edx
1400 ; X86-SSE-NEXT: shll $31, %edx
1401 ; X86-SSE-NEXT: xorl {{[0-9]+}}(%esp), %edx
1402 ; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax
1403 ; X86-SSE-NEXT: addl $20, %esp
1404 ; X86-SSE-NEXT: .cfi_def_cfa_offset 4
1405 ; X86-SSE-NEXT: retl
1407 ; SSE-LABEL: f20u64:
1408 ; SSE: # %bb.0: # %entry
1409 ; SSE-NEXT: movsd {{.*#+}} xmm2 = [9.2233720368547758E+18,0.0E+0]
1410 ; SSE-NEXT: comisd %xmm2, %xmm0
1411 ; SSE-NEXT: xorpd %xmm1, %xmm1
1412 ; SSE-NEXT: jb .LBB25_2
1413 ; SSE-NEXT: # %bb.1: # %entry
1414 ; SSE-NEXT: movapd %xmm2, %xmm1
1415 ; SSE-NEXT: .LBB25_2: # %entry
1416 ; SSE-NEXT: subsd %xmm1, %xmm0
1417 ; SSE-NEXT: cvttsd2si %xmm0, %rcx
1418 ; SSE-NEXT: setae %al
1419 ; SSE-NEXT: movzbl %al, %eax
1420 ; SSE-NEXT: shlq $63, %rax
1421 ; SSE-NEXT: xorq %rcx, %rax
1424 ; AVX1-LABEL: f20u64:
1425 ; AVX1: # %bb.0: # %entry
1426 ; AVX1-NEXT: vmovsd {{.*#+}} xmm1 = [9.2233720368547758E+18,0.0E+0]
1427 ; AVX1-NEXT: vcomisd %xmm1, %xmm0
1428 ; AVX1-NEXT: vxorpd %xmm2, %xmm2, %xmm2
1429 ; AVX1-NEXT: jb .LBB25_2
1430 ; AVX1-NEXT: # %bb.1: # %entry
1431 ; AVX1-NEXT: vmovapd %xmm1, %xmm2
1432 ; AVX1-NEXT: .LBB25_2: # %entry
1433 ; AVX1-NEXT: vsubsd %xmm2, %xmm0, %xmm0
1434 ; AVX1-NEXT: vcvttsd2si %xmm0, %rcx
1435 ; AVX1-NEXT: setae %al
1436 ; AVX1-NEXT: movzbl %al, %eax
1437 ; AVX1-NEXT: shlq $63, %rax
1438 ; AVX1-NEXT: xorq %rcx, %rax
1441 ; AVX512-LABEL: f20u64:
1442 ; AVX512: # %bb.0: # %entry
1443 ; AVX512-NEXT: vcvttsd2usi %xmm0, %rax
1446 %result = call i64 @llvm.experimental.constrained.fptoui.i64.f64(double %x,
1447 metadata !"fpexcept.strict") #0
1452 ; Verify that fptoui(%x) isn't simplified when the rounding mode is
1454 ; Verify that no gross errors happen.
1455 define i128 @f20u128(double %x) nounwind strictfp {
1456 ; X87-LABEL: f20u128:
1457 ; X87: # %bb.0: # %entry
1458 ; X87-NEXT: pushl %edi
1459 ; X87-NEXT: pushl %esi
1460 ; X87-NEXT: subl $36, %esp
1461 ; X87-NEXT: movl {{[0-9]+}}(%esp), %esi
1462 ; X87-NEXT: fldl {{[0-9]+}}(%esp)
1463 ; X87-NEXT: fstpl {{[0-9]+}}(%esp)
1465 ; X87-NEXT: leal {{[0-9]+}}(%esp), %eax
1466 ; X87-NEXT: movl %eax, (%esp)
1467 ; X87-NEXT: calll __fixunsdfti
1468 ; X87-NEXT: subl $4, %esp
1469 ; X87-NEXT: movl {{[0-9]+}}(%esp), %eax
1470 ; X87-NEXT: movl {{[0-9]+}}(%esp), %ecx
1471 ; X87-NEXT: movl {{[0-9]+}}(%esp), %edx
1472 ; X87-NEXT: movl {{[0-9]+}}(%esp), %edi
1473 ; X87-NEXT: movl %edi, 8(%esi)
1474 ; X87-NEXT: movl %edx, 12(%esi)
1475 ; X87-NEXT: movl %eax, (%esi)
1476 ; X87-NEXT: movl %ecx, 4(%esi)
1477 ; X87-NEXT: movl %esi, %eax
1478 ; X87-NEXT: addl $36, %esp
1479 ; X87-NEXT: popl %esi
1480 ; X87-NEXT: popl %edi
1483 ; X86-SSE-LABEL: f20u128:
1484 ; X86-SSE: # %bb.0: # %entry
1485 ; X86-SSE-NEXT: pushl %esi
1486 ; X86-SSE-NEXT: subl $40, %esp
1487 ; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %esi
1488 ; X86-SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
1489 ; X86-SSE-NEXT: movsd %xmm0, {{[0-9]+}}(%esp)
1490 ; X86-SSE-NEXT: leal {{[0-9]+}}(%esp), %eax
1491 ; X86-SSE-NEXT: movl %eax, (%esp)
1492 ; X86-SSE-NEXT: calll __fixunsdfti
1493 ; X86-SSE-NEXT: subl $4, %esp
1494 ; X86-SSE-NEXT: movaps {{[0-9]+}}(%esp), %xmm0
1495 ; X86-SSE-NEXT: movaps %xmm0, (%esi)
1496 ; X86-SSE-NEXT: movl %esi, %eax
1497 ; X86-SSE-NEXT: addl $40, %esp
1498 ; X86-SSE-NEXT: popl %esi
1499 ; X86-SSE-NEXT: retl $4
1501 ; SSE-LABEL: f20u128:
1502 ; SSE: # %bb.0: # %entry
1503 ; SSE-NEXT: pushq %rax
1504 ; SSE-NEXT: callq __fixunsdfti@PLT
1505 ; SSE-NEXT: popq %rcx
1508 ; AVX-LABEL: f20u128:
1509 ; AVX: # %bb.0: # %entry
1510 ; AVX-NEXT: pushq %rax
1511 ; AVX-NEXT: callq __fixunsdfti@PLT
1512 ; AVX-NEXT: popq %rcx
1515 %result = call i128 @llvm.experimental.constrained.fptoui.i128.f64(double %x,
1516 metadata !"fpexcept.strict") #0
1520 ; Verify that round(42.1) isn't simplified when the rounding mode is
1522 ; Verify that no gross errors happen.
1523 define float @f21() #0 {
1525 ; X87: # %bb.0: # %entry
1526 ; X87-NEXT: pushl %eax
1527 ; X87-NEXT: .cfi_def_cfa_offset 8
1528 ; X87-NEXT: fldl {{\.?LCPI[0-9]+_[0-9]+}}
1529 ; X87-NEXT: fstps (%esp)
1530 ; X87-NEXT: flds (%esp)
1532 ; X87-NEXT: popl %eax
1533 ; X87-NEXT: .cfi_def_cfa_offset 4
1536 ; X86-SSE-LABEL: f21:
1537 ; X86-SSE: # %bb.0: # %entry
1538 ; X86-SSE-NEXT: pushl %eax
1539 ; X86-SSE-NEXT: .cfi_def_cfa_offset 8
1540 ; X86-SSE-NEXT: movsd {{.*#+}} xmm0 = [4.2100000000000001E+1,0.0E+0]
1541 ; X86-SSE-NEXT: cvtsd2ss %xmm0, %xmm0
1542 ; X86-SSE-NEXT: movss %xmm0, (%esp)
1543 ; X86-SSE-NEXT: flds (%esp)
1544 ; X86-SSE-NEXT: wait
1545 ; X86-SSE-NEXT: popl %eax
1546 ; X86-SSE-NEXT: .cfi_def_cfa_offset 4
1547 ; X86-SSE-NEXT: retl
1550 ; SSE: # %bb.0: # %entry
1551 ; SSE-NEXT: movsd {{.*#+}} xmm0 = [4.2100000000000001E+1,0.0E+0]
1552 ; SSE-NEXT: cvtsd2ss %xmm0, %xmm0
1556 ; AVX: # %bb.0: # %entry
1557 ; AVX-NEXT: vmovsd {{.*#+}} xmm0 = [4.2100000000000001E+1,0.0E+0]
1558 ; AVX-NEXT: vcvtsd2ss %xmm0, %xmm0, %xmm0
1561 %result = call float @llvm.experimental.constrained.fptrunc.f32.f64(
1563 metadata !"round.dynamic",
1564 metadata !"fpexcept.strict") #0
1568 define double @f22(float %x) #0 {
1570 ; X87: # %bb.0: # %entry
1571 ; X87-NEXT: flds {{[0-9]+}}(%esp)
1575 ; X86-SSE-LABEL: f22:
1576 ; X86-SSE: # %bb.0: # %entry
1577 ; X86-SSE-NEXT: subl $12, %esp
1578 ; X86-SSE-NEXT: .cfi_def_cfa_offset 16
1579 ; X86-SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
1580 ; X86-SSE-NEXT: cvtss2sd %xmm0, %xmm0
1581 ; X86-SSE-NEXT: movsd %xmm0, (%esp)
1582 ; X86-SSE-NEXT: fldl (%esp)
1583 ; X86-SSE-NEXT: wait
1584 ; X86-SSE-NEXT: addl $12, %esp
1585 ; X86-SSE-NEXT: .cfi_def_cfa_offset 4
1586 ; X86-SSE-NEXT: retl
1589 ; SSE: # %bb.0: # %entry
1590 ; SSE-NEXT: cvtss2sd %xmm0, %xmm0
1594 ; AVX: # %bb.0: # %entry
1595 ; AVX-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0
1598 %result = call double @llvm.experimental.constrained.fpext.f64.f32(float %x,
1599 metadata !"fpexcept.strict") #0
1603 define i32 @f23(double %x) #0 {
1605 ; X87: # %bb.0: # %entry
1606 ; X87-NEXT: subl $12, %esp
1607 ; X87-NEXT: .cfi_def_cfa_offset 16
1608 ; X87-NEXT: fldl {{[0-9]+}}(%esp)
1609 ; X87-NEXT: fstpl (%esp)
1611 ; X87-NEXT: calll lrint
1612 ; X87-NEXT: addl $12, %esp
1613 ; X87-NEXT: .cfi_def_cfa_offset 4
1616 ; X86-SSE-LABEL: f23:
1617 ; X86-SSE: # %bb.0: # %entry
1618 ; X86-SSE-NEXT: subl $12, %esp
1619 ; X86-SSE-NEXT: .cfi_def_cfa_offset 16
1620 ; X86-SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
1621 ; X86-SSE-NEXT: movsd %xmm0, (%esp)
1622 ; X86-SSE-NEXT: calll lrint
1623 ; X86-SSE-NEXT: addl $12, %esp
1624 ; X86-SSE-NEXT: .cfi_def_cfa_offset 4
1625 ; X86-SSE-NEXT: retl
1628 ; SSE: # %bb.0: # %entry
1629 ; SSE-NEXT: pushq %rax
1630 ; SSE-NEXT: .cfi_def_cfa_offset 16
1631 ; SSE-NEXT: callq lrint@PLT
1632 ; SSE-NEXT: popq %rcx
1633 ; SSE-NEXT: .cfi_def_cfa_offset 8
1637 ; AVX: # %bb.0: # %entry
1638 ; AVX-NEXT: pushq %rax
1639 ; AVX-NEXT: .cfi_def_cfa_offset 16
1640 ; AVX-NEXT: callq lrint@PLT
1641 ; AVX-NEXT: popq %rcx
1642 ; AVX-NEXT: .cfi_def_cfa_offset 8
1645 %result = call i32 @llvm.experimental.constrained.lrint.i32.f64(double %x,
1646 metadata !"round.dynamic",
1647 metadata !"fpexcept.strict") #0
1651 define i32 @f24(float %x) #0 {
1653 ; X87: # %bb.0: # %entry
1654 ; X87-NEXT: subl $12, %esp
1655 ; X87-NEXT: .cfi_def_cfa_offset 16
1656 ; X87-NEXT: flds {{[0-9]+}}(%esp)
1657 ; X87-NEXT: fstps (%esp)
1659 ; X87-NEXT: calll lrintf
1660 ; X87-NEXT: addl $12, %esp
1661 ; X87-NEXT: .cfi_def_cfa_offset 4
1664 ; X86-SSE-LABEL: f24:
1665 ; X86-SSE: # %bb.0: # %entry
1666 ; X86-SSE-NEXT: subl $12, %esp
1667 ; X86-SSE-NEXT: .cfi_def_cfa_offset 16
1668 ; X86-SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
1669 ; X86-SSE-NEXT: movss %xmm0, (%esp)
1670 ; X86-SSE-NEXT: calll lrintf
1671 ; X86-SSE-NEXT: addl $12, %esp
1672 ; X86-SSE-NEXT: .cfi_def_cfa_offset 4
1673 ; X86-SSE-NEXT: retl
1676 ; SSE: # %bb.0: # %entry
1677 ; SSE-NEXT: pushq %rax
1678 ; SSE-NEXT: .cfi_def_cfa_offset 16
1679 ; SSE-NEXT: callq lrintf@PLT
1680 ; SSE-NEXT: popq %rcx
1681 ; SSE-NEXT: .cfi_def_cfa_offset 8
1685 ; AVX: # %bb.0: # %entry
1686 ; AVX-NEXT: pushq %rax
1687 ; AVX-NEXT: .cfi_def_cfa_offset 16
1688 ; AVX-NEXT: callq lrintf@PLT
1689 ; AVX-NEXT: popq %rcx
1690 ; AVX-NEXT: .cfi_def_cfa_offset 8
1693 %result = call i32 @llvm.experimental.constrained.lrint.i32.f32(float %x,
1694 metadata !"round.dynamic",
1695 metadata !"fpexcept.strict") #0
1699 define i64 @f25(double %x) #0 {
1701 ; X87: # %bb.0: # %entry
1702 ; X87-NEXT: subl $12, %esp
1703 ; X87-NEXT: .cfi_def_cfa_offset 16
1704 ; X87-NEXT: fldl {{[0-9]+}}(%esp)
1705 ; X87-NEXT: fstpl (%esp)
1707 ; X87-NEXT: calll llrint
1708 ; X87-NEXT: addl $12, %esp
1709 ; X87-NEXT: .cfi_def_cfa_offset 4
1712 ; X86-SSE-LABEL: f25:
1713 ; X86-SSE: # %bb.0: # %entry
1714 ; X86-SSE-NEXT: subl $12, %esp
1715 ; X86-SSE-NEXT: .cfi_def_cfa_offset 16
1716 ; X86-SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
1717 ; X86-SSE-NEXT: movsd %xmm0, (%esp)
1718 ; X86-SSE-NEXT: calll llrint
1719 ; X86-SSE-NEXT: addl $12, %esp
1720 ; X86-SSE-NEXT: .cfi_def_cfa_offset 4
1721 ; X86-SSE-NEXT: retl
1724 ; SSE: # %bb.0: # %entry
1725 ; SSE-NEXT: pushq %rax
1726 ; SSE-NEXT: .cfi_def_cfa_offset 16
1727 ; SSE-NEXT: callq llrint@PLT
1728 ; SSE-NEXT: popq %rcx
1729 ; SSE-NEXT: .cfi_def_cfa_offset 8
1733 ; AVX: # %bb.0: # %entry
1734 ; AVX-NEXT: pushq %rax
1735 ; AVX-NEXT: .cfi_def_cfa_offset 16
1736 ; AVX-NEXT: callq llrint@PLT
1737 ; AVX-NEXT: popq %rcx
1738 ; AVX-NEXT: .cfi_def_cfa_offset 8
1741 %result = call i64 @llvm.experimental.constrained.llrint.i64.f64(double %x,
1742 metadata !"round.dynamic",
1743 metadata !"fpexcept.strict") #0
1747 define i64 @f26(float %x) #0 {
1749 ; X87: # %bb.0: # %entry
1750 ; X87-NEXT: subl $12, %esp
1751 ; X87-NEXT: .cfi_def_cfa_offset 16
1752 ; X87-NEXT: flds {{[0-9]+}}(%esp)
1753 ; X87-NEXT: fstps (%esp)
1755 ; X87-NEXT: calll llrintf
1756 ; X87-NEXT: addl $12, %esp
1757 ; X87-NEXT: .cfi_def_cfa_offset 4
1760 ; X86-SSE-LABEL: f26:
1761 ; X86-SSE: # %bb.0: # %entry
1762 ; X86-SSE-NEXT: subl $12, %esp
1763 ; X86-SSE-NEXT: .cfi_def_cfa_offset 16
1764 ; X86-SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
1765 ; X86-SSE-NEXT: movss %xmm0, (%esp)
1766 ; X86-SSE-NEXT: calll llrintf
1767 ; X86-SSE-NEXT: addl $12, %esp
1768 ; X86-SSE-NEXT: .cfi_def_cfa_offset 4
1769 ; X86-SSE-NEXT: retl
1772 ; SSE: # %bb.0: # %entry
1773 ; SSE-NEXT: pushq %rax
1774 ; SSE-NEXT: .cfi_def_cfa_offset 16
1775 ; SSE-NEXT: callq llrintf@PLT
1776 ; SSE-NEXT: popq %rcx
1777 ; SSE-NEXT: .cfi_def_cfa_offset 8
1781 ; AVX: # %bb.0: # %entry
1782 ; AVX-NEXT: pushq %rax
1783 ; AVX-NEXT: .cfi_def_cfa_offset 16
1784 ; AVX-NEXT: callq llrintf@PLT
1785 ; AVX-NEXT: popq %rcx
1786 ; AVX-NEXT: .cfi_def_cfa_offset 8
1789 %result = call i64 @llvm.experimental.constrained.llrint.i64.f32(float %x,
1790 metadata !"round.dynamic",
1791 metadata !"fpexcept.strict") #0
1795 define i32 @f27(double %x) #0 {
1797 ; X87: # %bb.0: # %entry
1798 ; X87-NEXT: subl $12, %esp
1799 ; X87-NEXT: .cfi_def_cfa_offset 16
1800 ; X87-NEXT: fldl {{[0-9]+}}(%esp)
1801 ; X87-NEXT: fstpl (%esp)
1803 ; X87-NEXT: calll lround
1804 ; X87-NEXT: addl $12, %esp
1805 ; X87-NEXT: .cfi_def_cfa_offset 4
1808 ; X86-SSE-LABEL: f27:
1809 ; X86-SSE: # %bb.0: # %entry
1810 ; X86-SSE-NEXT: subl $12, %esp
1811 ; X86-SSE-NEXT: .cfi_def_cfa_offset 16
1812 ; X86-SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
1813 ; X86-SSE-NEXT: movsd %xmm0, (%esp)
1814 ; X86-SSE-NEXT: calll lround
1815 ; X86-SSE-NEXT: addl $12, %esp
1816 ; X86-SSE-NEXT: .cfi_def_cfa_offset 4
1817 ; X86-SSE-NEXT: retl
1820 ; SSE: # %bb.0: # %entry
1821 ; SSE-NEXT: pushq %rax
1822 ; SSE-NEXT: .cfi_def_cfa_offset 16
1823 ; SSE-NEXT: callq lround@PLT
1824 ; SSE-NEXT: popq %rcx
1825 ; SSE-NEXT: .cfi_def_cfa_offset 8
1829 ; AVX: # %bb.0: # %entry
1830 ; AVX-NEXT: pushq %rax
1831 ; AVX-NEXT: .cfi_def_cfa_offset 16
1832 ; AVX-NEXT: callq lround@PLT
1833 ; AVX-NEXT: popq %rcx
1834 ; AVX-NEXT: .cfi_def_cfa_offset 8
1837 %result = call i32 @llvm.experimental.constrained.lround.i32.f64(double %x,
1838 metadata !"fpexcept.strict") #0
1842 define i32 @f28(float %x) #0 {
1844 ; X87: # %bb.0: # %entry
1845 ; X87-NEXT: subl $12, %esp
1846 ; X87-NEXT: .cfi_def_cfa_offset 16
1847 ; X87-NEXT: flds {{[0-9]+}}(%esp)
1848 ; X87-NEXT: fstps (%esp)
1850 ; X87-NEXT: calll lroundf
1851 ; X87-NEXT: addl $12, %esp
1852 ; X87-NEXT: .cfi_def_cfa_offset 4
1855 ; X86-SSE-LABEL: f28:
1856 ; X86-SSE: # %bb.0: # %entry
1857 ; X86-SSE-NEXT: subl $12, %esp
1858 ; X86-SSE-NEXT: .cfi_def_cfa_offset 16
1859 ; X86-SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
1860 ; X86-SSE-NEXT: movss %xmm0, (%esp)
1861 ; X86-SSE-NEXT: calll lroundf
1862 ; X86-SSE-NEXT: addl $12, %esp
1863 ; X86-SSE-NEXT: .cfi_def_cfa_offset 4
1864 ; X86-SSE-NEXT: retl
1867 ; SSE: # %bb.0: # %entry
1868 ; SSE-NEXT: pushq %rax
1869 ; SSE-NEXT: .cfi_def_cfa_offset 16
1870 ; SSE-NEXT: callq lroundf@PLT
1871 ; SSE-NEXT: popq %rcx
1872 ; SSE-NEXT: .cfi_def_cfa_offset 8
1876 ; AVX: # %bb.0: # %entry
1877 ; AVX-NEXT: pushq %rax
1878 ; AVX-NEXT: .cfi_def_cfa_offset 16
1879 ; AVX-NEXT: callq lroundf@PLT
1880 ; AVX-NEXT: popq %rcx
1881 ; AVX-NEXT: .cfi_def_cfa_offset 8
1884 %result = call i32 @llvm.experimental.constrained.lround.i32.f32(float %x,
1885 metadata !"fpexcept.strict") #0
1889 define i64 @f29(double %x) #0 {
1891 ; X87: # %bb.0: # %entry
1892 ; X87-NEXT: subl $12, %esp
1893 ; X87-NEXT: .cfi_def_cfa_offset 16
1894 ; X87-NEXT: fldl {{[0-9]+}}(%esp)
1895 ; X87-NEXT: fstpl (%esp)
1897 ; X87-NEXT: calll llround
1898 ; X87-NEXT: addl $12, %esp
1899 ; X87-NEXT: .cfi_def_cfa_offset 4
1902 ; X86-SSE-LABEL: f29:
1903 ; X86-SSE: # %bb.0: # %entry
1904 ; X86-SSE-NEXT: subl $12, %esp
1905 ; X86-SSE-NEXT: .cfi_def_cfa_offset 16
1906 ; X86-SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
1907 ; X86-SSE-NEXT: movsd %xmm0, (%esp)
1908 ; X86-SSE-NEXT: calll llround
1909 ; X86-SSE-NEXT: addl $12, %esp
1910 ; X86-SSE-NEXT: .cfi_def_cfa_offset 4
1911 ; X86-SSE-NEXT: retl
1914 ; SSE: # %bb.0: # %entry
1915 ; SSE-NEXT: pushq %rax
1916 ; SSE-NEXT: .cfi_def_cfa_offset 16
1917 ; SSE-NEXT: callq llround@PLT
1918 ; SSE-NEXT: popq %rcx
1919 ; SSE-NEXT: .cfi_def_cfa_offset 8
1923 ; AVX: # %bb.0: # %entry
1924 ; AVX-NEXT: pushq %rax
1925 ; AVX-NEXT: .cfi_def_cfa_offset 16
1926 ; AVX-NEXT: callq llround@PLT
1927 ; AVX-NEXT: popq %rcx
1928 ; AVX-NEXT: .cfi_def_cfa_offset 8
1931 %result = call i64 @llvm.experimental.constrained.llround.i64.f64(double %x,
1932 metadata !"fpexcept.strict") #0
1936 define i64 @f30(float %x) #0 {
1938 ; X87: # %bb.0: # %entry
1939 ; X87-NEXT: subl $12, %esp
1940 ; X87-NEXT: .cfi_def_cfa_offset 16
1941 ; X87-NEXT: flds {{[0-9]+}}(%esp)
1942 ; X87-NEXT: fstps (%esp)
1944 ; X87-NEXT: calll llroundf
1945 ; X87-NEXT: addl $12, %esp
1946 ; X87-NEXT: .cfi_def_cfa_offset 4
1949 ; X86-SSE-LABEL: f30:
1950 ; X86-SSE: # %bb.0: # %entry
1951 ; X86-SSE-NEXT: subl $12, %esp
1952 ; X86-SSE-NEXT: .cfi_def_cfa_offset 16
1953 ; X86-SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
1954 ; X86-SSE-NEXT: movss %xmm0, (%esp)
1955 ; X86-SSE-NEXT: calll llroundf
1956 ; X86-SSE-NEXT: addl $12, %esp
1957 ; X86-SSE-NEXT: .cfi_def_cfa_offset 4
1958 ; X86-SSE-NEXT: retl
1961 ; SSE: # %bb.0: # %entry
1962 ; SSE-NEXT: pushq %rax
1963 ; SSE-NEXT: .cfi_def_cfa_offset 16
1964 ; SSE-NEXT: callq llroundf@PLT
1965 ; SSE-NEXT: popq %rcx
1966 ; SSE-NEXT: .cfi_def_cfa_offset 8
1970 ; AVX: # %bb.0: # %entry
1971 ; AVX-NEXT: pushq %rax
1972 ; AVX-NEXT: .cfi_def_cfa_offset 16
1973 ; AVX-NEXT: callq llroundf@PLT
1974 ; AVX-NEXT: popq %rcx
1975 ; AVX-NEXT: .cfi_def_cfa_offset 8
1978 %result = call i64 @llvm.experimental.constrained.llround.i64.f32(float %x,
1979 metadata !"fpexcept.strict") #0
1983 ; Verify that sitofp(%x) isn't simplified when the rounding mode is
1985 ; Verify that no gross errors happen.
1986 define double @sifdb(i8 %x) #0 {
1988 ; X87: # %bb.0: # %entry
1989 ; X87-NEXT: pushl %eax
1990 ; X87-NEXT: .cfi_def_cfa_offset 8
1991 ; X87-NEXT: movsbl {{[0-9]+}}(%esp), %eax
1992 ; X87-NEXT: movw %ax, {{[0-9]+}}(%esp)
1993 ; X87-NEXT: filds {{[0-9]+}}(%esp)
1995 ; X87-NEXT: popl %eax
1996 ; X87-NEXT: .cfi_def_cfa_offset 4
1999 ; X86-SSE-LABEL: sifdb:
2000 ; X86-SSE: # %bb.0: # %entry
2001 ; X86-SSE-NEXT: subl $12, %esp
2002 ; X86-SSE-NEXT: .cfi_def_cfa_offset 16
2003 ; X86-SSE-NEXT: movsbl {{[0-9]+}}(%esp), %eax
2004 ; X86-SSE-NEXT: cvtsi2sd %eax, %xmm0
2005 ; X86-SSE-NEXT: movsd %xmm0, (%esp)
2006 ; X86-SSE-NEXT: fldl (%esp)
2007 ; X86-SSE-NEXT: wait
2008 ; X86-SSE-NEXT: addl $12, %esp
2009 ; X86-SSE-NEXT: .cfi_def_cfa_offset 4
2010 ; X86-SSE-NEXT: retl
2013 ; SSE: # %bb.0: # %entry
2014 ; SSE-NEXT: movsbl %dil, %eax
2015 ; SSE-NEXT: cvtsi2sd %eax, %xmm0
2019 ; AVX: # %bb.0: # %entry
2020 ; AVX-NEXT: movsbl %dil, %eax
2021 ; AVX-NEXT: vcvtsi2sd %eax, %xmm0, %xmm0
2024 %result = call double @llvm.experimental.constrained.sitofp.f64.i8(i8 %x,
2025 metadata !"round.dynamic",
2026 metadata !"fpexcept.strict") #0
2030 define double @sifdw(i16 %x) #0 {
2032 ; X87: # %bb.0: # %entry
2033 ; X87-NEXT: pushl %eax
2034 ; X87-NEXT: .cfi_def_cfa_offset 8
2035 ; X87-NEXT: movzwl {{[0-9]+}}(%esp), %eax
2036 ; X87-NEXT: movw %ax, {{[0-9]+}}(%esp)
2037 ; X87-NEXT: filds {{[0-9]+}}(%esp)
2039 ; X87-NEXT: popl %eax
2040 ; X87-NEXT: .cfi_def_cfa_offset 4
2043 ; X86-SSE-LABEL: sifdw:
2044 ; X86-SSE: # %bb.0: # %entry
2045 ; X86-SSE-NEXT: subl $12, %esp
2046 ; X86-SSE-NEXT: .cfi_def_cfa_offset 16
2047 ; X86-SSE-NEXT: movswl {{[0-9]+}}(%esp), %eax
2048 ; X86-SSE-NEXT: cvtsi2sd %eax, %xmm0
2049 ; X86-SSE-NEXT: movsd %xmm0, (%esp)
2050 ; X86-SSE-NEXT: fldl (%esp)
2051 ; X86-SSE-NEXT: wait
2052 ; X86-SSE-NEXT: addl $12, %esp
2053 ; X86-SSE-NEXT: .cfi_def_cfa_offset 4
2054 ; X86-SSE-NEXT: retl
2057 ; SSE: # %bb.0: # %entry
2058 ; SSE-NEXT: movswl %di, %eax
2059 ; SSE-NEXT: cvtsi2sd %eax, %xmm0
2063 ; AVX: # %bb.0: # %entry
2064 ; AVX-NEXT: movswl %di, %eax
2065 ; AVX-NEXT: vcvtsi2sd %eax, %xmm0, %xmm0
2068 %result = call double @llvm.experimental.constrained.sitofp.f64.i16(i16 %x,
2069 metadata !"round.dynamic",
2070 metadata !"fpexcept.strict") #0
2074 define double @sifdi(i32 %x) #0 {
2076 ; X87: # %bb.0: # %entry
2077 ; X87-NEXT: pushl %eax
2078 ; X87-NEXT: .cfi_def_cfa_offset 8
2079 ; X87-NEXT: movl {{[0-9]+}}(%esp), %eax
2080 ; X87-NEXT: movl %eax, (%esp)
2081 ; X87-NEXT: fildl (%esp)
2083 ; X87-NEXT: popl %eax
2084 ; X87-NEXT: .cfi_def_cfa_offset 4
2087 ; X86-SSE-LABEL: sifdi:
2088 ; X86-SSE: # %bb.0: # %entry
2089 ; X86-SSE-NEXT: subl $12, %esp
2090 ; X86-SSE-NEXT: .cfi_def_cfa_offset 16
2091 ; X86-SSE-NEXT: cvtsi2sdl {{[0-9]+}}(%esp), %xmm0
2092 ; X86-SSE-NEXT: movsd %xmm0, (%esp)
2093 ; X86-SSE-NEXT: fldl (%esp)
2094 ; X86-SSE-NEXT: wait
2095 ; X86-SSE-NEXT: addl $12, %esp
2096 ; X86-SSE-NEXT: .cfi_def_cfa_offset 4
2097 ; X86-SSE-NEXT: retl
2100 ; SSE: # %bb.0: # %entry
2101 ; SSE-NEXT: cvtsi2sd %edi, %xmm0
2105 ; AVX: # %bb.0: # %entry
2106 ; AVX-NEXT: vcvtsi2sd %edi, %xmm0, %xmm0
2109 %result = call double @llvm.experimental.constrained.sitofp.f64.i32(i32 %x,
2110 metadata !"round.dynamic",
2111 metadata !"fpexcept.strict") #0
2115 define float @siffb(i8 %x) #0 {
2117 ; X87: # %bb.0: # %entry
2118 ; X87-NEXT: pushl %eax
2119 ; X87-NEXT: .cfi_def_cfa_offset 8
2120 ; X87-NEXT: movsbl {{[0-9]+}}(%esp), %eax
2121 ; X87-NEXT: movw %ax, {{[0-9]+}}(%esp)
2122 ; X87-NEXT: filds {{[0-9]+}}(%esp)
2124 ; X87-NEXT: popl %eax
2125 ; X87-NEXT: .cfi_def_cfa_offset 4
2128 ; X86-SSE-LABEL: siffb:
2129 ; X86-SSE: # %bb.0: # %entry
2130 ; X86-SSE-NEXT: pushl %eax
2131 ; X86-SSE-NEXT: .cfi_def_cfa_offset 8
2132 ; X86-SSE-NEXT: movsbl {{[0-9]+}}(%esp), %eax
2133 ; X86-SSE-NEXT: cvtsi2ss %eax, %xmm0
2134 ; X86-SSE-NEXT: movss %xmm0, (%esp)
2135 ; X86-SSE-NEXT: flds (%esp)
2136 ; X86-SSE-NEXT: wait
2137 ; X86-SSE-NEXT: popl %eax
2138 ; X86-SSE-NEXT: .cfi_def_cfa_offset 4
2139 ; X86-SSE-NEXT: retl
2142 ; SSE: # %bb.0: # %entry
2143 ; SSE-NEXT: movsbl %dil, %eax
2144 ; SSE-NEXT: cvtsi2ss %eax, %xmm0
2148 ; AVX: # %bb.0: # %entry
2149 ; AVX-NEXT: movsbl %dil, %eax
2150 ; AVX-NEXT: vcvtsi2ss %eax, %xmm0, %xmm0
2153 %result = call float @llvm.experimental.constrained.sitofp.f32.i8(i8 %x,
2154 metadata !"round.dynamic",
2155 metadata !"fpexcept.strict") #0
2159 define float @siffw(i16 %x) #0 {
2161 ; X87: # %bb.0: # %entry
2162 ; X87-NEXT: pushl %eax
2163 ; X87-NEXT: .cfi_def_cfa_offset 8
2164 ; X87-NEXT: movzwl {{[0-9]+}}(%esp), %eax
2165 ; X87-NEXT: movw %ax, {{[0-9]+}}(%esp)
2166 ; X87-NEXT: filds {{[0-9]+}}(%esp)
2168 ; X87-NEXT: popl %eax
2169 ; X87-NEXT: .cfi_def_cfa_offset 4
2172 ; X86-SSE-LABEL: siffw:
2173 ; X86-SSE: # %bb.0: # %entry
2174 ; X86-SSE-NEXT: pushl %eax
2175 ; X86-SSE-NEXT: .cfi_def_cfa_offset 8
2176 ; X86-SSE-NEXT: movswl {{[0-9]+}}(%esp), %eax
2177 ; X86-SSE-NEXT: cvtsi2ss %eax, %xmm0
2178 ; X86-SSE-NEXT: movss %xmm0, (%esp)
2179 ; X86-SSE-NEXT: flds (%esp)
2180 ; X86-SSE-NEXT: wait
2181 ; X86-SSE-NEXT: popl %eax
2182 ; X86-SSE-NEXT: .cfi_def_cfa_offset 4
2183 ; X86-SSE-NEXT: retl
2186 ; SSE: # %bb.0: # %entry
2187 ; SSE-NEXT: movswl %di, %eax
2188 ; SSE-NEXT: cvtsi2ss %eax, %xmm0
2192 ; AVX: # %bb.0: # %entry
2193 ; AVX-NEXT: movswl %di, %eax
2194 ; AVX-NEXT: vcvtsi2ss %eax, %xmm0, %xmm0
2197 %result = call float @llvm.experimental.constrained.sitofp.f32.i16(i16 %x,
2198 metadata !"round.dynamic",
2199 metadata !"fpexcept.strict") #0
2203 define float @siffi(i32 %x) #0 {
2205 ; X87: # %bb.0: # %entry
2206 ; X87-NEXT: pushl %eax
2207 ; X87-NEXT: .cfi_def_cfa_offset 8
2208 ; X87-NEXT: movl {{[0-9]+}}(%esp), %eax
2209 ; X87-NEXT: movl %eax, (%esp)
2210 ; X87-NEXT: fildl (%esp)
2212 ; X87-NEXT: popl %eax
2213 ; X87-NEXT: .cfi_def_cfa_offset 4
2216 ; X86-SSE-LABEL: siffi:
2217 ; X86-SSE: # %bb.0: # %entry
2218 ; X86-SSE-NEXT: pushl %eax
2219 ; X86-SSE-NEXT: .cfi_def_cfa_offset 8
2220 ; X86-SSE-NEXT: cvtsi2ssl {{[0-9]+}}(%esp), %xmm0
2221 ; X86-SSE-NEXT: movss %xmm0, (%esp)
2222 ; X86-SSE-NEXT: flds (%esp)
2223 ; X86-SSE-NEXT: wait
2224 ; X86-SSE-NEXT: popl %eax
2225 ; X86-SSE-NEXT: .cfi_def_cfa_offset 4
2226 ; X86-SSE-NEXT: retl
2229 ; SSE: # %bb.0: # %entry
2230 ; SSE-NEXT: cvtsi2ss %edi, %xmm0
2234 ; AVX: # %bb.0: # %entry
2235 ; AVX-NEXT: vcvtsi2ss %edi, %xmm0, %xmm0
2238 %result = call float @llvm.experimental.constrained.sitofp.f32.i32(i32 %x,
2239 metadata !"round.dynamic",
2240 metadata !"fpexcept.strict") #0
2244 define double @sifdl(i64 %x) #0 {
2246 ; X87: # %bb.0: # %entry
2247 ; X87-NEXT: fildll {{[0-9]+}}(%esp)
2251 ; X86-SSE-LABEL: sifdl:
2252 ; X86-SSE: # %bb.0: # %entry
2253 ; X86-SSE-NEXT: subl $12, %esp
2254 ; X86-SSE-NEXT: .cfi_def_cfa_offset 16
2255 ; X86-SSE-NEXT: fildll {{[0-9]+}}(%esp)
2256 ; X86-SSE-NEXT: fstpl (%esp)
2257 ; X86-SSE-NEXT: fldl (%esp)
2258 ; X86-SSE-NEXT: wait
2259 ; X86-SSE-NEXT: addl $12, %esp
2260 ; X86-SSE-NEXT: .cfi_def_cfa_offset 4
2261 ; X86-SSE-NEXT: retl
2264 ; SSE: # %bb.0: # %entry
2265 ; SSE-NEXT: cvtsi2sd %rdi, %xmm0
2269 ; AVX: # %bb.0: # %entry
2270 ; AVX-NEXT: vcvtsi2sd %rdi, %xmm0, %xmm0
2273 %result = call double @llvm.experimental.constrained.sitofp.f64.i64(i64 %x,
2274 metadata !"round.dynamic",
2275 metadata !"fpexcept.strict") #0
2279 define float @siffl(i64 %x) #0 {
2281 ; X87: # %bb.0: # %entry
2282 ; X87-NEXT: fildll {{[0-9]+}}(%esp)
2286 ; X86-SSE-LABEL: siffl:
2287 ; X86-SSE: # %bb.0: # %entry
2288 ; X86-SSE-NEXT: pushl %eax
2289 ; X86-SSE-NEXT: .cfi_def_cfa_offset 8
2290 ; X86-SSE-NEXT: fildll {{[0-9]+}}(%esp)
2291 ; X86-SSE-NEXT: fstps (%esp)
2292 ; X86-SSE-NEXT: flds (%esp)
2293 ; X86-SSE-NEXT: wait
2294 ; X86-SSE-NEXT: popl %eax
2295 ; X86-SSE-NEXT: .cfi_def_cfa_offset 4
2296 ; X86-SSE-NEXT: retl
2299 ; SSE: # %bb.0: # %entry
2300 ; SSE-NEXT: cvtsi2ss %rdi, %xmm0
2304 ; AVX: # %bb.0: # %entry
2305 ; AVX-NEXT: vcvtsi2ss %rdi, %xmm0, %xmm0
2308 %result = call float @llvm.experimental.constrained.sitofp.f32.i64(i64 %x,
2309 metadata !"round.dynamic",
2310 metadata !"fpexcept.strict") #0
2314 ; Verify that uitofp(%x) isn't simplified when the rounding mode is
2316 ; Verify that no gross errors happen.
2317 define double @uifdb(i8 %x) #0 {
2319 ; X87: # %bb.0: # %entry
2320 ; X87-NEXT: pushl %eax
2321 ; X87-NEXT: .cfi_def_cfa_offset 8
2322 ; X87-NEXT: movzbl {{[0-9]+}}(%esp), %eax
2323 ; X87-NEXT: movw %ax, {{[0-9]+}}(%esp)
2324 ; X87-NEXT: filds {{[0-9]+}}(%esp)
2326 ; X87-NEXT: popl %eax
2327 ; X87-NEXT: .cfi_def_cfa_offset 4
2330 ; X86-SSE-LABEL: uifdb:
2331 ; X86-SSE: # %bb.0: # %entry
2332 ; X86-SSE-NEXT: subl $12, %esp
2333 ; X86-SSE-NEXT: .cfi_def_cfa_offset 16
2334 ; X86-SSE-NEXT: movzbl {{[0-9]+}}(%esp), %eax
2335 ; X86-SSE-NEXT: cvtsi2sd %eax, %xmm0
2336 ; X86-SSE-NEXT: movsd %xmm0, (%esp)
2337 ; X86-SSE-NEXT: fldl (%esp)
2338 ; X86-SSE-NEXT: wait
2339 ; X86-SSE-NEXT: addl $12, %esp
2340 ; X86-SSE-NEXT: .cfi_def_cfa_offset 4
2341 ; X86-SSE-NEXT: retl
2344 ; SSE: # %bb.0: # %entry
2345 ; SSE-NEXT: movzbl %dil, %eax
2346 ; SSE-NEXT: cvtsi2sd %eax, %xmm0
2350 ; AVX: # %bb.0: # %entry
2351 ; AVX-NEXT: movzbl %dil, %eax
2352 ; AVX-NEXT: vcvtsi2sd %eax, %xmm0, %xmm0
2355 %result = call double @llvm.experimental.constrained.uitofp.f64.i8(i8 %x,
2356 metadata !"round.dynamic",
2357 metadata !"fpexcept.strict") #0
2361 define double @uifdw(i16 %x) #0 {
2363 ; X87: # %bb.0: # %entry
2364 ; X87-NEXT: pushl %eax
2365 ; X87-NEXT: .cfi_def_cfa_offset 8
2366 ; X87-NEXT: movzwl {{[0-9]+}}(%esp), %eax
2367 ; X87-NEXT: movl %eax, (%esp)
2368 ; X87-NEXT: fildl (%esp)
2370 ; X87-NEXT: popl %eax
2371 ; X87-NEXT: .cfi_def_cfa_offset 4
2374 ; X86-SSE-LABEL: uifdw:
2375 ; X86-SSE: # %bb.0: # %entry
2376 ; X86-SSE-NEXT: subl $12, %esp
2377 ; X86-SSE-NEXT: .cfi_def_cfa_offset 16
2378 ; X86-SSE-NEXT: movzwl {{[0-9]+}}(%esp), %eax
2379 ; X86-SSE-NEXT: cvtsi2sd %eax, %xmm0
2380 ; X86-SSE-NEXT: movsd %xmm0, (%esp)
2381 ; X86-SSE-NEXT: fldl (%esp)
2382 ; X86-SSE-NEXT: wait
2383 ; X86-SSE-NEXT: addl $12, %esp
2384 ; X86-SSE-NEXT: .cfi_def_cfa_offset 4
2385 ; X86-SSE-NEXT: retl
2388 ; SSE: # %bb.0: # %entry
2389 ; SSE-NEXT: movzwl %di, %eax
2390 ; SSE-NEXT: cvtsi2sd %eax, %xmm0
2394 ; AVX: # %bb.0: # %entry
2395 ; AVX-NEXT: movzwl %di, %eax
2396 ; AVX-NEXT: vcvtsi2sd %eax, %xmm0, %xmm0
2399 %result = call double @llvm.experimental.constrained.uitofp.f64.i16(i16 %x,
2400 metadata !"round.dynamic",
2401 metadata !"fpexcept.strict") #0
2405 define double @uifdi(i32 %x) #0 {
2407 ; X87: # %bb.0: # %entry
2408 ; X87-NEXT: subl $12, %esp
2409 ; X87-NEXT: .cfi_def_cfa_offset 16
2410 ; X87-NEXT: movl {{[0-9]+}}(%esp), %eax
2411 ; X87-NEXT: movl %eax, (%esp)
2412 ; X87-NEXT: movl $0, {{[0-9]+}}(%esp)
2413 ; X87-NEXT: fildll (%esp)
2415 ; X87-NEXT: addl $12, %esp
2416 ; X87-NEXT: .cfi_def_cfa_offset 4
2419 ; X86-SSE-LABEL: uifdi:
2420 ; X86-SSE: # %bb.0: # %entry
2421 ; X86-SSE-NEXT: subl $20, %esp
2422 ; X86-SSE-NEXT: .cfi_def_cfa_offset 24
2423 ; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax
2424 ; X86-SSE-NEXT: movl %eax, (%esp)
2425 ; X86-SSE-NEXT: movl $0, {{[0-9]+}}(%esp)
2426 ; X86-SSE-NEXT: fildll (%esp)
2427 ; X86-SSE-NEXT: fstpl {{[0-9]+}}(%esp)
2428 ; X86-SSE-NEXT: fldl {{[0-9]+}}(%esp)
2429 ; X86-SSE-NEXT: wait
2430 ; X86-SSE-NEXT: addl $20, %esp
2431 ; X86-SSE-NEXT: .cfi_def_cfa_offset 4
2432 ; X86-SSE-NEXT: retl
2435 ; SSE: # %bb.0: # %entry
2436 ; SSE-NEXT: movl %edi, %eax
2437 ; SSE-NEXT: cvtsi2sd %rax, %xmm0
2440 ; AVX1-LABEL: uifdi:
2441 ; AVX1: # %bb.0: # %entry
2442 ; AVX1-NEXT: movl %edi, %eax
2443 ; AVX1-NEXT: vcvtsi2sd %rax, %xmm0, %xmm0
2446 ; AVX512-LABEL: uifdi:
2447 ; AVX512: # %bb.0: # %entry
2448 ; AVX512-NEXT: vcvtusi2sd %edi, %xmm0, %xmm0
2451 %result = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 %x,
2452 metadata !"round.dynamic",
2453 metadata !"fpexcept.strict") #0
2457 define double @uifdl(i64 %x) #0 {
2459 ; X87: # %bb.0: # %entry
2460 ; X87-NEXT: subl $20, %esp
2461 ; X87-NEXT: .cfi_def_cfa_offset 24
2462 ; X87-NEXT: movl {{[0-9]+}}(%esp), %eax
2463 ; X87-NEXT: movl {{[0-9]+}}(%esp), %ecx
2464 ; X87-NEXT: movl %ecx, {{[0-9]+}}(%esp)
2465 ; X87-NEXT: movl %eax, (%esp)
2466 ; X87-NEXT: shrl $31, %ecx
2467 ; X87-NEXT: fildll (%esp)
2468 ; X87-NEXT: fadds {{\.?LCPI[0-9]+_[0-9]+}}(,%ecx,4)
2469 ; X87-NEXT: fstpl {{[0-9]+}}(%esp)
2470 ; X87-NEXT: fldl {{[0-9]+}}(%esp)
2472 ; X87-NEXT: addl $20, %esp
2473 ; X87-NEXT: .cfi_def_cfa_offset 4
2476 ; X86-SSE-LABEL: uifdl:
2477 ; X86-SSE: # %bb.0: # %entry
2478 ; X86-SSE-NEXT: subl $28, %esp
2479 ; X86-SSE-NEXT: .cfi_def_cfa_offset 32
2480 ; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax
2481 ; X86-SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
2482 ; X86-SSE-NEXT: movlps %xmm0, {{[0-9]+}}(%esp)
2483 ; X86-SSE-NEXT: shrl $31, %eax
2484 ; X86-SSE-NEXT: fildll {{[0-9]+}}(%esp)
2485 ; X86-SSE-NEXT: fadds {{\.?LCPI[0-9]+_[0-9]+}}(,%eax,4)
2486 ; X86-SSE-NEXT: fstpl {{[0-9]+}}(%esp)
2487 ; X86-SSE-NEXT: wait
2488 ; X86-SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
2489 ; X86-SSE-NEXT: movsd %xmm0, (%esp)
2490 ; X86-SSE-NEXT: fldl (%esp)
2491 ; X86-SSE-NEXT: wait
2492 ; X86-SSE-NEXT: addl $28, %esp
2493 ; X86-SSE-NEXT: .cfi_def_cfa_offset 4
2494 ; X86-SSE-NEXT: retl
2497 ; SSE: # %bb.0: # %entry
2498 ; SSE-NEXT: movq %rdi, %rax
2499 ; SSE-NEXT: shrq %rax
2500 ; SSE-NEXT: movl %edi, %ecx
2501 ; SSE-NEXT: andl $1, %ecx
2502 ; SSE-NEXT: orq %rax, %rcx
2503 ; SSE-NEXT: testq %rdi, %rdi
2504 ; SSE-NEXT: cmovnsq %rdi, %rcx
2505 ; SSE-NEXT: cvtsi2sd %rcx, %xmm0
2506 ; SSE-NEXT: jns .LBB48_2
2507 ; SSE-NEXT: # %bb.1:
2508 ; SSE-NEXT: addsd %xmm0, %xmm0
2509 ; SSE-NEXT: .LBB48_2: # %entry
2512 ; AVX1-LABEL: uifdl:
2513 ; AVX1: # %bb.0: # %entry
2514 ; AVX1-NEXT: movq %rdi, %rax
2515 ; AVX1-NEXT: shrq %rax
2516 ; AVX1-NEXT: movl %edi, %ecx
2517 ; AVX1-NEXT: andl $1, %ecx
2518 ; AVX1-NEXT: orq %rax, %rcx
2519 ; AVX1-NEXT: testq %rdi, %rdi
2520 ; AVX1-NEXT: cmovnsq %rdi, %rcx
2521 ; AVX1-NEXT: vcvtsi2sd %rcx, %xmm0, %xmm0
2522 ; AVX1-NEXT: jns .LBB48_2
2523 ; AVX1-NEXT: # %bb.1:
2524 ; AVX1-NEXT: vaddsd %xmm0, %xmm0, %xmm0
2525 ; AVX1-NEXT: .LBB48_2: # %entry
2528 ; AVX512-LABEL: uifdl:
2529 ; AVX512: # %bb.0: # %entry
2530 ; AVX512-NEXT: vcvtusi2sd %rdi, %xmm0, %xmm0
2533 %result = call double @llvm.experimental.constrained.uitofp.f64.i64(i64 %x,
2534 metadata !"round.dynamic",
2535 metadata !"fpexcept.strict") #0
2539 define float @uiffb(i8 %x) #0 {
2541 ; X87: # %bb.0: # %entry
2542 ; X87-NEXT: pushl %eax
2543 ; X87-NEXT: .cfi_def_cfa_offset 8
2544 ; X87-NEXT: movzbl {{[0-9]+}}(%esp), %eax
2545 ; X87-NEXT: movw %ax, {{[0-9]+}}(%esp)
2546 ; X87-NEXT: filds {{[0-9]+}}(%esp)
2548 ; X87-NEXT: popl %eax
2549 ; X87-NEXT: .cfi_def_cfa_offset 4
2552 ; X86-SSE-LABEL: uiffb:
2553 ; X86-SSE: # %bb.0: # %entry
2554 ; X86-SSE-NEXT: pushl %eax
2555 ; X86-SSE-NEXT: .cfi_def_cfa_offset 8
2556 ; X86-SSE-NEXT: movzbl {{[0-9]+}}(%esp), %eax
2557 ; X86-SSE-NEXT: cvtsi2ss %eax, %xmm0
2558 ; X86-SSE-NEXT: movss %xmm0, (%esp)
2559 ; X86-SSE-NEXT: flds (%esp)
2560 ; X86-SSE-NEXT: wait
2561 ; X86-SSE-NEXT: popl %eax
2562 ; X86-SSE-NEXT: .cfi_def_cfa_offset 4
2563 ; X86-SSE-NEXT: retl
2566 ; SSE: # %bb.0: # %entry
2567 ; SSE-NEXT: movzbl %dil, %eax
2568 ; SSE-NEXT: cvtsi2ss %eax, %xmm0
2572 ; AVX: # %bb.0: # %entry
2573 ; AVX-NEXT: movzbl %dil, %eax
2574 ; AVX-NEXT: vcvtsi2ss %eax, %xmm0, %xmm0
2577 %result = call float @llvm.experimental.constrained.uitofp.f32.i8(i8 %x,
2578 metadata !"round.dynamic",
2579 metadata !"fpexcept.strict") #0
2583 define float @uiffw(i16 %x) #0 {
2585 ; X87: # %bb.0: # %entry
2586 ; X87-NEXT: pushl %eax
2587 ; X87-NEXT: .cfi_def_cfa_offset 8
2588 ; X87-NEXT: movzwl {{[0-9]+}}(%esp), %eax
2589 ; X87-NEXT: movl %eax, (%esp)
2590 ; X87-NEXT: fildl (%esp)
2592 ; X87-NEXT: popl %eax
2593 ; X87-NEXT: .cfi_def_cfa_offset 4
2596 ; X86-SSE-LABEL: uiffw:
2597 ; X86-SSE: # %bb.0: # %entry
2598 ; X86-SSE-NEXT: pushl %eax
2599 ; X86-SSE-NEXT: .cfi_def_cfa_offset 8
2600 ; X86-SSE-NEXT: movzwl {{[0-9]+}}(%esp), %eax
2601 ; X86-SSE-NEXT: cvtsi2ss %eax, %xmm0
2602 ; X86-SSE-NEXT: movss %xmm0, (%esp)
2603 ; X86-SSE-NEXT: flds (%esp)
2604 ; X86-SSE-NEXT: wait
2605 ; X86-SSE-NEXT: popl %eax
2606 ; X86-SSE-NEXT: .cfi_def_cfa_offset 4
2607 ; X86-SSE-NEXT: retl
2610 ; SSE: # %bb.0: # %entry
2611 ; SSE-NEXT: movzwl %di, %eax
2612 ; SSE-NEXT: cvtsi2ss %eax, %xmm0
2616 ; AVX: # %bb.0: # %entry
2617 ; AVX-NEXT: movzwl %di, %eax
2618 ; AVX-NEXT: vcvtsi2ss %eax, %xmm0, %xmm0
2621 %result = call float @llvm.experimental.constrained.uitofp.f32.i16(i16 %x,
2622 metadata !"round.dynamic",
2623 metadata !"fpexcept.strict") #0
2627 define float @uiffi(i32 %x) #0 {
2629 ; X87: # %bb.0: # %entry
2630 ; X87-NEXT: subl $12, %esp
2631 ; X87-NEXT: .cfi_def_cfa_offset 16
2632 ; X87-NEXT: movl {{[0-9]+}}(%esp), %eax
2633 ; X87-NEXT: movl %eax, (%esp)
2634 ; X87-NEXT: movl $0, {{[0-9]+}}(%esp)
2635 ; X87-NEXT: fildll (%esp)
2637 ; X87-NEXT: addl $12, %esp
2638 ; X87-NEXT: .cfi_def_cfa_offset 4
2641 ; X86-SSE-LABEL: uiffi:
2642 ; X86-SSE: # %bb.0: # %entry
2643 ; X86-SSE-NEXT: subl $20, %esp
2644 ; X86-SSE-NEXT: .cfi_def_cfa_offset 24
2645 ; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax
2646 ; X86-SSE-NEXT: movl %eax, {{[0-9]+}}(%esp)
2647 ; X86-SSE-NEXT: movl $0, {{[0-9]+}}(%esp)
2648 ; X86-SSE-NEXT: fildll {{[0-9]+}}(%esp)
2649 ; X86-SSE-NEXT: fstps {{[0-9]+}}(%esp)
2650 ; X86-SSE-NEXT: flds {{[0-9]+}}(%esp)
2651 ; X86-SSE-NEXT: wait
2652 ; X86-SSE-NEXT: addl $20, %esp
2653 ; X86-SSE-NEXT: .cfi_def_cfa_offset 4
2654 ; X86-SSE-NEXT: retl
2657 ; SSE: # %bb.0: # %entry
2658 ; SSE-NEXT: movl %edi, %eax
2659 ; SSE-NEXT: cvtsi2ss %rax, %xmm0
2662 ; AVX1-LABEL: uiffi:
2663 ; AVX1: # %bb.0: # %entry
2664 ; AVX1-NEXT: movl %edi, %eax
2665 ; AVX1-NEXT: vcvtsi2ss %rax, %xmm0, %xmm0
2668 ; AVX512-LABEL: uiffi:
2669 ; AVX512: # %bb.0: # %entry
2670 ; AVX512-NEXT: vcvtusi2ss %edi, %xmm0, %xmm0
2673 %result = call float @llvm.experimental.constrained.uitofp.f32.i32(i32 %x,
2674 metadata !"round.dynamic",
2675 metadata !"fpexcept.strict") #0
2679 define float @uiffl(i64 %x) #0 {
2681 ; X87: # %bb.0: # %entry
2682 ; X87-NEXT: subl $20, %esp
2683 ; X87-NEXT: .cfi_def_cfa_offset 24
2684 ; X87-NEXT: movl {{[0-9]+}}(%esp), %eax
2685 ; X87-NEXT: movl {{[0-9]+}}(%esp), %ecx
2686 ; X87-NEXT: movl %ecx, {{[0-9]+}}(%esp)
2687 ; X87-NEXT: movl %eax, {{[0-9]+}}(%esp)
2688 ; X87-NEXT: shrl $31, %ecx
2689 ; X87-NEXT: fildll {{[0-9]+}}(%esp)
2690 ; X87-NEXT: fadds {{\.?LCPI[0-9]+_[0-9]+}}(,%ecx,4)
2691 ; X87-NEXT: fstps {{[0-9]+}}(%esp)
2692 ; X87-NEXT: flds {{[0-9]+}}(%esp)
2694 ; X87-NEXT: addl $20, %esp
2695 ; X87-NEXT: .cfi_def_cfa_offset 4
2698 ; X86-SSE-LABEL: uiffl:
2699 ; X86-SSE: # %bb.0: # %entry
2700 ; X86-SSE-NEXT: subl $20, %esp
2701 ; X86-SSE-NEXT: .cfi_def_cfa_offset 24
2702 ; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax
2703 ; X86-SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
2704 ; X86-SSE-NEXT: movlps %xmm0, {{[0-9]+}}(%esp)
2705 ; X86-SSE-NEXT: shrl $31, %eax
2706 ; X86-SSE-NEXT: fildll {{[0-9]+}}(%esp)
2707 ; X86-SSE-NEXT: fadds {{\.?LCPI[0-9]+_[0-9]+}}(,%eax,4)
2708 ; X86-SSE-NEXT: fstps {{[0-9]+}}(%esp)
2709 ; X86-SSE-NEXT: wait
2710 ; X86-SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
2711 ; X86-SSE-NEXT: movss %xmm0, (%esp)
2712 ; X86-SSE-NEXT: flds (%esp)
2713 ; X86-SSE-NEXT: wait
2714 ; X86-SSE-NEXT: addl $20, %esp
2715 ; X86-SSE-NEXT: .cfi_def_cfa_offset 4
2716 ; X86-SSE-NEXT: retl
2719 ; SSE: # %bb.0: # %entry
2720 ; SSE-NEXT: movq %rdi, %rax
2721 ; SSE-NEXT: shrq %rax
2722 ; SSE-NEXT: movl %edi, %ecx
2723 ; SSE-NEXT: andl $1, %ecx
2724 ; SSE-NEXT: orq %rax, %rcx
2725 ; SSE-NEXT: testq %rdi, %rdi
2726 ; SSE-NEXT: cmovnsq %rdi, %rcx
2727 ; SSE-NEXT: cvtsi2ss %rcx, %xmm0
2728 ; SSE-NEXT: jns .LBB52_2
2729 ; SSE-NEXT: # %bb.1:
2730 ; SSE-NEXT: addss %xmm0, %xmm0
2731 ; SSE-NEXT: .LBB52_2: # %entry
2734 ; AVX1-LABEL: uiffl:
2735 ; AVX1: # %bb.0: # %entry
2736 ; AVX1-NEXT: movq %rdi, %rax
2737 ; AVX1-NEXT: shrq %rax
2738 ; AVX1-NEXT: movl %edi, %ecx
2739 ; AVX1-NEXT: andl $1, %ecx
2740 ; AVX1-NEXT: orq %rax, %rcx
2741 ; AVX1-NEXT: testq %rdi, %rdi
2742 ; AVX1-NEXT: cmovnsq %rdi, %rcx
2743 ; AVX1-NEXT: vcvtsi2ss %rcx, %xmm0, %xmm0
2744 ; AVX1-NEXT: jns .LBB52_2
2745 ; AVX1-NEXT: # %bb.1:
2746 ; AVX1-NEXT: vaddss %xmm0, %xmm0, %xmm0
2747 ; AVX1-NEXT: .LBB52_2: # %entry
2750 ; AVX512-LABEL: uiffl:
2751 ; AVX512: # %bb.0: # %entry
2752 ; AVX512-NEXT: vcvtusi2ss %rdi, %xmm0, %xmm0
2755 %result = call float @llvm.experimental.constrained.uitofp.f32.i64(i64 %x,
2756 metadata !"round.dynamic",
2757 metadata !"fpexcept.strict") #0
2761 ; Verify that tan(42.0) isn't simplified when the rounding mode is unknown.
2762 define double @ftan() #0 {
2764 ; X87: # %bb.0: # %entry
2765 ; X87-NEXT: subl $12, %esp
2766 ; X87-NEXT: .cfi_def_cfa_offset 16
2767 ; X87-NEXT: flds {{\.?LCPI[0-9]+_[0-9]+}}
2768 ; X87-NEXT: fstpl (%esp)
2770 ; X87-NEXT: calll tan
2771 ; X87-NEXT: addl $12, %esp
2772 ; X87-NEXT: .cfi_def_cfa_offset 4
2775 ; X86-SSE-LABEL: ftan:
2776 ; X86-SSE: # %bb.0: # %entry
2777 ; X86-SSE-NEXT: subl $12, %esp
2778 ; X86-SSE-NEXT: .cfi_def_cfa_offset 16
2779 ; X86-SSE-NEXT: movsd {{.*#+}} xmm0 = [4.2E+1,0.0E+0]
2780 ; X86-SSE-NEXT: movsd %xmm0, (%esp)
2781 ; X86-SSE-NEXT: calll tan
2782 ; X86-SSE-NEXT: addl $12, %esp
2783 ; X86-SSE-NEXT: .cfi_def_cfa_offset 4
2784 ; X86-SSE-NEXT: retl
2787 ; SSE: # %bb.0: # %entry
2788 ; SSE-NEXT: pushq %rax
2789 ; SSE-NEXT: .cfi_def_cfa_offset 16
2790 ; SSE-NEXT: movsd {{.*#+}} xmm0 = [4.2E+1,0.0E+0]
2791 ; SSE-NEXT: callq tan@PLT
2792 ; SSE-NEXT: popq %rax
2793 ; SSE-NEXT: .cfi_def_cfa_offset 8
2797 ; AVX: # %bb.0: # %entry
2798 ; AVX-NEXT: pushq %rax
2799 ; AVX-NEXT: .cfi_def_cfa_offset 16
2800 ; AVX-NEXT: vmovsd {{.*#+}} xmm0 = [4.2E+1,0.0E+0]
2801 ; AVX-NEXT: callq tan@PLT
2802 ; AVX-NEXT: popq %rax
2803 ; AVX-NEXT: .cfi_def_cfa_offset 8
2806 %result = call double @llvm.experimental.constrained.tan.f64(double 42.0,
2807 metadata !"round.dynamic",
2808 metadata !"fpexcept.strict") #0
2812 ; Verify that acos(42.0) isn't simplified when the rounding mode is unknown.
2813 define double @facos() #0 {
2815 ; X87: # %bb.0: # %entry
2816 ; X87-NEXT: subl $12, %esp
2817 ; X87-NEXT: .cfi_def_cfa_offset 16
2818 ; X87-NEXT: flds {{\.?LCPI[0-9]+_[0-9]+}}
2819 ; X87-NEXT: fstpl (%esp)
2821 ; X87-NEXT: calll acos
2822 ; X87-NEXT: addl $12, %esp
2823 ; X87-NEXT: .cfi_def_cfa_offset 4
2826 ; X86-SSE-LABEL: facos:
2827 ; X86-SSE: # %bb.0: # %entry
2828 ; X86-SSE-NEXT: subl $12, %esp
2829 ; X86-SSE-NEXT: .cfi_def_cfa_offset 16
2830 ; X86-SSE-NEXT: movsd {{.*#+}} xmm0 = [4.2E+1,0.0E+0]
2831 ; X86-SSE-NEXT: movsd %xmm0, (%esp)
2832 ; X86-SSE-NEXT: calll acos
2833 ; X86-SSE-NEXT: addl $12, %esp
2834 ; X86-SSE-NEXT: .cfi_def_cfa_offset 4
2835 ; X86-SSE-NEXT: retl
2838 ; SSE: # %bb.0: # %entry
2839 ; SSE-NEXT: pushq %rax
2840 ; SSE-NEXT: .cfi_def_cfa_offset 16
2841 ; SSE-NEXT: movsd {{.*#+}} xmm0 = [4.2E+1,0.0E+0]
2842 ; SSE-NEXT: callq acos@PLT
2843 ; SSE-NEXT: popq %rax
2844 ; SSE-NEXT: .cfi_def_cfa_offset 8
2848 ; AVX: # %bb.0: # %entry
2849 ; AVX-NEXT: pushq %rax
2850 ; AVX-NEXT: .cfi_def_cfa_offset 16
2851 ; AVX-NEXT: vmovsd {{.*#+}} xmm0 = [4.2E+1,0.0E+0]
2852 ; AVX-NEXT: callq acos@PLT
2853 ; AVX-NEXT: popq %rax
2854 ; AVX-NEXT: .cfi_def_cfa_offset 8
2857 %result = call double @llvm.experimental.constrained.acos.f64(double 42.0,
2858 metadata !"round.dynamic",
2859 metadata !"fpexcept.strict") #0
2863 ; Verify that asin(42.0) isn't simplified when the rounding mode is unknown.
2864 define double @fasin() #0 {
2866 ; X87: # %bb.0: # %entry
2867 ; X87-NEXT: subl $12, %esp
2868 ; X87-NEXT: .cfi_def_cfa_offset 16
2869 ; X87-NEXT: flds {{\.?LCPI[0-9]+_[0-9]+}}
2870 ; X87-NEXT: fstpl (%esp)
2872 ; X87-NEXT: calll asin
2873 ; X87-NEXT: addl $12, %esp
2874 ; X87-NEXT: .cfi_def_cfa_offset 4
2877 ; X86-SSE-LABEL: fasin:
2878 ; X86-SSE: # %bb.0: # %entry
2879 ; X86-SSE-NEXT: subl $12, %esp
2880 ; X86-SSE-NEXT: .cfi_def_cfa_offset 16
2881 ; X86-SSE-NEXT: movsd {{.*#+}} xmm0 = [4.2E+1,0.0E+0]
2882 ; X86-SSE-NEXT: movsd %xmm0, (%esp)
2883 ; X86-SSE-NEXT: calll asin
2884 ; X86-SSE-NEXT: addl $12, %esp
2885 ; X86-SSE-NEXT: .cfi_def_cfa_offset 4
2886 ; X86-SSE-NEXT: retl
2889 ; SSE: # %bb.0: # %entry
2890 ; SSE-NEXT: pushq %rax
2891 ; SSE-NEXT: .cfi_def_cfa_offset 16
2892 ; SSE-NEXT: movsd {{.*#+}} xmm0 = [4.2E+1,0.0E+0]
2893 ; SSE-NEXT: callq asin@PLT
2894 ; SSE-NEXT: popq %rax
2895 ; SSE-NEXT: .cfi_def_cfa_offset 8
2899 ; AVX: # %bb.0: # %entry
2900 ; AVX-NEXT: pushq %rax
2901 ; AVX-NEXT: .cfi_def_cfa_offset 16
2902 ; AVX-NEXT: vmovsd {{.*#+}} xmm0 = [4.2E+1,0.0E+0]
2903 ; AVX-NEXT: callq asin@PLT
2904 ; AVX-NEXT: popq %rax
2905 ; AVX-NEXT: .cfi_def_cfa_offset 8
2908 %result = call double @llvm.experimental.constrained.asin.f64(double 42.0,
2909 metadata !"round.dynamic",
2910 metadata !"fpexcept.strict") #0
2914 ; Verify that atan(42.0) isn't simplified when the rounding mode is unknown.
2915 define double @fatan() #0 {
2917 ; X87: # %bb.0: # %entry
2918 ; X87-NEXT: subl $12, %esp
2919 ; X87-NEXT: .cfi_def_cfa_offset 16
2920 ; X87-NEXT: flds {{\.?LCPI[0-9]+_[0-9]+}}
2921 ; X87-NEXT: fstpl (%esp)
2923 ; X87-NEXT: calll atan
2924 ; X87-NEXT: addl $12, %esp
2925 ; X87-NEXT: .cfi_def_cfa_offset 4
2928 ; X86-SSE-LABEL: fatan:
2929 ; X86-SSE: # %bb.0: # %entry
2930 ; X86-SSE-NEXT: subl $12, %esp
2931 ; X86-SSE-NEXT: .cfi_def_cfa_offset 16
2932 ; X86-SSE-NEXT: movsd {{.*#+}} xmm0 = [4.2E+1,0.0E+0]
2933 ; X86-SSE-NEXT: movsd %xmm0, (%esp)
2934 ; X86-SSE-NEXT: calll atan
2935 ; X86-SSE-NEXT: addl $12, %esp
2936 ; X86-SSE-NEXT: .cfi_def_cfa_offset 4
2937 ; X86-SSE-NEXT: retl
2940 ; SSE: # %bb.0: # %entry
2941 ; SSE-NEXT: pushq %rax
2942 ; SSE-NEXT: .cfi_def_cfa_offset 16
2943 ; SSE-NEXT: movsd {{.*#+}} xmm0 = [4.2E+1,0.0E+0]
2944 ; SSE-NEXT: callq atan@PLT
2945 ; SSE-NEXT: popq %rax
2946 ; SSE-NEXT: .cfi_def_cfa_offset 8
2950 ; AVX: # %bb.0: # %entry
2951 ; AVX-NEXT: pushq %rax
2952 ; AVX-NEXT: .cfi_def_cfa_offset 16
2953 ; AVX-NEXT: vmovsd {{.*#+}} xmm0 = [4.2E+1,0.0E+0]
2954 ; AVX-NEXT: callq atan@PLT
2955 ; AVX-NEXT: popq %rax
2956 ; AVX-NEXT: .cfi_def_cfa_offset 8
2959 %result = call double @llvm.experimental.constrained.atan.f64(double 42.0,
2960 metadata !"round.dynamic",
2961 metadata !"fpexcept.strict") #0
2965 ; Verify that cosh(42.0) isn't simplified when the rounding mode is unknown.
2966 define double @fcosh() #0 {
2968 ; X87: # %bb.0: # %entry
2969 ; X87-NEXT: subl $12, %esp
2970 ; X87-NEXT: .cfi_def_cfa_offset 16
2971 ; X87-NEXT: flds {{\.?LCPI[0-9]+_[0-9]+}}
2972 ; X87-NEXT: fstpl (%esp)
2974 ; X87-NEXT: calll cosh
2975 ; X87-NEXT: addl $12, %esp
2976 ; X87-NEXT: .cfi_def_cfa_offset 4
2979 ; X86-SSE-LABEL: fcosh:
2980 ; X86-SSE: # %bb.0: # %entry
2981 ; X86-SSE-NEXT: subl $12, %esp
2982 ; X86-SSE-NEXT: .cfi_def_cfa_offset 16
2983 ; X86-SSE-NEXT: movsd {{.*#+}} xmm0 = [4.2E+1,0.0E+0]
2984 ; X86-SSE-NEXT: movsd %xmm0, (%esp)
2985 ; X86-SSE-NEXT: calll cosh
2986 ; X86-SSE-NEXT: addl $12, %esp
2987 ; X86-SSE-NEXT: .cfi_def_cfa_offset 4
2988 ; X86-SSE-NEXT: retl
2991 ; SSE: # %bb.0: # %entry
2992 ; SSE-NEXT: pushq %rax
2993 ; SSE-NEXT: .cfi_def_cfa_offset 16
2994 ; SSE-NEXT: movsd {{.*#+}} xmm0 = [4.2E+1,0.0E+0]
2995 ; SSE-NEXT: callq cosh@PLT
2996 ; SSE-NEXT: popq %rax
2997 ; SSE-NEXT: .cfi_def_cfa_offset 8
3001 ; AVX: # %bb.0: # %entry
3002 ; AVX-NEXT: pushq %rax
3003 ; AVX-NEXT: .cfi_def_cfa_offset 16
3004 ; AVX-NEXT: vmovsd {{.*#+}} xmm0 = [4.2E+1,0.0E+0]
3005 ; AVX-NEXT: callq cosh@PLT
3006 ; AVX-NEXT: popq %rax
3007 ; AVX-NEXT: .cfi_def_cfa_offset 8
3010 %result = call double @llvm.experimental.constrained.cosh.f64(double 42.0,
3011 metadata !"round.dynamic",
3012 metadata !"fpexcept.strict") #0
3016 ; Verify that sinh(42.0) isn't simplified when the rounding mode is unknown.
3017 define double @fsinh() #0 {
3019 ; X87: # %bb.0: # %entry
3020 ; X87-NEXT: subl $12, %esp
3021 ; X87-NEXT: .cfi_def_cfa_offset 16
3022 ; X87-NEXT: flds {{\.?LCPI[0-9]+_[0-9]+}}
3023 ; X87-NEXT: fstpl (%esp)
3025 ; X87-NEXT: calll sinh
3026 ; X87-NEXT: addl $12, %esp
3027 ; X87-NEXT: .cfi_def_cfa_offset 4
3030 ; X86-SSE-LABEL: fsinh:
3031 ; X86-SSE: # %bb.0: # %entry
3032 ; X86-SSE-NEXT: subl $12, %esp
3033 ; X86-SSE-NEXT: .cfi_def_cfa_offset 16
3034 ; X86-SSE-NEXT: movsd {{.*#+}} xmm0 = [4.2E+1,0.0E+0]
3035 ; X86-SSE-NEXT: movsd %xmm0, (%esp)
3036 ; X86-SSE-NEXT: calll sinh
3037 ; X86-SSE-NEXT: addl $12, %esp
3038 ; X86-SSE-NEXT: .cfi_def_cfa_offset 4
3039 ; X86-SSE-NEXT: retl
3042 ; SSE: # %bb.0: # %entry
3043 ; SSE-NEXT: pushq %rax
3044 ; SSE-NEXT: .cfi_def_cfa_offset 16
3045 ; SSE-NEXT: movsd {{.*#+}} xmm0 = [4.2E+1,0.0E+0]
3046 ; SSE-NEXT: callq sinh@PLT
3047 ; SSE-NEXT: popq %rax
3048 ; SSE-NEXT: .cfi_def_cfa_offset 8
3052 ; AVX: # %bb.0: # %entry
3053 ; AVX-NEXT: pushq %rax
3054 ; AVX-NEXT: .cfi_def_cfa_offset 16
3055 ; AVX-NEXT: vmovsd {{.*#+}} xmm0 = [4.2E+1,0.0E+0]
3056 ; AVX-NEXT: callq sinh@PLT
3057 ; AVX-NEXT: popq %rax
3058 ; AVX-NEXT: .cfi_def_cfa_offset 8
3061 %result = call double @llvm.experimental.constrained.sinh.f64(double 42.0,
3062 metadata !"round.dynamic",
3063 metadata !"fpexcept.strict") #0
3067 ; Verify that tanh(42.0) isn't simplified when the rounding mode is unknown.
3068 define double @ftanh() #0 {
3070 ; X87: # %bb.0: # %entry
3071 ; X87-NEXT: subl $12, %esp
3072 ; X87-NEXT: .cfi_def_cfa_offset 16
3073 ; X87-NEXT: flds {{\.?LCPI[0-9]+_[0-9]+}}
3074 ; X87-NEXT: fstpl (%esp)
3076 ; X87-NEXT: calll tanh
3077 ; X87-NEXT: addl $12, %esp
3078 ; X87-NEXT: .cfi_def_cfa_offset 4
3081 ; X86-SSE-LABEL: ftanh:
3082 ; X86-SSE: # %bb.0: # %entry
3083 ; X86-SSE-NEXT: subl $12, %esp
3084 ; X86-SSE-NEXT: .cfi_def_cfa_offset 16
3085 ; X86-SSE-NEXT: movsd {{.*#+}} xmm0 = [4.2E+1,0.0E+0]
3086 ; X86-SSE-NEXT: movsd %xmm0, (%esp)
3087 ; X86-SSE-NEXT: calll tanh
3088 ; X86-SSE-NEXT: addl $12, %esp
3089 ; X86-SSE-NEXT: .cfi_def_cfa_offset 4
3090 ; X86-SSE-NEXT: retl
3093 ; SSE: # %bb.0: # %entry
3094 ; SSE-NEXT: pushq %rax
3095 ; SSE-NEXT: .cfi_def_cfa_offset 16
3096 ; SSE-NEXT: movsd {{.*#+}} xmm0 = [4.2E+1,0.0E+0]
3097 ; SSE-NEXT: callq tanh@PLT
3098 ; SSE-NEXT: popq %rax
3099 ; SSE-NEXT: .cfi_def_cfa_offset 8
3103 ; AVX: # %bb.0: # %entry
3104 ; AVX-NEXT: pushq %rax
3105 ; AVX-NEXT: .cfi_def_cfa_offset 16
3106 ; AVX-NEXT: vmovsd {{.*#+}} xmm0 = [4.2E+1,0.0E+0]
3107 ; AVX-NEXT: callq tanh@PLT
3108 ; AVX-NEXT: popq %rax
3109 ; AVX-NEXT: .cfi_def_cfa_offset 8
3112 %result = call double @llvm.experimental.constrained.tanh.f64(double 42.0,
3113 metadata !"round.dynamic",
3114 metadata !"fpexcept.strict") #0
; Attribute set #0 is applied to every test function and every constrained
; intrinsic call site above, forcing strict floating-point semantics.
3118 attributes #0 = { strictfp }
; Module-level marker global in the "llvm.metadata" section.
; NOTE(review): presumably signals that the module accesses the FP
; environment — confirm against the current LangRef before relying on it.
3120 @llvm.fp.env = thread_local global i8 zeroinitializer, section "llvm.metadata"
; --- Constrained binary FP arithmetic (value operands + rounding-mode and
; --- exception-behavior metadata) ---
3121 declare double @llvm.experimental.constrained.fadd.f64(double, double, metadata, metadata)
3122 declare double @llvm.experimental.constrained.fsub.f64(double, double, metadata, metadata)
3123 declare double @llvm.experimental.constrained.fmul.f64(double, double, metadata, metadata)
3124 declare double @llvm.experimental.constrained.fdiv.f64(double, double, metadata, metadata)
3125 declare double @llvm.experimental.constrained.frem.f64(double, double, metadata, metadata)
; --- Constrained math-library operations ---
3126 declare double @llvm.experimental.constrained.sqrt.f64(double, metadata, metadata)
3127 declare double @llvm.experimental.constrained.pow.f64(double, double, metadata, metadata)
3128 declare double @llvm.experimental.constrained.powi.f64(double, i32, metadata, metadata)
3129 declare double @llvm.experimental.constrained.sin.f64(double, metadata, metadata)
3130 declare double @llvm.experimental.constrained.cos.f64(double, metadata, metadata)
3131 declare double @llvm.experimental.constrained.tan.f64(double, metadata, metadata)
3132 declare double @llvm.experimental.constrained.asin.f64(double, metadata, metadata)
3133 declare double @llvm.experimental.constrained.acos.f64(double, metadata, metadata)
3134 declare double @llvm.experimental.constrained.atan.f64(double, metadata, metadata)
3135 declare double @llvm.experimental.constrained.sinh.f64(double, metadata, metadata)
3136 declare double @llvm.experimental.constrained.cosh.f64(double, metadata, metadata)
3137 declare double @llvm.experimental.constrained.tanh.f64(double, metadata, metadata)
3138 declare double @llvm.experimental.constrained.exp.f64(double, metadata, metadata)
3139 declare double @llvm.experimental.constrained.exp2.f64(double, metadata, metadata)
3140 declare double @llvm.experimental.constrained.log.f64(double, metadata, metadata)
3141 declare double @llvm.experimental.constrained.log10.f64(double, metadata, metadata)
3142 declare double @llvm.experimental.constrained.log2.f64(double, metadata, metadata)
3143 declare double @llvm.experimental.constrained.rint.f64(double, metadata, metadata)
3144 declare double @llvm.experimental.constrained.nearbyint.f64(double, metadata, metadata)
; --- Constrained FP-to-signed-integer conversions (exception metadata only) ---
3145 declare i8 @llvm.experimental.constrained.fptosi.i8.f64(double, metadata)
3146 declare i16 @llvm.experimental.constrained.fptosi.i16.f64(double, metadata)
3147 declare i32 @llvm.experimental.constrained.fptosi.i32.f64(double, metadata)
3148 declare i64 @llvm.experimental.constrained.fptosi.i64.f64(double, metadata)
3149 declare i128 @llvm.experimental.constrained.fptosi.i128.f64(double, metadata)
; --- Constrained FP-to-unsigned-integer conversions ---
3150 declare i8 @llvm.experimental.constrained.fptoui.i8.f64(double, metadata)
3151 declare i16 @llvm.experimental.constrained.fptoui.i16.f64(double, metadata)
3152 declare i32 @llvm.experimental.constrained.fptoui.i32.f64(double, metadata)
3153 declare i64 @llvm.experimental.constrained.fptoui.i64.f64(double, metadata)
3154 declare i128 @llvm.experimental.constrained.fptoui.i128.f64(double, metadata)
; --- Constrained FP precision conversions ---
3155 declare float @llvm.experimental.constrained.fptrunc.f32.f64(double, metadata, metadata)
3156 declare double @llvm.experimental.constrained.fpext.f64.f32(float, metadata)
; --- Constrained round-to-integer operations ---
3157 declare i32 @llvm.experimental.constrained.lrint.i32.f64(double, metadata, metadata)
3158 declare i32 @llvm.experimental.constrained.lrint.i32.f32(float, metadata, metadata)
3159 declare i64 @llvm.experimental.constrained.llrint.i64.f64(double, metadata, metadata)
3160 declare i64 @llvm.experimental.constrained.llrint.i64.f32(float, metadata, metadata)
3161 declare i32 @llvm.experimental.constrained.lround.i32.f64(double, metadata)
3162 declare i32 @llvm.experimental.constrained.lround.i32.f32(float, metadata)
3163 declare i64 @llvm.experimental.constrained.llround.i64.f64(double, metadata)
3164 declare i64 @llvm.experimental.constrained.llround.i64.f32(float, metadata)
; --- Constrained signed-integer-to-FP conversions ---
3165 declare double @llvm.experimental.constrained.sitofp.f64.i8(i8, metadata, metadata)
3166 declare double @llvm.experimental.constrained.sitofp.f64.i16(i16, metadata, metadata)
3167 declare double @llvm.experimental.constrained.sitofp.f64.i32(i32, metadata, metadata)
3168 declare double @llvm.experimental.constrained.sitofp.f64.i64(i64, metadata, metadata)
3169 declare float @llvm.experimental.constrained.sitofp.f32.i8(i8, metadata, metadata)
3170 declare float @llvm.experimental.constrained.sitofp.f32.i16(i16, metadata, metadata)
3171 declare float @llvm.experimental.constrained.sitofp.f32.i32(i32, metadata, metadata)
3172 declare float @llvm.experimental.constrained.sitofp.f32.i64(i64, metadata, metadata)
; --- Constrained unsigned-integer-to-FP conversions ---
3173 declare double @llvm.experimental.constrained.uitofp.f64.i8(i8, metadata, metadata)
3174 declare double @llvm.experimental.constrained.uitofp.f64.i16(i16, metadata, metadata)
3175 declare double @llvm.experimental.constrained.uitofp.f64.i32(i32, metadata, metadata)
3176 declare double @llvm.experimental.constrained.uitofp.f64.i64(i64, metadata, metadata)
3177 declare float @llvm.experimental.constrained.uitofp.f32.i8(i8, metadata, metadata)
3178 declare float @llvm.experimental.constrained.uitofp.f32.i16(i16, metadata, metadata)
3179 declare float @llvm.experimental.constrained.uitofp.f32.i32(i32, metadata, metadata)
3180 declare float @llvm.experimental.constrained.uitofp.f32.i64(i64, metadata, metadata)