1 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2 ; RUN: llc < %s -mtriple=i686-unknown-unknown -O3 | FileCheck %s --check-prefixes=X86
3 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -O3 | FileCheck %s --check-prefixes=X64
; Strict FP fma on x86_fp80 has no hardware lowering; verify it becomes a call
; to the fmal libcall, with all three operands spilled to the stack via fldt/fstpt.
5 define x86_fp80 @fma(x86_fp80 %x, x86_fp80 %y, x86_fp80 %z) nounwind strictfp {
7 ; X86: # %bb.0: # %entry
8 ; X86-NEXT: subl $36, %esp
9 ; X86-NEXT: fldt {{[0-9]+}}(%esp)
10 ; X86-NEXT: fldt {{[0-9]+}}(%esp)
11 ; X86-NEXT: fldt {{[0-9]+}}(%esp)
12 ; X86-NEXT: fstpt {{[0-9]+}}(%esp)
13 ; X86-NEXT: fstpt {{[0-9]+}}(%esp)
14 ; X86-NEXT: fstpt (%esp)
16 ; X86-NEXT: calll fmal
17 ; X86-NEXT: addl $36, %esp
21 ; X64: # %bb.0: # %entry
22 ; X64-NEXT: subq $56, %rsp
23 ; X64-NEXT: fldt {{[0-9]+}}(%rsp)
24 ; X64-NEXT: fldt {{[0-9]+}}(%rsp)
25 ; X64-NEXT: fldt {{[0-9]+}}(%rsp)
26 ; X64-NEXT: fstpt {{[0-9]+}}(%rsp)
27 ; X64-NEXT: fstpt {{[0-9]+}}(%rsp)
28 ; X64-NEXT: fstpt (%rsp)
30 ; X64-NEXT: callq fmal@PLT
31 ; X64-NEXT: addq $56, %rsp
34 %fma = call x86_fp80 @llvm.experimental.constrained.fma.f80(x86_fp80 %x, x86_fp80 %y, x86_fp80 %z, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
; Constrained frem on x86_fp80 lowers to the fmodl libcall on both targets.
38 define x86_fp80 @frem(x86_fp80 %x, x86_fp80 %y) nounwind strictfp {
40 ; X86: # %bb.0: # %entry
41 ; X86-NEXT: subl $24, %esp
42 ; X86-NEXT: fldt {{[0-9]+}}(%esp)
43 ; X86-NEXT: fldt {{[0-9]+}}(%esp)
44 ; X86-NEXT: fstpt {{[0-9]+}}(%esp)
45 ; X86-NEXT: fstpt (%esp)
47 ; X86-NEXT: calll fmodl
48 ; X86-NEXT: addl $24, %esp
52 ; X64: # %bb.0: # %entry
53 ; X64-NEXT: subq $40, %rsp
54 ; X64-NEXT: fldt {{[0-9]+}}(%rsp)
55 ; X64-NEXT: fldt {{[0-9]+}}(%rsp)
56 ; X64-NEXT: fstpt {{[0-9]+}}(%rsp)
57 ; X64-NEXT: fstpt (%rsp)
59 ; X64-NEXT: callq fmodl@PLT
60 ; X64-NEXT: addq $40, %rsp
63 %div = call x86_fp80 @llvm.experimental.constrained.frem.f80(x86_fp80 %x, x86_fp80 %y, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
; Constrained ceil on x86_fp80 lowers to the ceill libcall.
67 define x86_fp80 @ceil(x86_fp80 %x) nounwind strictfp {
69 ; X86: # %bb.0: # %entry
70 ; X86-NEXT: subl $12, %esp
71 ; X86-NEXT: fldt {{[0-9]+}}(%esp)
72 ; X86-NEXT: fstpt (%esp)
74 ; X86-NEXT: calll ceill
75 ; X86-NEXT: addl $12, %esp
79 ; X64: # %bb.0: # %entry
80 ; X64-NEXT: subq $24, %rsp
81 ; X64-NEXT: fldt {{[0-9]+}}(%rsp)
82 ; X64-NEXT: fstpt (%rsp)
84 ; X64-NEXT: callq ceill@PLT
85 ; X64-NEXT: addq $24, %rsp
88 %ceil = call x86_fp80 @llvm.experimental.constrained.ceil.f80(x86_fp80 %x, metadata !"fpexcept.strict") #0
; Constrained cos on x86_fp80 lowers to the cosl libcall (no x87 fcos is emitted).
92 define x86_fp80 @cos(x86_fp80 %x) nounwind strictfp {
94 ; X86: # %bb.0: # %entry
95 ; X86-NEXT: subl $12, %esp
96 ; X86-NEXT: fldt {{[0-9]+}}(%esp)
97 ; X86-NEXT: fstpt (%esp)
99 ; X86-NEXT: calll cosl
100 ; X86-NEXT: addl $12, %esp
104 ; X64: # %bb.0: # %entry
105 ; X64-NEXT: subq $24, %rsp
106 ; X64-NEXT: fldt {{[0-9]+}}(%rsp)
107 ; X64-NEXT: fstpt (%rsp)
109 ; X64-NEXT: callq cosl@PLT
110 ; X64-NEXT: addq $24, %rsp
113 %cos = call x86_fp80 @llvm.experimental.constrained.cos.f80(x86_fp80 %x, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
; Constrained exp on x86_fp80 lowers to the expl libcall.
117 define x86_fp80 @exp(x86_fp80 %x) nounwind strictfp {
119 ; X86: # %bb.0: # %entry
120 ; X86-NEXT: subl $12, %esp
121 ; X86-NEXT: fldt {{[0-9]+}}(%esp)
122 ; X86-NEXT: fstpt (%esp)
124 ; X86-NEXT: calll expl
125 ; X86-NEXT: addl $12, %esp
129 ; X64: # %bb.0: # %entry
130 ; X64-NEXT: subq $24, %rsp
131 ; X64-NEXT: fldt {{[0-9]+}}(%rsp)
132 ; X64-NEXT: fstpt (%rsp)
134 ; X64-NEXT: callq expl@PLT
135 ; X64-NEXT: addq $24, %rsp
138 %exp = call x86_fp80 @llvm.experimental.constrained.exp.f80(x86_fp80 %x, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
; Constrained exp2 on x86_fp80 lowers to the exp2l libcall.
142 define x86_fp80 @exp2(x86_fp80 %x) nounwind strictfp {
144 ; X86: # %bb.0: # %entry
145 ; X86-NEXT: subl $12, %esp
146 ; X86-NEXT: fldt {{[0-9]+}}(%esp)
147 ; X86-NEXT: fstpt (%esp)
149 ; X86-NEXT: calll exp2l
150 ; X86-NEXT: addl $12, %esp
154 ; X64: # %bb.0: # %entry
155 ; X64-NEXT: subq $24, %rsp
156 ; X64-NEXT: fldt {{[0-9]+}}(%rsp)
157 ; X64-NEXT: fstpt (%rsp)
159 ; X64-NEXT: callq exp2l@PLT
160 ; X64-NEXT: addq $24, %rsp
163 %exp2 = call x86_fp80 @llvm.experimental.constrained.exp2.f80(x86_fp80 %x, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
; Constrained floor on x86_fp80 lowers to the floorl libcall.
167 define x86_fp80 @floor(x86_fp80 %x) nounwind strictfp {
169 ; X86: # %bb.0: # %entry
170 ; X86-NEXT: subl $12, %esp
171 ; X86-NEXT: fldt {{[0-9]+}}(%esp)
172 ; X86-NEXT: fstpt (%esp)
174 ; X86-NEXT: calll floorl
175 ; X86-NEXT: addl $12, %esp
179 ; X64: # %bb.0: # %entry
180 ; X64-NEXT: subq $24, %rsp
181 ; X64-NEXT: fldt {{[0-9]+}}(%rsp)
182 ; X64-NEXT: fstpt (%rsp)
184 ; X64-NEXT: callq floorl@PLT
185 ; X64-NEXT: addq $24, %rsp
188 %floor = call x86_fp80 @llvm.experimental.constrained.floor.f80(x86_fp80 %x, metadata !"fpexcept.strict") #0
; Constrained log on x86_fp80 lowers to the logl libcall.
192 define x86_fp80 @log(x86_fp80 %x) nounwind strictfp {
194 ; X86: # %bb.0: # %entry
195 ; X86-NEXT: subl $12, %esp
196 ; X86-NEXT: fldt {{[0-9]+}}(%esp)
197 ; X86-NEXT: fstpt (%esp)
199 ; X86-NEXT: calll logl
200 ; X86-NEXT: addl $12, %esp
204 ; X64: # %bb.0: # %entry
205 ; X64-NEXT: subq $24, %rsp
206 ; X64-NEXT: fldt {{[0-9]+}}(%rsp)
207 ; X64-NEXT: fstpt (%rsp)
209 ; X64-NEXT: callq logl@PLT
210 ; X64-NEXT: addq $24, %rsp
213 %log = call x86_fp80 @llvm.experimental.constrained.log.f80(x86_fp80 %x, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
; Constrained log10 on x86_fp80 lowers to the log10l libcall.
217 define x86_fp80 @log10(x86_fp80 %x) nounwind strictfp {
219 ; X86: # %bb.0: # %entry
220 ; X86-NEXT: subl $12, %esp
221 ; X86-NEXT: fldt {{[0-9]+}}(%esp)
222 ; X86-NEXT: fstpt (%esp)
224 ; X86-NEXT: calll log10l
225 ; X86-NEXT: addl $12, %esp
229 ; X64: # %bb.0: # %entry
230 ; X64-NEXT: subq $24, %rsp
231 ; X64-NEXT: fldt {{[0-9]+}}(%rsp)
232 ; X64-NEXT: fstpt (%rsp)
234 ; X64-NEXT: callq log10l@PLT
235 ; X64-NEXT: addq $24, %rsp
238 %log10 = call x86_fp80 @llvm.experimental.constrained.log10.f80(x86_fp80 %x, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
; Constrained log2 on x86_fp80 lowers to the log2l libcall.
242 define x86_fp80 @log2(x86_fp80 %x) nounwind strictfp {
244 ; X86: # %bb.0: # %entry
245 ; X86-NEXT: subl $12, %esp
246 ; X86-NEXT: fldt {{[0-9]+}}(%esp)
247 ; X86-NEXT: fstpt (%esp)
249 ; X86-NEXT: calll log2l
250 ; X86-NEXT: addl $12, %esp
254 ; X64: # %bb.0: # %entry
255 ; X64-NEXT: subq $24, %rsp
256 ; X64-NEXT: fldt {{[0-9]+}}(%rsp)
257 ; X64-NEXT: fstpt (%rsp)
259 ; X64-NEXT: callq log2l@PLT
260 ; X64-NEXT: addq $24, %rsp
263 %log2 = call x86_fp80 @llvm.experimental.constrained.log2.f80(x86_fp80 %x, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
; Constrained maxnum on x86_fp80 lowers to the fmaxl libcall.
267 define x86_fp80 @maxnum(x86_fp80 %x, x86_fp80 %y) nounwind strictfp {
269 ; X86: # %bb.0: # %entry
270 ; X86-NEXT: subl $24, %esp
271 ; X86-NEXT: fldt {{[0-9]+}}(%esp)
272 ; X86-NEXT: fldt {{[0-9]+}}(%esp)
273 ; X86-NEXT: fstpt {{[0-9]+}}(%esp)
274 ; X86-NEXT: fstpt (%esp)
276 ; X86-NEXT: calll fmaxl
277 ; X86-NEXT: addl $24, %esp
281 ; X64: # %bb.0: # %entry
282 ; X64-NEXT: subq $40, %rsp
283 ; X64-NEXT: fldt {{[0-9]+}}(%rsp)
284 ; X64-NEXT: fldt {{[0-9]+}}(%rsp)
285 ; X64-NEXT: fstpt {{[0-9]+}}(%rsp)
286 ; X64-NEXT: fstpt (%rsp)
288 ; X64-NEXT: callq fmaxl@PLT
289 ; X64-NEXT: addq $40, %rsp
292 %maxnum = call x86_fp80 @llvm.experimental.constrained.maxnum.f80(x86_fp80 %x, x86_fp80 %y, metadata !"fpexcept.strict") #0
; Constrained minnum on x86_fp80 lowers to the fminl libcall.
296 define x86_fp80 @minnum(x86_fp80 %x, x86_fp80 %y) nounwind strictfp {
298 ; X86: # %bb.0: # %entry
299 ; X86-NEXT: subl $24, %esp
300 ; X86-NEXT: fldt {{[0-9]+}}(%esp)
301 ; X86-NEXT: fldt {{[0-9]+}}(%esp)
302 ; X86-NEXT: fstpt {{[0-9]+}}(%esp)
303 ; X86-NEXT: fstpt (%esp)
305 ; X86-NEXT: calll fminl
306 ; X86-NEXT: addl $24, %esp
310 ; X64: # %bb.0: # %entry
311 ; X64-NEXT: subq $40, %rsp
312 ; X64-NEXT: fldt {{[0-9]+}}(%rsp)
313 ; X64-NEXT: fldt {{[0-9]+}}(%rsp)
314 ; X64-NEXT: fstpt {{[0-9]+}}(%rsp)
315 ; X64-NEXT: fstpt (%rsp)
317 ; X64-NEXT: callq fminl@PLT
318 ; X64-NEXT: addq $40, %rsp
321 %minnum = call x86_fp80 @llvm.experimental.constrained.minnum.f80(x86_fp80 %x, x86_fp80 %y, metadata !"fpexcept.strict") #0
; Constrained nearbyint on x86_fp80 lowers to the nearbyintl libcall.
325 define x86_fp80 @nearbyint(x86_fp80 %x) nounwind strictfp {
326 ; X86-LABEL: nearbyint:
327 ; X86: # %bb.0: # %entry
328 ; X86-NEXT: subl $12, %esp
329 ; X86-NEXT: fldt {{[0-9]+}}(%esp)
330 ; X86-NEXT: fstpt (%esp)
332 ; X86-NEXT: calll nearbyintl
333 ; X86-NEXT: addl $12, %esp
336 ; X64-LABEL: nearbyint:
337 ; X64: # %bb.0: # %entry
338 ; X64-NEXT: subq $24, %rsp
339 ; X64-NEXT: fldt {{[0-9]+}}(%rsp)
340 ; X64-NEXT: fstpt (%rsp)
342 ; X64-NEXT: callq nearbyintl@PLT
343 ; X64-NEXT: addq $24, %rsp
346 %nearbyint = call x86_fp80 @llvm.experimental.constrained.nearbyint.f80(x86_fp80 %x, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
347 ret x86_fp80 %nearbyint
; Constrained pow on x86_fp80 lowers to the powl libcall.
350 define x86_fp80 @pow(x86_fp80 %x, x86_fp80 %y) nounwind strictfp {
352 ; X86: # %bb.0: # %entry
353 ; X86-NEXT: subl $24, %esp
354 ; X86-NEXT: fldt {{[0-9]+}}(%esp)
355 ; X86-NEXT: fldt {{[0-9]+}}(%esp)
356 ; X86-NEXT: fstpt {{[0-9]+}}(%esp)
357 ; X86-NEXT: fstpt (%esp)
359 ; X86-NEXT: calll powl
360 ; X86-NEXT: addl $24, %esp
364 ; X64: # %bb.0: # %entry
365 ; X64-NEXT: subq $40, %rsp
366 ; X64-NEXT: fldt {{[0-9]+}}(%rsp)
367 ; X64-NEXT: fldt {{[0-9]+}}(%rsp)
368 ; X64-NEXT: fstpt {{[0-9]+}}(%rsp)
369 ; X64-NEXT: fstpt (%rsp)
371 ; X64-NEXT: callq powl@PLT
372 ; X64-NEXT: addq $40, %rsp
375 %pow = call x86_fp80 @llvm.experimental.constrained.pow.f80(x86_fp80 %x, x86_fp80 %y, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
; Constrained powi (x86_fp80 base, i32 exponent) lowers to the compiler-rt
; helper __powixf2; the i32 exponent is passed alongside the spilled fp80.
379 define x86_fp80 @powi(x86_fp80 %x, i32 %y) nounwind strictfp {
381 ; X86: # %bb.0: # %entry
382 ; X86-NEXT: subl $16, %esp
383 ; X86-NEXT: fldt {{[0-9]+}}(%esp)
385 ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
386 ; X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
387 ; X86-NEXT: fstpt (%esp)
389 ; X86-NEXT: calll __powixf2
390 ; X86-NEXT: addl $16, %esp
394 ; X64: # %bb.0: # %entry
395 ; X64-NEXT: subq $24, %rsp
396 ; X64-NEXT: fldt {{[0-9]+}}(%rsp)
397 ; X64-NEXT: fstpt (%rsp)
399 ; X64-NEXT: callq __powixf2@PLT
400 ; X64-NEXT: addq $24, %rsp
403 %powi = call x86_fp80 @llvm.experimental.constrained.powi.f80(x86_fp80 %x, i32 %y, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
; Constrained rint on x86_fp80 lowers to the rintl libcall.
407 define x86_fp80 @rint(x86_fp80 %x) nounwind strictfp {
409 ; X86: # %bb.0: # %entry
410 ; X86-NEXT: subl $12, %esp
411 ; X86-NEXT: fldt {{[0-9]+}}(%esp)
412 ; X86-NEXT: fstpt (%esp)
414 ; X86-NEXT: calll rintl
415 ; X86-NEXT: addl $12, %esp
419 ; X64: # %bb.0: # %entry
420 ; X64-NEXT: subq $24, %rsp
421 ; X64-NEXT: fldt {{[0-9]+}}(%rsp)
422 ; X64-NEXT: fstpt (%rsp)
424 ; X64-NEXT: callq rintl@PLT
425 ; X64-NEXT: addq $24, %rsp
428 %rint = call x86_fp80 @llvm.experimental.constrained.rint.f80(x86_fp80 %x, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
; Constrained round on x86_fp80 lowers to the roundl libcall.
432 define x86_fp80 @round(x86_fp80 %x) nounwind strictfp {
434 ; X86: # %bb.0: # %entry
435 ; X86-NEXT: subl $12, %esp
436 ; X86-NEXT: fldt {{[0-9]+}}(%esp)
437 ; X86-NEXT: fstpt (%esp)
439 ; X86-NEXT: calll roundl
440 ; X86-NEXT: addl $12, %esp
444 ; X64: # %bb.0: # %entry
445 ; X64-NEXT: subq $24, %rsp
446 ; X64-NEXT: fldt {{[0-9]+}}(%rsp)
447 ; X64-NEXT: fstpt (%rsp)
449 ; X64-NEXT: callq roundl@PLT
450 ; X64-NEXT: addq $24, %rsp
453 %round = call x86_fp80 @llvm.experimental.constrained.round.f80(x86_fp80 %x, metadata !"fpexcept.strict") #0
; Constrained roundeven on x86_fp80 lowers to the roundevenl libcall.
457 define x86_fp80 @roundeven(x86_fp80 %x) nounwind strictfp {
458 ; X86-LABEL: roundeven:
459 ; X86: # %bb.0: # %entry
460 ; X86-NEXT: subl $12, %esp
461 ; X86-NEXT: fldt {{[0-9]+}}(%esp)
462 ; X86-NEXT: fstpt (%esp)
464 ; X86-NEXT: calll roundevenl
465 ; X86-NEXT: addl $12, %esp
468 ; X64-LABEL: roundeven:
469 ; X64: # %bb.0: # %entry
470 ; X64-NEXT: subq $24, %rsp
471 ; X64-NEXT: fldt {{[0-9]+}}(%rsp)
472 ; X64-NEXT: fstpt (%rsp)
474 ; X64-NEXT: callq roundevenl@PLT
475 ; X64-NEXT: addq $24, %rsp
478 %roundeven = call x86_fp80 @llvm.experimental.constrained.roundeven.f80(x86_fp80 %x, metadata !"fpexcept.strict") #0
479 ret x86_fp80 %roundeven
; Constrained sin on x86_fp80 lowers to the sinl libcall (no x87 fsin is emitted).
482 define x86_fp80 @sin(x86_fp80 %x) nounwind strictfp {
484 ; X86: # %bb.0: # %entry
485 ; X86-NEXT: subl $12, %esp
486 ; X86-NEXT: fldt {{[0-9]+}}(%esp)
487 ; X86-NEXT: fstpt (%esp)
489 ; X86-NEXT: calll sinl
490 ; X86-NEXT: addl $12, %esp
494 ; X64: # %bb.0: # %entry
495 ; X64-NEXT: subq $24, %rsp
496 ; X64-NEXT: fldt {{[0-9]+}}(%rsp)
497 ; X64-NEXT: fstpt (%rsp)
499 ; X64-NEXT: callq sinl@PLT
500 ; X64-NEXT: addq $24, %rsp
503 %sin = call x86_fp80 @llvm.experimental.constrained.sin.f80(x86_fp80 %x, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
; Constrained trunc on x86_fp80 lowers to the truncl libcall.
507 define x86_fp80 @trunc(x86_fp80 %x) nounwind strictfp {
509 ; X86: # %bb.0: # %entry
510 ; X86-NEXT: subl $12, %esp
511 ; X86-NEXT: fldt {{[0-9]+}}(%esp)
512 ; X86-NEXT: fstpt (%esp)
514 ; X86-NEXT: calll truncl
515 ; X86-NEXT: addl $12, %esp
519 ; X64: # %bb.0: # %entry
520 ; X64-NEXT: subq $24, %rsp
521 ; X64-NEXT: fldt {{[0-9]+}}(%rsp)
522 ; X64-NEXT: fstpt (%rsp)
524 ; X64-NEXT: callq truncl@PLT
525 ; X64-NEXT: addq $24, %rsp
528 %trunc = call x86_fp80 @llvm.experimental.constrained.trunc.f80(x86_fp80 %x, metadata !"fpexcept.strict") #0
; Constrained lrint (x86_fp80 -> i32) lowers to the lrintl libcall.
532 define i32 @lrint(x86_fp80 %x) nounwind strictfp {
534 ; X86: # %bb.0: # %entry
535 ; X86-NEXT: subl $12, %esp
536 ; X86-NEXT: fldt {{[0-9]+}}(%esp)
537 ; X86-NEXT: fstpt (%esp)
539 ; X86-NEXT: calll lrintl
540 ; X86-NEXT: addl $12, %esp
544 ; X64: # %bb.0: # %entry
545 ; X64-NEXT: subq $24, %rsp
546 ; X64-NEXT: fldt {{[0-9]+}}(%rsp)
547 ; X64-NEXT: fstpt (%rsp)
549 ; X64-NEXT: callq lrintl@PLT
550 ; X64-NEXT: addq $24, %rsp
553 %rint = call i32 @llvm.experimental.constrained.lrint.i32.f80(x86_fp80 %x, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
; Constrained llrint (x86_fp80 -> i64) lowers to the llrintl libcall.
557 define i64 @llrint(x86_fp80 %x) nounwind strictfp {
559 ; X86: # %bb.0: # %entry
560 ; X86-NEXT: subl $12, %esp
561 ; X86-NEXT: fldt {{[0-9]+}}(%esp)
562 ; X86-NEXT: fstpt (%esp)
564 ; X86-NEXT: calll llrintl
565 ; X86-NEXT: addl $12, %esp
569 ; X64: # %bb.0: # %entry
570 ; X64-NEXT: subq $24, %rsp
571 ; X64-NEXT: fldt {{[0-9]+}}(%rsp)
572 ; X64-NEXT: fstpt (%rsp)
574 ; X64-NEXT: callq llrintl@PLT
575 ; X64-NEXT: addq $24, %rsp
578 %rint = call i64 @llvm.experimental.constrained.llrint.i64.f80(x86_fp80 %x, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
; Constrained lround (x86_fp80 -> i32) lowers to the lroundl libcall.
582 define i32 @lround(x86_fp80 %x) nounwind strictfp {
584 ; X86: # %bb.0: # %entry
585 ; X86-NEXT: subl $12, %esp
586 ; X86-NEXT: fldt {{[0-9]+}}(%esp)
587 ; X86-NEXT: fstpt (%esp)
589 ; X86-NEXT: calll lroundl
590 ; X86-NEXT: addl $12, %esp
594 ; X64: # %bb.0: # %entry
595 ; X64-NEXT: subq $24, %rsp
596 ; X64-NEXT: fldt {{[0-9]+}}(%rsp)
597 ; X64-NEXT: fstpt (%rsp)
599 ; X64-NEXT: callq lroundl@PLT
600 ; X64-NEXT: addq $24, %rsp
603 %round = call i32 @llvm.experimental.constrained.lround.i32.f80(x86_fp80 %x, metadata !"fpexcept.strict") #0
; Constrained llround (x86_fp80 -> i64) lowers to the llroundl libcall.
607 define i64 @llround(x86_fp80 %x) nounwind strictfp {
608 ; X86-LABEL: llround:
609 ; X86: # %bb.0: # %entry
610 ; X86-NEXT: subl $12, %esp
611 ; X86-NEXT: fldt {{[0-9]+}}(%esp)
612 ; X86-NEXT: fstpt (%esp)
614 ; X86-NEXT: calll llroundl
615 ; X86-NEXT: addl $12, %esp
618 ; X64-LABEL: llround:
619 ; X64: # %bb.0: # %entry
620 ; X64-NEXT: subq $24, %rsp
621 ; X64-NEXT: fldt {{[0-9]+}}(%rsp)
622 ; X64-NEXT: fstpt (%rsp)
624 ; X64-NEXT: callq llroundl@PLT
625 ; X64-NEXT: addq $24, %rsp
628 %round = call i64 @llvm.experimental.constrained.llround.i64.f80(x86_fp80 %x, metadata !"fpexcept.strict") #0
; Attribute group #0 marks every intrinsic call site above as strictfp;
; the declarations below cover each constrained intrinsic exercised by this test.
632 attributes #0 = { strictfp }
634 declare x86_fp80 @llvm.experimental.constrained.fma.f80(x86_fp80, x86_fp80, x86_fp80, metadata, metadata)
635 declare x86_fp80 @llvm.experimental.constrained.frem.f80(x86_fp80, x86_fp80, metadata, metadata)
636 declare x86_fp80 @llvm.experimental.constrained.ceil.f80(x86_fp80, metadata)
637 declare x86_fp80 @llvm.experimental.constrained.cos.f80(x86_fp80, metadata, metadata)
638 declare x86_fp80 @llvm.experimental.constrained.exp.f80(x86_fp80, metadata, metadata)
639 declare x86_fp80 @llvm.experimental.constrained.exp2.f80(x86_fp80, metadata, metadata)
640 declare x86_fp80 @llvm.experimental.constrained.floor.f80(x86_fp80, metadata)
641 declare x86_fp80 @llvm.experimental.constrained.log.f80(x86_fp80, metadata, metadata)
642 declare x86_fp80 @llvm.experimental.constrained.log10.f80(x86_fp80, metadata, metadata)
643 declare x86_fp80 @llvm.experimental.constrained.log2.f80(x86_fp80, metadata, metadata)
644 declare x86_fp80 @llvm.experimental.constrained.maxnum.f80(x86_fp80, x86_fp80, metadata)
645 declare x86_fp80 @llvm.experimental.constrained.minnum.f80(x86_fp80, x86_fp80, metadata)
646 declare x86_fp80 @llvm.experimental.constrained.nearbyint.f80(x86_fp80, metadata, metadata)
647 declare x86_fp80 @llvm.experimental.constrained.pow.f80(x86_fp80, x86_fp80, metadata, metadata)
648 declare x86_fp80 @llvm.experimental.constrained.powi.f80(x86_fp80, i32, metadata, metadata)
649 declare x86_fp80 @llvm.experimental.constrained.rint.f80(x86_fp80, metadata, metadata)
650 declare x86_fp80 @llvm.experimental.constrained.round.f80(x86_fp80, metadata)
651 declare x86_fp80 @llvm.experimental.constrained.roundeven.f80(x86_fp80, metadata)
652 declare x86_fp80 @llvm.experimental.constrained.sin.f80(x86_fp80, metadata, metadata)
653 declare x86_fp80 @llvm.experimental.constrained.trunc.f80(x86_fp80, metadata)
654 declare i32 @llvm.experimental.constrained.lrint.i32.f80(x86_fp80, metadata, metadata)
655 declare i64 @llvm.experimental.constrained.llrint.i64.f80(x86_fp80, metadata, metadata)
656 declare i32 @llvm.experimental.constrained.lround.i32.f80(x86_fp80, metadata)
657 declare i64 @llvm.experimental.constrained.llround.i64.f80(x86_fp80, metadata)