1 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2 ; RUN: llc < %s -O2 -mtriple=x86_64-linux-android -mattr=+mmx | FileCheck %s --check-prefix=X64
3 ; RUN: llc < %s -O2 -mtriple=x86_64-linux-gnu -mattr=+mmx | FileCheck %s --check-prefix=X64
4 ; RUN: llc < %s -O2 -mtriple=x86_64-linux-gnu -mattr=-mmx | FileCheck %s --check-prefix=X64_NO_MMX
5 ; RUN: llc < %s -O2 -mtriple=i686-linux-gnu -mattr=+mmx | FileCheck %s --check-prefix=X32
7 ; Check soft floating point conversion function calls.
9 @vi32 = common global i32 0, align 4
10 @vi64 = common global i64 0, align 8
11 @vu32 = common global i32 0, align 4
12 @vu64 = common global i64 0, align 8
13 @vf32 = common global float 0.000000e+00, align 4
14 @vf64 = common global double 0.000000e+00, align 8
15 @vf80 = common global x86_fp80 0xK00000000000000000000, align 8
16 @vf128 = common global fp128 0xL00000000000000000000000000000000, align 16
18 define void @TestFPExtF32_F128() nounwind {
; fpext float -> fp128 has no hardware support on x86, so it must lower to a
; libcall to the soft-float helper __extendsftf2 on all three configurations.
; On x86-64 the fp128 result comes back in xmm0; without MMX/SSE return regs
; it comes back split across rax/rdx; on i686 it is returned via sret memory.
19 ; X64-LABEL: TestFPExtF32_F128:
20 ; X64: # %bb.0: # %entry
21 ; X64-NEXT: pushq %rax
22 ; X64-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
23 ; X64-NEXT: callq __extendsftf2
24 ; X64-NEXT: movaps %xmm0, {{.*}}(%rip)
28 ; X64_NO_MMX-LABEL: TestFPExtF32_F128:
29 ; X64_NO_MMX: # %bb.0: # %entry
30 ; X64_NO_MMX-NEXT: pushq %rax
31 ; X64_NO_MMX-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
32 ; X64_NO_MMX-NEXT: callq __extendsftf2
33 ; X64_NO_MMX-NEXT: movq %rdx, vf128+{{.*}}(%rip)
34 ; X64_NO_MMX-NEXT: movq %rax, {{.*}}(%rip)
35 ; X64_NO_MMX-NEXT: popq %rax
36 ; X64_NO_MMX-NEXT: retq
38 ; X32-LABEL: TestFPExtF32_F128:
39 ; X32: # %bb.0: # %entry
40 ; X32-NEXT: pushl %esi
41 ; X32-NEXT: subl $24, %esp
43 ; X32-NEXT: fstps {{[0-9]+}}(%esp)
44 ; X32-NEXT: leal {{[0-9]+}}(%esp), %eax
45 ; X32-NEXT: movl %eax, (%esp)
46 ; X32-NEXT: calll __extendsftf2
47 ; X32-NEXT: subl $4, %esp
48 ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
49 ; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
50 ; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
51 ; X32-NEXT: movl {{[0-9]+}}(%esp), %esi
52 ; X32-NEXT: movl %esi, vf128+12
53 ; X32-NEXT: movl %edx, vf128+8
54 ; X32-NEXT: movl %ecx, vf128+4
55 ; X32-NEXT: movl %eax, vf128
56 ; X32-NEXT: addl $24, %esp
; IR under test: load float, widen to fp128, store to the global.
60 %0 = load float, float* @vf32, align 4
61 %conv = fpext float %0 to fp128
62 store fp128 %conv, fp128* @vf128, align 16
66 define void @TestFPExtF64_F128() nounwind {
; fpext double -> fp128 lowers to a libcall to __extenddftf2.
; Same return-value conventions as the float case above: xmm0 with SSE,
; rax/rdx pair without, sret memory on i686.
67 ; X64-LABEL: TestFPExtF64_F128:
68 ; X64: # %bb.0: # %entry
69 ; X64-NEXT: pushq %rax
70 ; X64-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
71 ; X64-NEXT: callq __extenddftf2
72 ; X64-NEXT: movaps %xmm0, {{.*}}(%rip)
76 ; X64_NO_MMX-LABEL: TestFPExtF64_F128:
77 ; X64_NO_MMX: # %bb.0: # %entry
78 ; X64_NO_MMX-NEXT: pushq %rax
79 ; X64_NO_MMX-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
80 ; X64_NO_MMX-NEXT: callq __extenddftf2
81 ; X64_NO_MMX-NEXT: movq %rdx, vf128+{{.*}}(%rip)
82 ; X64_NO_MMX-NEXT: movq %rax, {{.*}}(%rip)
83 ; X64_NO_MMX-NEXT: popq %rax
84 ; X64_NO_MMX-NEXT: retq
86 ; X32-LABEL: TestFPExtF64_F128:
87 ; X32: # %bb.0: # %entry
88 ; X32-NEXT: pushl %esi
89 ; X32-NEXT: subl $40, %esp
91 ; X32-NEXT: fstpl {{[0-9]+}}(%esp)
92 ; X32-NEXT: leal {{[0-9]+}}(%esp), %eax
93 ; X32-NEXT: movl %eax, (%esp)
94 ; X32-NEXT: calll __extenddftf2
95 ; X32-NEXT: subl $4, %esp
96 ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
97 ; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
98 ; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
99 ; X32-NEXT: movl {{[0-9]+}}(%esp), %esi
100 ; X32-NEXT: movl %esi, vf128+12
101 ; X32-NEXT: movl %edx, vf128+8
102 ; X32-NEXT: movl %ecx, vf128+4
103 ; X32-NEXT: movl %eax, vf128
104 ; X32-NEXT: addl $40, %esp
105 ; X32-NEXT: popl %esi
; IR under test: load double, widen to fp128, store to the global.
108 %0 = load double, double* @vf64, align 8
109 %conv = fpext double %0 to fp128
110 store fp128 %conv, fp128* @vf128, align 16
114 define void @TestFPExtF80_F128() nounwind {
; fpext x86_fp80 -> fp128 lowers to __extendxftf2. The 80-bit operand is
; passed in memory: the x64 checks show fldt/fstpt staging the value on the
; stack before the call rather than in an SSE register.
115 ; X64-LABEL: TestFPExtF80_F128:
116 ; X64: # %bb.0: # %entry
117 ; X64-NEXT: subq $24, %rsp
118 ; X64-NEXT: fldt {{.*}}(%rip)
119 ; X64-NEXT: fstpt (%rsp)
120 ; X64-NEXT: callq __extendxftf2
121 ; X64-NEXT: movaps %xmm0, {{.*}}(%rip)
122 ; X64-NEXT: addq $24, %rsp
125 ; X64_NO_MMX-LABEL: TestFPExtF80_F128:
126 ; X64_NO_MMX: # %bb.0: # %entry
127 ; X64_NO_MMX-NEXT: subq $24, %rsp
128 ; X64_NO_MMX-NEXT: fldt {{.*}}(%rip)
129 ; X64_NO_MMX-NEXT: fstpt (%rsp)
130 ; X64_NO_MMX-NEXT: callq __extendxftf2
131 ; X64_NO_MMX-NEXT: movq %rdx, vf128+{{.*}}(%rip)
132 ; X64_NO_MMX-NEXT: movq %rax, {{.*}}(%rip)
133 ; X64_NO_MMX-NEXT: addq $24, %rsp
134 ; X64_NO_MMX-NEXT: retq
136 ; X32-LABEL: TestFPExtF80_F128:
137 ; X32: # %bb.0: # %entry
138 ; X32-NEXT: pushl %esi
139 ; X32-NEXT: subl $40, %esp
140 ; X32-NEXT: fldt vf80
141 ; X32-NEXT: fstpt {{[0-9]+}}(%esp)
142 ; X32-NEXT: leal {{[0-9]+}}(%esp), %eax
143 ; X32-NEXT: movl %eax, (%esp)
144 ; X32-NEXT: calll __extendxftf2
145 ; X32-NEXT: subl $4, %esp
146 ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
147 ; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
148 ; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
149 ; X32-NEXT: movl {{[0-9]+}}(%esp), %esi
150 ; X32-NEXT: movl %esi, vf128+12
151 ; X32-NEXT: movl %edx, vf128+8
152 ; X32-NEXT: movl %ecx, vf128+4
153 ; X32-NEXT: movl %eax, vf128
154 ; X32-NEXT: addl $40, %esp
155 ; X32-NEXT: popl %esi
; IR under test: load x86_fp80, widen to fp128, store to the global.
158 %0 = load x86_fp80, x86_fp80* @vf80, align 8
159 %conv = fpext x86_fp80 %0 to fp128
160 store fp128 %conv, fp128* @vf128, align 16
164 define void @TestFPToSIF128_I32() nounwind {
; fptosi fp128 -> i32 lowers to __fixtfsi. Note how the fp128 argument is
; passed: xmm0 with SSE, rdi/rsi pair without MMX/SSE, and as four 32-bit
; stack pushes (high dword first) on i686.
165 ; X64-LABEL: TestFPToSIF128_I32:
166 ; X64: # %bb.0: # %entry
167 ; X64-NEXT: pushq %rax
168 ; X64-NEXT: movaps {{.*}}(%rip), %xmm0
169 ; X64-NEXT: callq __fixtfsi
170 ; X64-NEXT: movl %eax, {{.*}}(%rip)
171 ; X64-NEXT: popq %rax
174 ; X64_NO_MMX-LABEL: TestFPToSIF128_I32:
175 ; X64_NO_MMX: # %bb.0: # %entry
176 ; X64_NO_MMX-NEXT: pushq %rax
177 ; X64_NO_MMX-NEXT: movq {{.*}}(%rip), %rdi
178 ; X64_NO_MMX-NEXT: movq vf128+{{.*}}(%rip), %rsi
179 ; X64_NO_MMX-NEXT: callq __fixtfsi
180 ; X64_NO_MMX-NEXT: movl %eax, {{.*}}(%rip)
181 ; X64_NO_MMX-NEXT: popq %rax
182 ; X64_NO_MMX-NEXT: retq
184 ; X32-LABEL: TestFPToSIF128_I32:
185 ; X32: # %bb.0: # %entry
186 ; X32-NEXT: subl $12, %esp
187 ; X32-NEXT: pushl vf128+12
188 ; X32-NEXT: pushl vf128+8
189 ; X32-NEXT: pushl vf128+4
190 ; X32-NEXT: pushl vf128
191 ; X32-NEXT: calll __fixtfsi
192 ; X32-NEXT: addl $16, %esp
193 ; X32-NEXT: movl %eax, vi32
194 ; X32-NEXT: addl $12, %esp
; IR under test: load fp128, convert to signed i32, store to the global.
197 %0 = load fp128, fp128* @vf128, align 16
198 %conv = fptosi fp128 %0 to i32
199 store i32 %conv, i32* @vi32, align 4
203 define void @TestFPToUIF128_U32() nounwind {
; fptoui fp128 -> i32 lowers to the unsigned helper __fixunstfsi.
; Structure is otherwise identical to the signed test above.
204 ; X64-LABEL: TestFPToUIF128_U32:
205 ; X64: # %bb.0: # %entry
206 ; X64-NEXT: pushq %rax
207 ; X64-NEXT: movaps {{.*}}(%rip), %xmm0
208 ; X64-NEXT: callq __fixunstfsi
209 ; X64-NEXT: movl %eax, {{.*}}(%rip)
210 ; X64-NEXT: popq %rax
213 ; X64_NO_MMX-LABEL: TestFPToUIF128_U32:
214 ; X64_NO_MMX: # %bb.0: # %entry
215 ; X64_NO_MMX-NEXT: pushq %rax
216 ; X64_NO_MMX-NEXT: movq {{.*}}(%rip), %rdi
217 ; X64_NO_MMX-NEXT: movq vf128+{{.*}}(%rip), %rsi
218 ; X64_NO_MMX-NEXT: callq __fixunstfsi
219 ; X64_NO_MMX-NEXT: movl %eax, {{.*}}(%rip)
220 ; X64_NO_MMX-NEXT: popq %rax
221 ; X64_NO_MMX-NEXT: retq
223 ; X32-LABEL: TestFPToUIF128_U32:
224 ; X32: # %bb.0: # %entry
225 ; X32-NEXT: subl $12, %esp
226 ; X32-NEXT: pushl vf128+12
227 ; X32-NEXT: pushl vf128+8
228 ; X32-NEXT: pushl vf128+4
229 ; X32-NEXT: pushl vf128
230 ; X32-NEXT: calll __fixunstfsi
231 ; X32-NEXT: addl $16, %esp
232 ; X32-NEXT: movl %eax, vu32
233 ; X32-NEXT: addl $12, %esp
; IR under test: load fp128, convert to unsigned i32, store to the global.
236 %0 = load fp128, fp128* @vf128, align 16
237 %conv = fptoui fp128 %0 to i32
238 store i32 %conv, i32* @vu32, align 4
242 define void @TestFPToSIF128_I64() nounwind {
; Despite the name, the IR converts fp128 to i32 and then sign-extends to
; i64, so codegen still calls the 32-bit helper __fixtfsi and widens the
; result inline (cltq on x64, sarl $31 to synthesize the high half on i686).
243 ; X64-LABEL: TestFPToSIF128_I64:
244 ; X64: # %bb.0: # %entry
245 ; X64-NEXT: pushq %rax
246 ; X64-NEXT: movaps {{.*}}(%rip), %xmm0
247 ; X64-NEXT: callq __fixtfsi
249 ; X64-NEXT: movq %rax, {{.*}}(%rip)
250 ; X64-NEXT: popq %rax
253 ; X64_NO_MMX-LABEL: TestFPToSIF128_I64:
254 ; X64_NO_MMX: # %bb.0: # %entry
255 ; X64_NO_MMX-NEXT: pushq %rax
256 ; X64_NO_MMX-NEXT: movq {{.*}}(%rip), %rdi
257 ; X64_NO_MMX-NEXT: movq vf128+{{.*}}(%rip), %rsi
258 ; X64_NO_MMX-NEXT: callq __fixtfsi
259 ; X64_NO_MMX-NEXT: cltq
260 ; X64_NO_MMX-NEXT: movq %rax, {{.*}}(%rip)
261 ; X64_NO_MMX-NEXT: popq %rax
262 ; X64_NO_MMX-NEXT: retq
264 ; X32-LABEL: TestFPToSIF128_I64:
265 ; X32: # %bb.0: # %entry
266 ; X32-NEXT: subl $12, %esp
267 ; X32-NEXT: pushl vf128+12
268 ; X32-NEXT: pushl vf128+8
269 ; X32-NEXT: pushl vf128+4
270 ; X32-NEXT: pushl vf128
271 ; X32-NEXT: calll __fixtfsi
272 ; X32-NEXT: addl $16, %esp
273 ; X32-NEXT: movl %eax, vi64
274 ; X32-NEXT: sarl $31, %eax
275 ; X32-NEXT: movl %eax, vi64+4
276 ; X32-NEXT: addl $12, %esp
; IR under test: fp128 -> i32 -> sext i64, stored to the 64-bit global.
279 %0 = load fp128, fp128* @vf128, align 16
280 %conv = fptosi fp128 %0 to i32
281 %conv1 = sext i32 %conv to i64
282 store i64 %conv1, i64* @vi64, align 8
286 define void @TestFPToUIF128_U64() nounwind {
; Unsigned counterpart of the test above: fp128 -> u32 via __fixunstfsi,
; then zext to i64. The zero-extension is 'movl %eax, %eax' on x64 (32-bit
; write clears the upper half) and an explicit $0 store to the high dword
; on i686.
287 ; X64-LABEL: TestFPToUIF128_U64:
288 ; X64: # %bb.0: # %entry
289 ; X64-NEXT: pushq %rax
290 ; X64-NEXT: movaps {{.*}}(%rip), %xmm0
291 ; X64-NEXT: callq __fixunstfsi
292 ; X64-NEXT: movl %eax, %eax
293 ; X64-NEXT: movq %rax, {{.*}}(%rip)
294 ; X64-NEXT: popq %rax
297 ; X64_NO_MMX-LABEL: TestFPToUIF128_U64:
298 ; X64_NO_MMX: # %bb.0: # %entry
299 ; X64_NO_MMX-NEXT: pushq %rax
300 ; X64_NO_MMX-NEXT: movq {{.*}}(%rip), %rdi
301 ; X64_NO_MMX-NEXT: movq vf128+{{.*}}(%rip), %rsi
302 ; X64_NO_MMX-NEXT: callq __fixunstfsi
303 ; X64_NO_MMX-NEXT: movl %eax, %eax
304 ; X64_NO_MMX-NEXT: movq %rax, {{.*}}(%rip)
305 ; X64_NO_MMX-NEXT: popq %rax
306 ; X64_NO_MMX-NEXT: retq
308 ; X32-LABEL: TestFPToUIF128_U64:
309 ; X32: # %bb.0: # %entry
310 ; X32-NEXT: subl $12, %esp
311 ; X32-NEXT: pushl vf128+12
312 ; X32-NEXT: pushl vf128+8
313 ; X32-NEXT: pushl vf128+4
314 ; X32-NEXT: pushl vf128
315 ; X32-NEXT: calll __fixunstfsi
316 ; X32-NEXT: addl $16, %esp
317 ; X32-NEXT: movl %eax, vu64
318 ; X32-NEXT: movl $0, vu64+4
319 ; X32-NEXT: addl $12, %esp
; IR under test: fp128 -> u32 -> zext i64, stored to the 64-bit global.
322 %0 = load fp128, fp128* @vf128, align 16
323 %conv = fptoui fp128 %0 to i32
324 %conv1 = zext i32 %conv to i64
325 store i64 %conv1, i64* @vu64, align 8
329 define void @TestFPTruncF128_F32() nounwind {
; fptrunc fp128 -> float lowers to __trunctfsf2. The float result is in
; xmm0 on x86-64 and on the x87 stack (fstps) on i686.
330 ; X64-LABEL: TestFPTruncF128_F32:
331 ; X64: # %bb.0: # %entry
332 ; X64-NEXT: pushq %rax
333 ; X64-NEXT: movaps {{.*}}(%rip), %xmm0
334 ; X64-NEXT: callq __trunctfsf2
335 ; X64-NEXT: movss %xmm0, {{.*}}(%rip)
336 ; X64-NEXT: popq %rax
339 ; X64_NO_MMX-LABEL: TestFPTruncF128_F32:
340 ; X64_NO_MMX: # %bb.0: # %entry
341 ; X64_NO_MMX-NEXT: pushq %rax
342 ; X64_NO_MMX-NEXT: movq {{.*}}(%rip), %rdi
343 ; X64_NO_MMX-NEXT: movq vf128+{{.*}}(%rip), %rsi
344 ; X64_NO_MMX-NEXT: callq __trunctfsf2
345 ; X64_NO_MMX-NEXT: movss %xmm0, {{.*}}(%rip)
346 ; X64_NO_MMX-NEXT: popq %rax
347 ; X64_NO_MMX-NEXT: retq
349 ; X32-LABEL: TestFPTruncF128_F32:
350 ; X32: # %bb.0: # %entry
351 ; X32-NEXT: subl $12, %esp
352 ; X32-NEXT: pushl vf128+12
353 ; X32-NEXT: pushl vf128+8
354 ; X32-NEXT: pushl vf128+4
355 ; X32-NEXT: pushl vf128
356 ; X32-NEXT: calll __trunctfsf2
357 ; X32-NEXT: addl $16, %esp
358 ; X32-NEXT: fstps vf32
359 ; X32-NEXT: addl $12, %esp
; IR under test: load fp128, narrow to float, store to the global.
362 %0 = load fp128, fp128* @vf128, align 16
363 %conv = fptrunc fp128 %0 to float
364 store float %conv, float* @vf32, align 4
368 define void @TestFPTruncF128_F64() nounwind {
; fptrunc fp128 -> double lowers to __trunctfdf2; same shape as the
; float-truncation test above (movsd / fstpl instead of movss / fstps).
369 ; X64-LABEL: TestFPTruncF128_F64:
370 ; X64: # %bb.0: # %entry
371 ; X64-NEXT: pushq %rax
372 ; X64-NEXT: movaps {{.*}}(%rip), %xmm0
373 ; X64-NEXT: callq __trunctfdf2
374 ; X64-NEXT: movsd %xmm0, {{.*}}(%rip)
375 ; X64-NEXT: popq %rax
378 ; X64_NO_MMX-LABEL: TestFPTruncF128_F64:
379 ; X64_NO_MMX: # %bb.0: # %entry
380 ; X64_NO_MMX-NEXT: pushq %rax
381 ; X64_NO_MMX-NEXT: movq {{.*}}(%rip), %rdi
382 ; X64_NO_MMX-NEXT: movq vf128+{{.*}}(%rip), %rsi
383 ; X64_NO_MMX-NEXT: callq __trunctfdf2
384 ; X64_NO_MMX-NEXT: movsd %xmm0, {{.*}}(%rip)
385 ; X64_NO_MMX-NEXT: popq %rax
386 ; X64_NO_MMX-NEXT: retq
388 ; X32-LABEL: TestFPTruncF128_F64:
389 ; X32: # %bb.0: # %entry
390 ; X32-NEXT: subl $12, %esp
391 ; X32-NEXT: pushl vf128+12
392 ; X32-NEXT: pushl vf128+8
393 ; X32-NEXT: pushl vf128+4
394 ; X32-NEXT: pushl vf128
395 ; X32-NEXT: calll __trunctfdf2
396 ; X32-NEXT: addl $16, %esp
397 ; X32-NEXT: fstpl vf64
398 ; X32-NEXT: addl $12, %esp
; IR under test: load fp128, narrow to double, store to the global.
401 %0 = load fp128, fp128* @vf128, align 16
402 %conv = fptrunc fp128 %0 to double
403 store double %conv, double* @vf64, align 8
407 define void @TestFPTruncF128_F80() nounwind {
; fptrunc fp128 -> x86_fp80 lowers to __trunctfxf2, whose 80-bit result is
; returned on the x87 stack. On x64 it must be spilled with fstpt and then
; copied to the global as a 64-bit chunk plus a 16-bit tail.
408 ; X64-LABEL: TestFPTruncF128_F80:
409 ; X64: # %bb.0: # %entry
410 ; X64-NEXT: subq $24, %rsp
411 ; X64-NEXT: movaps {{.*}}(%rip), %xmm0
412 ; X64-NEXT: callq __trunctfxf2
413 ; X64-NEXT: fstpt (%rsp)
414 ; X64-NEXT: movq (%rsp), %rax
415 ; X64-NEXT: movq %rax, {{.*}}(%rip)
416 ; X64-NEXT: movl {{[0-9]+}}(%rsp), %eax
417 ; X64-NEXT: movw %ax, vf80+{{.*}}(%rip)
418 ; X64-NEXT: addq $24, %rsp
421 ; X64_NO_MMX-LABEL: TestFPTruncF128_F80:
422 ; X64_NO_MMX: # %bb.0: # %entry
423 ; X64_NO_MMX-NEXT: pushq %rax
424 ; X64_NO_MMX-NEXT: movq {{.*}}(%rip), %rdi
425 ; X64_NO_MMX-NEXT: movq vf128+{{.*}}(%rip), %rsi
426 ; X64_NO_MMX-NEXT: callq __trunctfxf2
427 ; X64_NO_MMX-NEXT: fstpt {{.*}}(%rip)
428 ; X64_NO_MMX-NEXT: popq %rax
429 ; X64_NO_MMX-NEXT: retq
431 ; X32-LABEL: TestFPTruncF128_F80:
432 ; X32: # %bb.0: # %entry
433 ; X32-NEXT: subl $12, %esp
434 ; X32-NEXT: pushl vf128+12
435 ; X32-NEXT: pushl vf128+8
436 ; X32-NEXT: pushl vf128+4
437 ; X32-NEXT: pushl vf128
438 ; X32-NEXT: calll __trunctfxf2
439 ; X32-NEXT: addl $16, %esp
440 ; X32-NEXT: fstpt vf80
441 ; X32-NEXT: addl $12, %esp
; IR under test: load fp128, narrow to x86_fp80, store to the global.
444 %0 = load fp128, fp128* @vf128, align 16
445 %conv = fptrunc fp128 %0 to x86_fp80
446 store x86_fp80 %conv, x86_fp80* @vf80, align 8
450 define void @TestSIToFPI32_F128() nounwind {
; sitofp i32 -> fp128 lowers to __floatsitf. On i686 the fp128 result is
; returned through an sret pointer passed as the hidden first argument.
451 ; X64-LABEL: TestSIToFPI32_F128:
452 ; X64: # %bb.0: # %entry
453 ; X64-NEXT: pushq %rax
454 ; X64-NEXT: movl {{.*}}(%rip), %edi
455 ; X64-NEXT: callq __floatsitf
456 ; X64-NEXT: movaps %xmm0, {{.*}}(%rip)
457 ; X64-NEXT: popq %rax
460 ; X64_NO_MMX-LABEL: TestSIToFPI32_F128:
461 ; X64_NO_MMX: # %bb.0: # %entry
462 ; X64_NO_MMX-NEXT: pushq %rax
463 ; X64_NO_MMX-NEXT: movl {{.*}}(%rip), %edi
464 ; X64_NO_MMX-NEXT: callq __floatsitf
465 ; X64_NO_MMX-NEXT: movq %rdx, vf128+{{.*}}(%rip)
466 ; X64_NO_MMX-NEXT: movq %rax, {{.*}}(%rip)
467 ; X64_NO_MMX-NEXT: popq %rax
468 ; X64_NO_MMX-NEXT: retq
470 ; X32-LABEL: TestSIToFPI32_F128:
471 ; X32: # %bb.0: # %entry
472 ; X32-NEXT: pushl %esi
473 ; X32-NEXT: subl $32, %esp
474 ; X32-NEXT: leal {{[0-9]+}}(%esp), %eax
475 ; X32-NEXT: pushl vi32
476 ; X32-NEXT: pushl %eax
477 ; X32-NEXT: calll __floatsitf
478 ; X32-NEXT: addl $12, %esp
479 ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
480 ; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
481 ; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
482 ; X32-NEXT: movl {{[0-9]+}}(%esp), %esi
483 ; X32-NEXT: movl %esi, vf128+12
484 ; X32-NEXT: movl %edx, vf128+8
485 ; X32-NEXT: movl %ecx, vf128+4
486 ; X32-NEXT: movl %eax, vf128
487 ; X32-NEXT: addl $24, %esp
488 ; X32-NEXT: popl %esi
; IR under test: load signed i32, convert to fp128, store to the global.
491 %0 = load i32, i32* @vi32, align 4
492 %conv = sitofp i32 %0 to fp128
493 store fp128 %conv, fp128* @vf128, align 16
497 define void @TestUIToFPU32_F128() #2 {
; uitofp i32 -> fp128 lowers to the unsigned helper __floatunsitf.
; Identical structure to the signed i32 test above.
498 ; X64-LABEL: TestUIToFPU32_F128:
499 ; X64: # %bb.0: # %entry
500 ; X64-NEXT: pushq %rax
501 ; X64-NEXT: movl {{.*}}(%rip), %edi
502 ; X64-NEXT: callq __floatunsitf
503 ; X64-NEXT: movaps %xmm0, {{.*}}(%rip)
504 ; X64-NEXT: popq %rax
507 ; X64_NO_MMX-LABEL: TestUIToFPU32_F128:
508 ; X64_NO_MMX: # %bb.0: # %entry
509 ; X64_NO_MMX-NEXT: pushq %rax
510 ; X64_NO_MMX-NEXT: movl {{.*}}(%rip), %edi
511 ; X64_NO_MMX-NEXT: callq __floatunsitf
512 ; X64_NO_MMX-NEXT: movq %rdx, vf128+{{.*}}(%rip)
513 ; X64_NO_MMX-NEXT: movq %rax, {{.*}}(%rip)
514 ; X64_NO_MMX-NEXT: popq %rax
515 ; X64_NO_MMX-NEXT: retq
517 ; X32-LABEL: TestUIToFPU32_F128:
518 ; X32: # %bb.0: # %entry
519 ; X32-NEXT: pushl %esi
520 ; X32-NEXT: subl $32, %esp
521 ; X32-NEXT: leal {{[0-9]+}}(%esp), %eax
522 ; X32-NEXT: pushl vu32
523 ; X32-NEXT: pushl %eax
524 ; X32-NEXT: calll __floatunsitf
525 ; X32-NEXT: addl $12, %esp
526 ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
527 ; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
528 ; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
529 ; X32-NEXT: movl {{[0-9]+}}(%esp), %esi
530 ; X32-NEXT: movl %esi, vf128+12
531 ; X32-NEXT: movl %edx, vf128+8
532 ; X32-NEXT: movl %ecx, vf128+4
533 ; X32-NEXT: movl %eax, vf128
534 ; X32-NEXT: addl $24, %esp
535 ; X32-NEXT: popl %esi
; IR under test: load unsigned i32, convert to fp128, store to the global.
538 %0 = load i32, i32* @vu32, align 4
539 %conv = uitofp i32 %0 to fp128
540 store fp128 %conv, fp128* @vf128, align 16
544 define void @TestSIToFPI64_F128() nounwind {
; sitofp i64 -> fp128 lowers to __floatditf; on i686 the 64-bit argument
; is pushed as two dwords (high half first).
545 ; X64-LABEL: TestSIToFPI64_F128:
546 ; X64: # %bb.0: # %entry
547 ; X64-NEXT: pushq %rax
548 ; X64-NEXT: movq {{.*}}(%rip), %rdi
549 ; X64-NEXT: callq __floatditf
550 ; X64-NEXT: movaps %xmm0, {{.*}}(%rip)
551 ; X64-NEXT: popq %rax
554 ; X64_NO_MMX-LABEL: TestSIToFPI64_F128:
555 ; X64_NO_MMX: # %bb.0: # %entry
556 ; X64_NO_MMX-NEXT: pushq %rax
557 ; X64_NO_MMX-NEXT: movq {{.*}}(%rip), %rdi
558 ; X64_NO_MMX-NEXT: callq __floatditf
559 ; X64_NO_MMX-NEXT: movq %rdx, vf128+{{.*}}(%rip)
560 ; X64_NO_MMX-NEXT: movq %rax, {{.*}}(%rip)
561 ; X64_NO_MMX-NEXT: popq %rax
562 ; X64_NO_MMX-NEXT: retq
564 ; X32-LABEL: TestSIToFPI64_F128:
565 ; X32: # %bb.0: # %entry
566 ; X32-NEXT: pushl %esi
567 ; X32-NEXT: subl $28, %esp
568 ; X32-NEXT: leal {{[0-9]+}}(%esp), %eax
569 ; X32-NEXT: pushl vi64+4
570 ; X32-NEXT: pushl vi64
571 ; X32-NEXT: pushl %eax
572 ; X32-NEXT: calll __floatditf
573 ; X32-NEXT: addl $12, %esp
574 ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
575 ; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
576 ; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
577 ; X32-NEXT: movl {{[0-9]+}}(%esp), %esi
578 ; X32-NEXT: movl %esi, vf128+12
579 ; X32-NEXT: movl %edx, vf128+8
580 ; X32-NEXT: movl %ecx, vf128+4
581 ; X32-NEXT: movl %eax, vf128
582 ; X32-NEXT: addl $24, %esp
583 ; X32-NEXT: popl %esi
; IR under test: load signed i64, convert to fp128, store to the global.
586 %0 = load i64, i64* @vi64, align 8
587 %conv = sitofp i64 %0 to fp128
588 store fp128 %conv, fp128* @vf128, align 16
592 define void @TestUIToFPU64_F128() #2 {
; uitofp i64 -> fp128 lowers to the unsigned helper __floatunditf.
; Identical structure to the signed i64 test above.
593 ; X64-LABEL: TestUIToFPU64_F128:
594 ; X64: # %bb.0: # %entry
595 ; X64-NEXT: pushq %rax
596 ; X64-NEXT: movq {{.*}}(%rip), %rdi
597 ; X64-NEXT: callq __floatunditf
598 ; X64-NEXT: movaps %xmm0, {{.*}}(%rip)
599 ; X64-NEXT: popq %rax
602 ; X64_NO_MMX-LABEL: TestUIToFPU64_F128:
603 ; X64_NO_MMX: # %bb.0: # %entry
604 ; X64_NO_MMX-NEXT: pushq %rax
605 ; X64_NO_MMX-NEXT: movq {{.*}}(%rip), %rdi
606 ; X64_NO_MMX-NEXT: callq __floatunditf
607 ; X64_NO_MMX-NEXT: movq %rdx, vf128+{{.*}}(%rip)
608 ; X64_NO_MMX-NEXT: movq %rax, {{.*}}(%rip)
609 ; X64_NO_MMX-NEXT: popq %rax
610 ; X64_NO_MMX-NEXT: retq
612 ; X32-LABEL: TestUIToFPU64_F128:
613 ; X32: # %bb.0: # %entry
614 ; X32-NEXT: pushl %esi
615 ; X32-NEXT: subl $28, %esp
616 ; X32-NEXT: leal {{[0-9]+}}(%esp), %eax
617 ; X32-NEXT: pushl vu64+4
618 ; X32-NEXT: pushl vu64
619 ; X32-NEXT: pushl %eax
620 ; X32-NEXT: calll __floatunditf
621 ; X32-NEXT: addl $12, %esp
622 ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
623 ; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
624 ; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
625 ; X32-NEXT: movl {{[0-9]+}}(%esp), %esi
626 ; X32-NEXT: movl %esi, vf128+12
627 ; X32-NEXT: movl %edx, vf128+8
628 ; X32-NEXT: movl %ecx, vf128+4
629 ; X32-NEXT: movl %eax, vf128
630 ; X32-NEXT: addl $24, %esp
631 ; X32-NEXT: popl %esi
; IR under test: load unsigned i64, convert to fp128, store to the global.
634 %0 = load i64, i64* @vu64, align 8
635 %conv = uitofp i64 %0 to fp128
636 store fp128 %conv, fp128* @vf128, align 16
640 define i32 @TestConst128(fp128 %v) nounwind {
; fcmp ogt against the fp128 constant 1.0 (0x3FFF... exponent) lowers to a
; call to __gttf2, whose integer result is then compared against zero.
; The constant is materialized from the constant pool with SSE, as an
; immediate movabsq without MMX/SSE, and as immediate pushes on i686.
641 ; X64-LABEL: TestConst128:
642 ; X64: # %bb.0: # %entry
643 ; X64-NEXT: pushq %rax
644 ; X64-NEXT: movaps {{.*}}(%rip), %xmm1
645 ; X64-NEXT: callq __gttf2
646 ; X64-NEXT: xorl %ecx, %ecx
647 ; X64-NEXT: testl %eax, %eax
649 ; X64-NEXT: movl %ecx, %eax
650 ; X64-NEXT: popq %rcx
653 ; X64_NO_MMX-LABEL: TestConst128:
654 ; X64_NO_MMX: # %bb.0: # %entry
655 ; X64_NO_MMX-NEXT: pushq %rax
656 ; X64_NO_MMX-NEXT: movabsq $4611404543450677248, %rcx # imm = 0x3FFF000000000000
657 ; X64_NO_MMX-NEXT: xorl %edx, %edx
658 ; X64_NO_MMX-NEXT: callq __gttf2
659 ; X64_NO_MMX-NEXT: xorl %ecx, %ecx
660 ; X64_NO_MMX-NEXT: testl %eax, %eax
661 ; X64_NO_MMX-NEXT: setg %cl
662 ; X64_NO_MMX-NEXT: movl %ecx, %eax
663 ; X64_NO_MMX-NEXT: popq %rcx
664 ; X64_NO_MMX-NEXT: retq
666 ; X32-LABEL: TestConst128:
667 ; X32: # %bb.0: # %entry
668 ; X32-NEXT: subl $12, %esp
669 ; X32-NEXT: pushl $1073676288 # imm = 0x3FFF0000
673 ; X32-NEXT: pushl {{[0-9]+}}(%esp)
674 ; X32-NEXT: pushl {{[0-9]+}}(%esp)
675 ; X32-NEXT: pushl {{[0-9]+}}(%esp)
676 ; X32-NEXT: pushl {{[0-9]+}}(%esp)
677 ; X32-NEXT: calll __gttf2
678 ; X32-NEXT: addl $32, %esp
679 ; X32-NEXT: xorl %ecx, %ecx
680 ; X32-NEXT: testl %eax, %eax
682 ; X32-NEXT: movl %ecx, %eax
683 ; X32-NEXT: addl $12, %esp
; IR under test: ordered greater-than compare of %v against fp128 1.0.
686 %cmp = fcmp ogt fp128 %v, 0xL00000000000000003FFF000000000000
687 %conv = zext i1 %cmp to i32
692 ; struct TestBits_ieee_ext {
696 ; union TestBits_LDU {
698 ; struct TestBits_ieee_ext bits;
700 ; int TestBits128(FP128 ld) {
701 ; union TestBits_LDU u;
703 ; return ((u.bits.v1 | u.bits.v2) == 0);
705 define i32 @TestBits128(fp128 %ld) nounwind {
; Squares the fp128 via __multf3, bitcasts the product to i128, and ORs the
; value with itself shifted right by 32 to test the low bits. Checks that
; the bitcast/shift/trunc sequence is lowered with plain integer ops (a
; single stack spill with SSE, direct rax/rdx use without MMX/SSE).
706 ; X64-LABEL: TestBits128:
707 ; X64: # %bb.0: # %entry
708 ; X64-NEXT: subq $24, %rsp
709 ; X64-NEXT: movaps %xmm0, %xmm1
710 ; X64-NEXT: callq __multf3
711 ; X64-NEXT: movaps %xmm0, (%rsp)
712 ; X64-NEXT: movq (%rsp), %rcx
713 ; X64-NEXT: movq %rcx, %rdx
714 ; X64-NEXT: shrq $32, %rdx
715 ; X64-NEXT: xorl %eax, %eax
716 ; X64-NEXT: orl %ecx, %edx
718 ; X64-NEXT: addq $24, %rsp
721 ; X64_NO_MMX-LABEL: TestBits128:
722 ; X64_NO_MMX: # %bb.0: # %entry
723 ; X64_NO_MMX-NEXT: pushq %rax
724 ; X64_NO_MMX-NEXT: movq %rdi, %rdx
725 ; X64_NO_MMX-NEXT: movq %rsi, %rcx
726 ; X64_NO_MMX-NEXT: callq __multf3
727 ; X64_NO_MMX-NEXT: movq %rax, %rdx
728 ; X64_NO_MMX-NEXT: shrq $32, %rdx
729 ; X64_NO_MMX-NEXT: xorl %ecx, %ecx
730 ; X64_NO_MMX-NEXT: orl %eax, %edx
731 ; X64_NO_MMX-NEXT: sete %cl
732 ; X64_NO_MMX-NEXT: movl %ecx, %eax
733 ; X64_NO_MMX-NEXT: popq %rcx
734 ; X64_NO_MMX-NEXT: retq
736 ; X32-LABEL: TestBits128:
737 ; X32: # %bb.0: # %entry
738 ; X32-NEXT: pushl %edi
739 ; X32-NEXT: pushl %esi
740 ; X32-NEXT: subl $20, %esp
741 ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
742 ; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
743 ; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
744 ; X32-NEXT: movl {{[0-9]+}}(%esp), %esi
745 ; X32-NEXT: subl $12, %esp
746 ; X32-NEXT: leal {{[0-9]+}}(%esp), %edi
747 ; X32-NEXT: pushl %esi
748 ; X32-NEXT: pushl %edx
749 ; X32-NEXT: pushl %ecx
750 ; X32-NEXT: pushl %eax
751 ; X32-NEXT: pushl %esi
752 ; X32-NEXT: pushl %edx
753 ; X32-NEXT: pushl %ecx
754 ; X32-NEXT: pushl %eax
755 ; X32-NEXT: pushl %edi
756 ; X32-NEXT: calll __multf3
757 ; X32-NEXT: addl $44, %esp
758 ; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
759 ; X32-NEXT: xorl %eax, %eax
760 ; X32-NEXT: orl (%esp), %ecx
762 ; X32-NEXT: addl $20, %esp
763 ; X32-NEXT: popl %esi
764 ; X32-NEXT: popl %edi
; IR under test: (bits(ld*ld) | bits(ld*ld) >> 32) truncated to i32 == 0.
767 %mul = fmul fp128 %ld, %ld
768 %0 = bitcast fp128 %mul to i128
769 %shift = lshr i128 %0, 32
770 %or5 = or i128 %shift, %0
771 %or = trunc i128 %or5 to i32
772 %cmp = icmp eq i32 %or, 0
773 %conv = zext i1 %cmp to i32
775 ; If TestBits128 fails due to any llvm or clang change,
776 ; please make sure the original simplified C code will
777 ; be compiled into correct IL and assembly code, not
778 ; just this TestBits128 test case. Better yet, try to
779 ; test the whole libm and its test cases.
782 ; C code: (compiled with -target x86_64-linux-android)
783 ; typedef long double __float128;
784 ; __float128 TestPair128(unsigned long a, unsigned long b) {
785 ; unsigned __int128 n;
786 ; unsigned __int128 v1 = ((unsigned __int128)a << 64);
787 ; unsigned __int128 v2 = (unsigned __int128)b;
789 ; return *(__float128*)&n;
791 define fp128 @TestPair128(i64 %a, i64 %b) nounwind {
; Assembles an i128 from two i64 halves ((a << 64) | b), adds 3, and
; bitcasts the sum to fp128. Checks the add-with-carry chain (addq/adcq on
; x64, a four-dword addl/adcl chain on i686) and that the i128 -> fp128
; bitcast needs only a stack round-trip with SSE (none without MMX/SSE,
; where the value is already in the rax/rdx return pair).
792 ; X64-LABEL: TestPair128:
793 ; X64: # %bb.0: # %entry
794 ; X64-NEXT: addq $3, %rsi
795 ; X64-NEXT: movq %rsi, -{{[0-9]+}}(%rsp)
796 ; X64-NEXT: adcq $0, %rdi
797 ; X64-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
798 ; X64-NEXT: movaps -{{[0-9]+}}(%rsp), %xmm0
801 ; X64_NO_MMX-LABEL: TestPair128:
802 ; X64_NO_MMX: # %bb.0: # %entry
803 ; X64_NO_MMX-NEXT: movq %rsi, %rax
804 ; X64_NO_MMX-NEXT: addq $3, %rax
805 ; X64_NO_MMX-NEXT: adcq $0, %rdi
806 ; X64_NO_MMX-NEXT: movq %rdi, %rdx
807 ; X64_NO_MMX-NEXT: retq
809 ; X32-LABEL: TestPair128:
810 ; X32: # %bb.0: # %entry
811 ; X32-NEXT: pushl %edi
812 ; X32-NEXT: pushl %esi
813 ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
814 ; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
815 ; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
816 ; X32-NEXT: movl {{[0-9]+}}(%esp), %esi
817 ; X32-NEXT: movl {{[0-9]+}}(%esp), %edi
818 ; X32-NEXT: addl $3, %ecx
819 ; X32-NEXT: adcl $0, %edx
820 ; X32-NEXT: adcl $0, %esi
821 ; X32-NEXT: adcl $0, %edi
822 ; X32-NEXT: movl %edx, 4(%eax)
823 ; X32-NEXT: movl %ecx, (%eax)
824 ; X32-NEXT: movl %esi, 8(%eax)
825 ; X32-NEXT: movl %edi, 12(%eax)
826 ; X32-NEXT: popl %esi
827 ; X32-NEXT: popl %edi
; IR under test: ((zext a << 64) | zext b) + 3, bitcast to fp128.
830 %conv = zext i64 %a to i128
831 %shl = shl nuw i128 %conv, 64
832 %conv1 = zext i64 %b to i128
833 %or = or i128 %shl, %conv1
834 %add = add i128 %or, 3
835 %0 = bitcast i128 %add to fp128
839 define fp128 @TestTruncCopysign(fp128 %x, i32 %n) nounwind {
; Conditional fp128 -> double -> fp128 round-trip: when n > 50000, narrow
; %x with __trunctfdf2, apply copysign(+inf, value) (lowered as an and/or
; of the sign bit with SSE, an x87 branch-and-select on i686), then widen
; back with __extenddftf2. Otherwise %x is returned unchanged via the phi.
840 ; X64-LABEL: TestTruncCopysign:
841 ; X64: # %bb.0: # %entry
842 ; X64-NEXT: cmpl $50001, %edi # imm = 0xC351
843 ; X64-NEXT: jl .LBB17_2
844 ; X64-NEXT: # %bb.1: # %if.then
845 ; X64-NEXT: pushq %rax
846 ; X64-NEXT: callq __trunctfdf2
847 ; X64-NEXT: andps {{.*}}(%rip), %xmm0
848 ; X64-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
849 ; X64-NEXT: orps %xmm1, %xmm0
850 ; X64-NEXT: callq __extenddftf2
851 ; X64-NEXT: addq $8, %rsp
852 ; X64-NEXT: .LBB17_2: # %cleanup
855 ; X64_NO_MMX-LABEL: TestTruncCopysign:
856 ; X64_NO_MMX: # %bb.0: # %entry
857 ; X64_NO_MMX-NEXT: movl %edx, %ecx
858 ; X64_NO_MMX-NEXT: movq %rsi, %rdx
859 ; X64_NO_MMX-NEXT: movq %rdi, %rax
860 ; X64_NO_MMX-NEXT: cmpl $50001, %ecx # imm = 0xC351
861 ; X64_NO_MMX-NEXT: jl .LBB17_2
862 ; X64_NO_MMX-NEXT: # %bb.1: # %if.then
863 ; X64_NO_MMX-NEXT: pushq %rax
864 ; X64_NO_MMX-NEXT: movq %rax, %rdi
865 ; X64_NO_MMX-NEXT: movq %rdx, %rsi
866 ; X64_NO_MMX-NEXT: callq __trunctfdf2
867 ; X64_NO_MMX-NEXT: andps {{.*}}(%rip), %xmm0
868 ; X64_NO_MMX-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
869 ; X64_NO_MMX-NEXT: orps %xmm1, %xmm0
870 ; X64_NO_MMX-NEXT: callq __extenddftf2
871 ; X64_NO_MMX-NEXT: addq $8, %rsp
872 ; X64_NO_MMX-NEXT: .LBB17_2: # %cleanup
873 ; X64_NO_MMX-NEXT: retq
875 ; X32-LABEL: TestTruncCopysign:
876 ; X32: # %bb.0: # %entry
877 ; X32-NEXT: pushl %edi
878 ; X32-NEXT: pushl %esi
879 ; X32-NEXT: subl $36, %esp
880 ; X32-NEXT: movl {{[0-9]+}}(%esp), %esi
881 ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
882 ; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
883 ; X32-NEXT: movl {{[0-9]+}}(%esp), %edi
884 ; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
885 ; X32-NEXT: cmpl $50001, {{[0-9]+}}(%esp) # imm = 0xC351
886 ; X32-NEXT: jl .LBB17_4
887 ; X32-NEXT: # %bb.1: # %if.then
888 ; X32-NEXT: pushl %eax
889 ; X32-NEXT: pushl %ecx
890 ; X32-NEXT: pushl %edi
891 ; X32-NEXT: pushl %edx
892 ; X32-NEXT: calll __trunctfdf2
893 ; X32-NEXT: addl $16, %esp
894 ; X32-NEXT: fstpl {{[0-9]+}}(%esp)
895 ; X32-NEXT: testb $-128, {{[0-9]+}}(%esp)
896 ; X32-NEXT: flds {{\.LCPI.*}}
897 ; X32-NEXT: flds {{\.LCPI.*}}
898 ; X32-NEXT: jne .LBB17_3
899 ; X32-NEXT: # %bb.2: # %if.then
900 ; X32-NEXT: fstp %st(1)
902 ; X32-NEXT: .LBB17_3: # %if.then
903 ; X32-NEXT: fstp %st(0)
904 ; X32-NEXT: subl $16, %esp
905 ; X32-NEXT: leal {{[0-9]+}}(%esp), %eax
906 ; X32-NEXT: movl %eax, (%esp)
907 ; X32-NEXT: fstpl {{[0-9]+}}(%esp)
908 ; X32-NEXT: calll __extenddftf2
909 ; X32-NEXT: addl $12, %esp
910 ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
911 ; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
912 ; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
913 ; X32-NEXT: movl {{[0-9]+}}(%esp), %edi
914 ; X32-NEXT: .LBB17_4: # %cleanup
915 ; X32-NEXT: movl %edx, (%esi)
916 ; X32-NEXT: movl %edi, 4(%esi)
917 ; X32-NEXT: movl %ecx, 8(%esi)
918 ; X32-NEXT: movl %eax, 12(%esi)
919 ; X32-NEXT: movl %esi, %eax
920 ; X32-NEXT: addl $36, %esp
921 ; X32-NEXT: popl %esi
922 ; X32-NEXT: popl %edi
; IR under test: branch on n > 50000; then-path narrows, applies
; copysign(+inf, .), widens back; phi merges with the untouched input.
925 %cmp = icmp sgt i32 %n, 50000
926 br i1 %cmp, label %if.then, label %cleanup
928 if.then: ; preds = %entry
929 %conv = fptrunc fp128 %x to double
930 %call = tail call double @copysign(double 0x7FF0000000000000, double %conv) #2
931 %conv1 = fpext double %call to fp128
934 cleanup: ; preds = %entry, %if.then
935 %retval.0 = phi fp128 [ %conv1, %if.then ], [ %x, %entry ]
939 define i1 @PR34866(i128 %x) nounwind {
; PR34866: comparing an i128 against an fp128-zero bitcast must not be
; routed through MMX. The zero constant folds to plain integer or/xor
; compares (the SSE path only uses xmm to materialize the zero pattern).
940 ; X64-LABEL: PR34866:
942 ; X64-NEXT: movaps {{.*}}(%rip), %xmm0
943 ; X64-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
944 ; X64-NEXT: xorq -{{[0-9]+}}(%rsp), %rsi
945 ; X64-NEXT: xorq -{{[0-9]+}}(%rsp), %rdi
946 ; X64-NEXT: orq %rsi, %rdi
950 ; X64_NO_MMX-LABEL: PR34866:
951 ; X64_NO_MMX: # %bb.0:
952 ; X64_NO_MMX-NEXT: orq %rsi, %rdi
953 ; X64_NO_MMX-NEXT: sete %al
954 ; X64_NO_MMX-NEXT: retq
956 ; X32-LABEL: PR34866:
958 ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
959 ; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
960 ; X32-NEXT: orl {{[0-9]+}}(%esp), %ecx
961 ; X32-NEXT: orl {{[0-9]+}}(%esp), %eax
962 ; X32-NEXT: orl %ecx, %eax
; IR under test: bitcast fp128 0.0 to i128, compare equal against %x.
965 %bc_mmx = bitcast fp128 0xL00000000000000000000000000000000 to i128
966 %cmp = icmp eq i128 %bc_mmx, %x
970 define i1 @PR34866_commute(i128 %x) nounwind {
; Commuted variant of PR34866 (%x on the left of the icmp); expected
; lowering is identical to the non-commuted form.
971 ; X64-LABEL: PR34866_commute:
973 ; X64-NEXT: movaps {{.*}}(%rip), %xmm0
974 ; X64-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
975 ; X64-NEXT: xorq -{{[0-9]+}}(%rsp), %rsi
976 ; X64-NEXT: xorq -{{[0-9]+}}(%rsp), %rdi
977 ; X64-NEXT: orq %rsi, %rdi
981 ; X64_NO_MMX-LABEL: PR34866_commute:
982 ; X64_NO_MMX: # %bb.0:
983 ; X64_NO_MMX-NEXT: orq %rsi, %rdi
984 ; X64_NO_MMX-NEXT: sete %al
985 ; X64_NO_MMX-NEXT: retq
987 ; X32-LABEL: PR34866_commute:
989 ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
990 ; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
991 ; X32-NEXT: orl {{[0-9]+}}(%esp), %ecx
992 ; X32-NEXT: orl {{[0-9]+}}(%esp), %eax
993 ; X32-NEXT: orl %ecx, %eax
; IR under test: same compare with operands commuted.
996 %bc_mmx = bitcast fp128 0xL00000000000000000000000000000000 to i128
997 %cmp = icmp eq i128 %x, %bc_mmx
1002 declare double @copysign(double, double) #1
1004 attributes #2 = { nounwind readnone }