1 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2 ; RUN: llc < %s -mtriple=i686-unknown -mattr=+sse2 | FileCheck %s --check-prefix=ALL --check-prefix=X32
3 ; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+sse2 | FileCheck %s --check-prefix=ALL --check-prefix=X64
9 define float @tst1(float %a, float %b) nounwind {
12 ; X32-NEXT: subl $8, %esp
13 ; X32-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
14 ; X32-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
15 ; X32-NEXT: movss %xmm1, {{[0-9]+}}(%esp)
16 ; X32-NEXT: movss %xmm0, (%esp)
17 ; X32-NEXT: calll copysignf
18 ; X32-NEXT: addl $8, %esp
23 ; X64-NEXT: movaps %xmm0, %xmm2
24 ; X64-NEXT: movaps %xmm1, %xmm0
25 ; X64-NEXT: movaps %xmm2, %xmm1
26 ; X64-NEXT: jmp copysignf # TAILCALL
27 %tmp = tail call float @copysignf( float %b, float %a )
31 define double @tst2(double %a, float %b, float %c) nounwind {
34 ; X32-NEXT: subl $16, %esp
35 ; X32-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
36 ; X32-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
37 ; X32-NEXT: addss {{[0-9]+}}(%esp), %xmm1
38 ; X32-NEXT: cvtss2sd %xmm1, %xmm1
39 ; X32-NEXT: movsd %xmm0, (%esp)
40 ; X32-NEXT: movsd %xmm1, {{[0-9]+}}(%esp)
41 ; X32-NEXT: calll copysign
42 ; X32-NEXT: addl $16, %esp
47 ; X64-NEXT: addss %xmm2, %xmm1
48 ; X64-NEXT: cvtss2sd %xmm1, %xmm1
49 ; X64-NEXT: jmp copysign # TAILCALL
50 %tmp1 = fadd float %b, %c
51 %tmp2 = fpext float %tmp1 to double
52 %tmp = tail call double @copysign( double %a, double %tmp2 )
; External libm copysign functions; called (tail-called) by @tst1 and @tst2 above.
56 declare float @copysignf(float, float)
57 declare double @copysign(double, double)
63 define float @int1(float %a, float %b) nounwind {
66 ; X32-NEXT: pushl %eax
67 ; X32-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
68 ; X32-NEXT: andps {{\.LCPI.*}}, %xmm0
69 ; X32-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
70 ; X32-NEXT: andps {{\.LCPI.*}}, %xmm1
71 ; X32-NEXT: orps %xmm0, %xmm1
72 ; X32-NEXT: movss %xmm1, (%esp)
73 ; X32-NEXT: flds (%esp)
79 ; X64-NEXT: andps {{.*}}(%rip), %xmm0
80 ; X64-NEXT: andps {{.*}}(%rip), %xmm1
81 ; X64-NEXT: orps %xmm1, %xmm0
83 %tmp = tail call float @llvm.copysign.f32( float %b, float %a )
87 define double @int2(double %a, float %b, float %c) nounwind {
90 ; X32-NEXT: pushl %ebp
91 ; X32-NEXT: movl %esp, %ebp
92 ; X32-NEXT: andl $-8, %esp
93 ; X32-NEXT: subl $8, %esp
94 ; X32-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
95 ; X32-NEXT: addss 20(%ebp), %xmm0
96 ; X32-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
97 ; X32-NEXT: andps {{\.LCPI.*}}, %xmm1
98 ; X32-NEXT: cvtss2sd %xmm0, %xmm0
99 ; X32-NEXT: andps {{\.LCPI.*}}, %xmm0
100 ; X32-NEXT: orps %xmm1, %xmm0
101 ; X32-NEXT: movlps %xmm0, (%esp)
102 ; X32-NEXT: fldl (%esp)
103 ; X32-NEXT: movl %ebp, %esp
104 ; X32-NEXT: popl %ebp
109 ; X64-NEXT: addss %xmm2, %xmm1
110 ; X64-NEXT: cvtss2sd %xmm1, %xmm1
111 ; X64-NEXT: andps {{.*}}(%rip), %xmm1
112 ; X64-NEXT: andps {{.*}}(%rip), %xmm0
113 ; X64-NEXT: orps %xmm1, %xmm0
115 %tmp1 = fadd float %b, %c
116 %tmp2 = fpext float %tmp1 to double
117 %tmp = tail call double @llvm.copysign.f64( double %a, double %tmp2 )
121 define float @cst1() nounwind {
130 ; X64-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
132 %tmp = tail call float @llvm.copysign.f32( float 1.0, float -2.0 )
136 define double @cst2() nounwind {
145 ; X64-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
147 %tmp1 = fadd float -1.0, -1.0
148 %tmp2 = fpext float %tmp1 to double
149 %tmp = tail call double @llvm.copysign.f64( double 0.0, double %tmp2 )
; LLVM copysign intrinsics; unlike the libm calls above, these are expanded
; inline by the backend (andps/orps sign-bit masking, per the CHECK lines).
153 declare float @llvm.copysign.f32(float %Mag, float %Sgn)
154 declare double @llvm.copysign.f64(double %Mag, double %Sgn)