; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-- -mcpu=x86-64 | FileCheck %s -check-prefix=X64
; RUN: llc < %s -mtriple=i686-unknown | FileCheck %s -check-prefix=X86
declare float @llvm.sqrt.f32(float %x);

; 1/sqrt(x) where both the sqrt call and the divide carry full 'fast' flags.
; With 'fast' the x86-64 backend may replace sqrtss+divss with the rsqrtss
; estimate refined by one Newton-Raphson step — the mulss/mulss/addss/mulss
; sequence against constant-pool values checked below.  The i686/x87 lowering
; keeps a real divide (fdivp).
define dso_local float @fast_recip_sqrt(float %x) {
; X64-LABEL: fast_recip_sqrt:
; X64-NEXT: rsqrtss %xmm0, %xmm1
; X64-NEXT: mulss %xmm1, %xmm0
; X64-NEXT: mulss %xmm1, %xmm0
; X64-NEXT: addss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; X64-NEXT: mulss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
; X64-NEXT: mulss %xmm1, %xmm0
;
; X86-LABEL: fast_recip_sqrt:
; X86-NEXT: flds {{[0-9]+}}(%esp)
; X86-NEXT: fdivp %st, %st(1)
; NOTE(review): the autogenerated assertions above look truncated (no
; '# %bb.0:' after the labels, no terminating ret checks) — regenerate with
; utils/update_llc_test_checks.py before relying on them.
  %y = call fast float @llvm.sqrt.f32(float %x)
  %z = fdiv fast float 1.0, %y
  ret float %z
}
declare float @llvm.fmuladd.f32(float %a, float %b, float %c);

; fmuladd(a, 2.0, a) with 'fast' flags: 2.0*a + a may be reassociated into a
; single multiply by a constant, so both targets check exactly one multiply
; against a constant-pool value and no separate add.  (%b and %c are unused;
; only %a feeds the intrinsic.)
define dso_local float @fast_fmuladd_opts(float %a , float %b , float %c) {
; X64-LABEL: fast_fmuladd_opts:
; X64-NEXT: mulss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
;
; X86-LABEL: fast_fmuladd_opts:
; X86-NEXT: flds {{[0-9]+}}(%esp)
; X86-NEXT: fmuls {{\.?LCPI[0-9]+_[0-9]+}}
; NOTE(review): assertions look truncated (missing '# %bb.0:'/ret checks) —
; regenerate with utils/update_llc_test_checks.py.
  %res = call fast float @llvm.fmuladd.f32(float %a, float 2.0, float %a)
  ret float %res
}
; The multiply is strict: %m's fmul carries no fast-math flags, while the
; fadd is 'fast'.  The fast add may be folded into a multiply by a different
; constant, but the strict product x*4.2 must still be computed because its
; value is stored to @mul1 — hence two multiplies are checked on each target.

@mul1 = common dso_local global double 0.000000e+00, align 4

define dso_local double @not_so_fast_mul_add(double %x) {
; X64-LABEL: not_so_fast_mul_add:
; X64-NEXT: movsd {{.*#+}} xmm1 = [4.2000000000000002E+0,0.0E+0]
; X64-NEXT: mulsd %xmm0, %xmm1
; X64-NEXT: mulsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; X64-NEXT: movsd %xmm1, mul1(%rip)
;
; X86-LABEL: not_so_fast_mul_add:
; X86-NEXT: fldl {{[0-9]+}}(%esp)
; X86-NEXT: fld %st(0)
; X86-NEXT: fmull {{\.?LCPI[0-9]+_[0-9]+}}
; X86-NEXT: fxch %st(1)
; X86-NEXT: fmull {{\.?LCPI[0-9]+_[0-9]+}}
; X86-NEXT: fxch %st(1)
; X86-NEXT: fstpl mul1
; NOTE(review): assertions look truncated (missing '# %bb.0:'/ret checks) —
; regenerate with utils/update_llc_test_checks.py.
  %m = fmul double %x, 4.2
  %a = fadd fast double %m, %x
  store double %m, ptr @mul1, align 4
  ret double %a
}
@sqrt1 = common dso_local global float 0.000000e+00, align 4

; Only the divide is 'fast'; the sqrt call itself has no fast-math flags.
; The reciprocal may be formed from the rsqrtss estimate (plus the refinement
; mul/add sequence), but the exact square root stored to @sqrt1 must come
; from a real sqrtss — the X64 checks require both rsqrtss and sqrtss, with
; the sqrtss result (%xmm2) being the value written to sqrt1.
define dso_local float @not_so_fast_recip_sqrt(float %x) {
; X64-LABEL: not_so_fast_recip_sqrt:
; X64-NEXT: rsqrtss %xmm0, %xmm1
; X64-NEXT: sqrtss %xmm0, %xmm2
; X64-NEXT: mulss %xmm1, %xmm0
; X64-NEXT: mulss %xmm1, %xmm0
; X64-NEXT: addss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; X64-NEXT: mulss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
; X64-NEXT: mulss %xmm1, %xmm0
; X64-NEXT: movss %xmm2, sqrt1(%rip)
;
; X86-LABEL: not_so_fast_recip_sqrt:
; X86-NEXT: flds {{[0-9]+}}(%esp)
; X86-NEXT: fdiv %st(1), %st
; X86-NEXT: fxch %st(1)
; X86-NEXT: fstps sqrt1
; NOTE(review): the visible assertions look truncated, and none of them
; corresponds to the trailing 'fadd ... 14.5' below — the checks appear
; stale; regenerate with utils/update_llc_test_checks.py.
  %y = call float @llvm.sqrt.f32(float %x)
  %z = fdiv fast float 1.0, %y
  store float %y, ptr @sqrt1, align 4
  %ret = fadd float %z , 14.5
  ret float %ret
}
; 'arcp' (allow reciprocal) division of a half by the constant 10.0: the
; backend may rewrite the divide as a multiply by a constant reciprocal, so
; both targets check a multiply against a constant-pool value and no divide.
; half has no native x86 support here and is legalized through libcalls:
; __extendhfsf2/__truncsfhf2 on x86-64 (the final extend is a tail call for
; the float return) and __gnu_h2f_ieee/__gnu_f2h_ieee on i686.
define dso_local float @div_arcp_by_const(half %x) {
; X64-LABEL: div_arcp_by_const:
; X64-NEXT: pushq %rax
; X64-NEXT: .cfi_def_cfa_offset 16
; X64-NEXT: callq __extendhfsf2@PLT
; X64-NEXT: mulss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; X64-NEXT: callq __truncsfhf2@PLT
; X64-NEXT: popq %rax
; X64-NEXT: .cfi_def_cfa_offset 8
; X64-NEXT: jmp __extendhfsf2@PLT # TAILCALL
;
; X86-LABEL: div_arcp_by_const:
; X86-NEXT: pushl %eax
; X86-NEXT: .cfi_def_cfa_offset 8
; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl %eax, (%esp)
; X86-NEXT: calll __gnu_h2f_ieee
; X86-NEXT: fmuls {{\.?LCPI[0-9]+_[0-9]+}}
; X86-NEXT: fstps (%esp)
; X86-NEXT: calll __gnu_f2h_ieee
; X86-NEXT: movzwl %ax, %eax
; X86-NEXT: movl %eax, (%esp)
; X86-NEXT: calll __gnu_h2f_ieee
; X86-NEXT: popl %eax
; X86-NEXT: .cfi_def_cfa_offset 4
; NOTE(review): assertions look truncated (missing '# %bb.0:'/retl checks) —
; regenerate with utils/update_llc_test_checks.py.
  %rcp = fdiv arcp half %x, 10.0
  %z = fpext half %rcp to float
  ret float %z
}