; RUN: llc < %s -march=arm -mattr=+vfp2 | FileCheck %s -check-prefix=VFP2
; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s -check-prefix=NEON
; RUN: llc < %s -march=arm -mcpu=cortex-a8 | FileCheck %s -check-prefix=A8
; RUN: llc < %s -march=arm -mcpu=cortex-a8 -regalloc=basic | FileCheck %s -check-prefix=A8
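
; The expression -(a*b) - acc (also written as (-1.0 * (a*b)) - acc) should be
; selected as a single negated multiply-accumulate (vnmla) under VFP2 and NEON,
; while Cortex-A8 keeps the multiply and the subtract as separate vnmul and
; vsub instructions.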
define float @t1(float %acc, float %a, float %b) nounwind {
entry:
; VFP2: vnmla.f32
; NEON: vnmla.f32
; A8: vnmul.f32 s{{[0-9]}}, s{{[0-9]}}, s{{[0-9]}}
; A8: vsub.f32 d{{[0-9]}}, d{{[0-9]}}, d{{[0-9]}}
  %0 = fmul float %a, %b
  %1 = fsub float -0.0, %0
  %2 = fsub float %1, %acc
  ret float %2
}
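
; t2 is the same computation as t1, but the negation is expressed as a
; multiply by -1.0 rather than a subtraction from -0.0.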
define float @t2(float %acc, float %a, float %b) nounwind {
entry:
; VFP2: vnmla.f32
; NEON: vnmla.f32
; A8: vnmul.f32 s{{[01234]}}, s{{[01234]}}, s{{[01234]}}
; A8: vsub.f32 d{{[0-9]}}, d{{[0-9]}}, d{{[0-9]}}
  %0 = fmul float %a, %b
  %1 = fmul float -1.0, %0
  %2 = fsub float %1, %acc
  ret float %2
}
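
; t3 is the double-precision version of t1.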
define double @t3(double %acc, double %a, double %b) nounwind {
entry:
; VFP2: vnmla.f64
; NEON: vnmla.f64
; A8: vnmul.f64 d1{{[67]}}, d1{{[67]}}, d1{{[67]}}
; A8: vsub.f64 d1{{[67]}}, d1{{[67]}}, d1{{[67]}}
  %0 = fmul double %a, %b
  %1 = fsub double -0.0, %0
  %2 = fsub double %1, %acc
  ret double %2
}
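
; t4 is the double-precision version of t2: the negation is a multiply by -1.0.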
define double @t4(double %acc, double %a, double %b) nounwind {
entry:
; VFP2: vnmla.f64
; NEON: vnmla.f64
; A8: vnmul.f64 d1{{[67]}}, d1{{[67]}}, d1{{[67]}}
; A8: vsub.f64 d1{{[67]}}, d1{{[67]}}, d1{{[67]}}
  %0 = fmul double %a, %b
  %1 = fmul double -1.0, %0
  %2 = fsub double %1, %acc
  ret double %2
}