; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
; RUN: llc < %s -mtriple=aarch64-linux-gnu -O3 -verify-machineinstrs | FileCheck %s

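; Combine: 'fast' implies both 'contract' and 'nsz', so fmul+fadd+fneg folds to fnmadd.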
define void @fnmaddd(ptr %a, ptr %b, ptr %c) {
; CHECK-LABEL: fnmaddd:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    ldr d0, [x0]
; CHECK-NEXT:    ldr d1, [x1]
; CHECK-NEXT:    ldr d2, [x2]
; CHECK-NEXT:    fnmadd d0, d1, d0, d2
; CHECK-NEXT:    str d0, [x0]
; CHECK-NEXT:    ret
entry:
  %0 = load double, ptr %a, align 8
  %1 = load double, ptr %b, align 8
  %mul = fmul fast double %1, %0
  %2 = load double, ptr %c, align 8
  %add = fadd fast double %mul, %2
  %fneg = fneg fast double %add
  store double %fneg, ptr %a, align 8
  ret void
}

; Don't combine: No fast-math flags
define void @fnmaddd_no_fast(ptr %a, ptr %b, ptr %c) {
; CHECK-LABEL: fnmaddd_no_fast:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    ldr d0, [x0]
; CHECK-NEXT:    ldr d1, [x1]
; CHECK-NEXT:    fmul d0, d1, d0
; CHECK-NEXT:    ldr d1, [x2]
; CHECK-NEXT:    fadd d0, d0, d1
; CHECK-NEXT:    fneg d0, d0
; CHECK-NEXT:    str d0, [x0]
; CHECK-NEXT:    ret
entry:
  %0 = load double, ptr %a, align 8
  %1 = load double, ptr %b, align 8
  %mul = fmul double %1, %0
  %2 = load double, ptr %c, align 8
  %add = fadd double %mul, %2
  %fneg = fneg double %add
  store double %fneg, ptr %a, align 8
  ret void
}

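; Combine: same as @fnmaddd, but single precision.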
define void @fnmadds(ptr %a, ptr %b, ptr %c) {
; CHECK-LABEL: fnmadds:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    ldr s0, [x0]
; CHECK-NEXT:    ldr s1, [x1]
; CHECK-NEXT:    ldr s2, [x2]
; CHECK-NEXT:    fnmadd s0, s1, s0, s2
; CHECK-NEXT:    str s0, [x0]
; CHECK-NEXT:    ret
entry:
  %0 = load float, ptr %a, align 4
  %1 = load float, ptr %b, align 4
  %mul = fmul fast float %1, %0
  %2 = load float, ptr %c, align 4
  %add = fadd fast float %mul, %2
  %fneg = fneg fast float %add
  store float %fneg, ptr %a, align 4
  ret void
}

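; Combine: 'contract' together with 'nsz' is sufficient; full 'fast' is not required.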
define void @fnmadds_nsz_contract(ptr %a, ptr %b, ptr %c) {
; CHECK-LABEL: fnmadds_nsz_contract:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    ldr s0, [x0]
; CHECK-NEXT:    ldr s1, [x1]
; CHECK-NEXT:    ldr s2, [x2]
; CHECK-NEXT:    fnmadd s0, s1, s0, s2
; CHECK-NEXT:    str s0, [x0]
; CHECK-NEXT:    ret
entry:
  %0 = load float, ptr %a, align 4
  %1 = load float, ptr %b, align 4
  %mul = fmul contract nsz float %1, %0
  %2 = load float, ptr %c, align 4
  %add = fadd contract nsz float %mul, %2
  %fneg = fneg contract nsz float %add
  store float %fneg, ptr %a, align 4
  ret void
}

; Don't combine into fnmadd: Missing nsz ('contract' still fuses mul+add into fmadd)
define void @fnmadds_contract(ptr %a, ptr %b, ptr %c) {
; CHECK-LABEL: fnmadds_contract:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    ldr s0, [x0]
; CHECK-NEXT:    ldr s1, [x1]
; CHECK-NEXT:    ldr s2, [x2]
; CHECK-NEXT:    fmadd s0, s1, s0, s2
; CHECK-NEXT:    fneg s0, s0
; CHECK-NEXT:    str s0, [x0]
; CHECK-NEXT:    ret
entry:
  %0 = load float, ptr %a, align 4
  %1 = load float, ptr %b, align 4
  %mul = fmul contract float %1, %0
  %2 = load float, ptr %c, align 4
  %add = fadd contract float %mul, %2
  %fneg = fneg contract float %add
  store float %fneg, ptr %a, align 4
  ret void
}

; Don't combine: Missing contract
define void @fnmadds_nsz(ptr %a, ptr %b, ptr %c) {
; CHECK-LABEL: fnmadds_nsz:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    ldr s0, [x0]
; CHECK-NEXT:    ldr s1, [x1]
; CHECK-NEXT:    fmul s0, s1, s0
; CHECK-NEXT:    ldr s1, [x2]
; CHECK-NEXT:    fadd s0, s0, s1
; CHECK-NEXT:    fneg s0, s0
; CHECK-NEXT:    str s0, [x0]
; CHECK-NEXT:    ret
entry:
  %0 = load float, ptr %a, align 4
  %1 = load float, ptr %b, align 4
  %mul = fmul nsz float %1, %0
  %2 = load float, ptr %c, align 4
  %add = fadd nsz float %mul, %2
  %fneg = fneg nsz float %add
  store float %fneg, ptr %a, align 4
  ret void
}

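; Don't fold the fneg into fnmadd: %add has a second use, so keep fmadd plus a separate fneg.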
define void @fnmaddd_two_uses(ptr %a, ptr %b, ptr %c, ptr %d) {
; CHECK-LABEL: fnmaddd_two_uses:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    ldr d0, [x1]
; CHECK-NEXT:    ldr d1, [x0]
; CHECK-NEXT:    ldr d2, [x2]
; CHECK-NEXT:    fmadd d0, d0, d1, d2
; CHECK-NEXT:    fneg d1, d0
; CHECK-NEXT:    str d1, [x0]
; CHECK-NEXT:    str d0, [x3]
; CHECK-NEXT:    ret
entry:
  %0 = load double, ptr %a, align 8
  %1 = load double, ptr %b, align 8
  %mul = fmul fast double %1, %0
  %2 = load double, ptr %c, align 8
  %add = fadd fast double %mul, %2
  %fneg1 = fneg fast double %add
  store double %fneg1, ptr %a, align 8
  store double %add, ptr %d, align 8
  ret void
}