; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -O3 < %s -mcpu=haswell -mtriple=x86_64 | FileCheck %s

; Verify that we are not exponentially increasing compile time.
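; The function body below chains hundreds of fsub/fmul operations carrying
; the reassoc/nsz/contract fast-math flags; each (%0 - a*b) * acc step is a
; candidate for scalar FMA contraction (the vfnmadd*/vfmsub* instructions in
; the checks below). A combiner that revisited already-combined nodes could
; take time exponential in the length of this chain.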
define void @tester(float %0, float %1, float %2, float %3, float %4, float %5, float %6, float %7, float %8, float %9, float %10, float %11, float %12, float %13, float %14, float %15, float %16, float %17, float %18, float %19, float %20, float %21, float %22, float %23, float %24, float %25, float %26, float %27, float %28, float %29, float %30, float %31, float %32, float %33, float %34, float %35, float %36, float %37, float %38, float %39, float %40, float %41, float %42, float %43, float %44, float %45, float %46, float %47, float %48, float %49, float %50, float %51, float %52, float %53, float %54, float %55, float %56, float %57, float %58, float %59, float %60, float %61, float %62, float %63, float %64, float %65, float %66, float %67, float %68, float %69, float %70, float %71, float %72, float %73, float %74, float %75, float %76, float %77, float %78, float %79, float* %80) {
; CHECK-LABEL: tester:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmovaps %xmm3, %xmm15
; CHECK-NEXT:    vmovss {{.*#+}} xmm14 = mem[0],zero,zero,zero
; CHECK-NEXT:    vmovss {{.*#+}} xmm10 = mem[0],zero,zero,zero
; CHECK-NEXT:    vmovss {{.*#+}} xmm13 = mem[0],zero,zero,zero
; CHECK-NEXT:    vsubss %xmm1, %xmm0, %xmm12
; CHECK-NEXT:    vmulss %xmm2, %xmm1, %xmm3
; CHECK-NEXT:    vfmsub213ss {{.*#+}} xmm3 = (xmm15 * xmm3) - xmm0
; CHECK-NEXT:    vfnmadd213ss {{.*#+}} xmm5 = -(xmm12 * xmm5) + xmm0
; CHECK-NEXT:    vmulss %xmm5, %xmm4, %xmm2
; CHECK-NEXT:    vmulss %xmm2, %xmm3, %xmm3
; CHECK-NEXT:    vmulss %xmm6, %xmm12, %xmm2
; CHECK-NEXT:    vfnmadd213ss {{.*#+}} xmm2 = -(xmm7 * xmm2) + xmm0
; CHECK-NEXT:    vmulss %xmm3, %xmm2, %xmm5
; CHECK-NEXT:    vmulss %xmm0, %xmm13, %xmm2
; CHECK-NEXT:    vmovss %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
; CHECK-NEXT:    vmulss %xmm2, %xmm10, %xmm2
; CHECK-NEXT:    vfnmadd132ss {{.*#+}} xmm2 = -(xmm2 * mem) + xmm0
; CHECK-NEXT:    vmulss {{[0-9]+}}(%rsp), %xmm7, %xmm3
; CHECK-NEXT:    vfnmadd132ss {{.*#+}} xmm3 = -(xmm3 * mem) + xmm0
; CHECK-NEXT:    vmulss %xmm3, %xmm2, %xmm2
; CHECK-NEXT:    vmulss {{[0-9]+}}(%rsp), %xmm0, %xmm3
; CHECK-NEXT:    vmulss {{[0-9]+}}(%rsp), %xmm3, %xmm4
; CHECK-NEXT:    vfnmadd213ss {{.*#+}} xmm4 = -(xmm14 * xmm4) + xmm0
; CHECK-NEXT:    vmulss %xmm4, %xmm5, %xmm4
; CHECK-NEXT:    vmovss {{.*#+}} xmm5 = mem[0],zero,zero,zero
; CHECK-NEXT:    vfnmadd132ss {{.*#+}} xmm5 = -(xmm5 * mem) + xmm0
; CHECK-NEXT:    vmulss %xmm5, %xmm2, %xmm2
; CHECK-NEXT:    vmovss {{.*#+}} xmm7 = mem[0],zero,zero,zero
; CHECK-NEXT:    vmulss {{[0-9]+}}(%rsp), %xmm7, %xmm5
; CHECK-NEXT:    vfnmadd213ss {{.*#+}} xmm5 = -(xmm10 * xmm5) + xmm0
; CHECK-NEXT:    vmulss %xmm5, %xmm4, %xmm4
; CHECK-NEXT:    vmovss {{.*#+}} xmm9 = mem[0],zero,zero,zero
; CHECK-NEXT:    vmulss %xmm0, %xmm9, %xmm6
; CHECK-NEXT:    vmovss %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
; CHECK-NEXT:    vmulss %xmm6, %xmm14, %xmm5
; CHECK-NEXT:    vfnmadd213ss {{.*#+}} xmm5 = -(xmm12 * xmm5) + xmm0
; CHECK-NEXT:    vmulss %xmm5, %xmm2, %xmm2
; CHECK-NEXT:    vmovss {{.*#+}} xmm5 = mem[0],zero,zero,zero
; CHECK-NEXT:    vfnmadd213ss {{.*#+}} xmm5 = -(xmm13 * xmm5) + xmm0
; CHECK-NEXT:    vmulss %xmm5, %xmm4, %xmm4
; CHECK-NEXT:    vmulss {{[0-9]+}}(%rsp), %xmm3, %xmm11
; CHECK-NEXT:    vmovss {{.*#+}} xmm3 = mem[0],zero,zero,zero
; CHECK-NEXT:    vfnmadd213ss {{.*#+}} xmm3 = -(xmm11 * xmm3) + xmm0
; CHECK-NEXT:    vmulss %xmm3, %xmm2, %xmm2
; CHECK-NEXT:    vmulss %xmm2, %xmm4, %xmm2
; CHECK-NEXT:    vmulss {{[0-9]+}}(%rsp), %xmm2, %xmm2
; CHECK-NEXT:    vmovss {{.*#+}} xmm3 = mem[0],zero,zero,zero
; CHECK-NEXT:    vfnmadd213ss {{.*#+}} xmm3 = -(xmm15 * xmm3) + xmm0
; CHECK-NEXT:    vmulss %xmm2, %xmm3, %xmm2
; CHECK-NEXT:    vmulss {{[0-9]+}}(%rsp), %xmm2, %xmm2
; CHECK-NEXT:    vmovss %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
; CHECK-NEXT:    vmulss {{[0-9]+}}(%rsp), %xmm1, %xmm4
; CHECK-NEXT:    vfnmadd132ss {{.*#+}} xmm4 = -(xmm4 * mem) + xmm0
; CHECK-NEXT:    vmovss {{.*#+}} xmm8 = mem[0],zero,zero,zero
; CHECK-NEXT:    vmulss {{[0-9]+}}(%rsp), %xmm8, %xmm6
; CHECK-NEXT:    vfnmadd132ss {{.*#+}} xmm6 = -(xmm6 * mem) + xmm0
; CHECK-NEXT:    vmulss %xmm6, %xmm4, %xmm4
; CHECK-NEXT:    vmulss %xmm4, %xmm2, %xmm2
; CHECK-NEXT:    vmulss {{[0-9]+}}(%rsp), %xmm2, %xmm2
; CHECK-NEXT:    vmulss {{[0-9]+}}(%rsp), %xmm2, %xmm2
; CHECK-NEXT:    vmulss {{[0-9]+}}(%rsp), %xmm2, %xmm2
; CHECK-NEXT:    vmulss {{[0-9]+}}(%rsp), %xmm10, %xmm4
; CHECK-NEXT:    vfnmadd132ss {{.*#+}} xmm4 = -(xmm4 * mem) + xmm0
; CHECK-NEXT:    vmulss %xmm2, %xmm4, %xmm2
; CHECK-NEXT:    vmulss {{[0-9]+}}(%rsp), %xmm2, %xmm2
; CHECK-NEXT:    vmovss {{.*#+}} xmm4 = mem[0],zero,zero,zero
; CHECK-NEXT:    vfnmadd213ss {{.*#+}} xmm4 = -(xmm1 * xmm4) + xmm0
; CHECK-NEXT:    vmovss {{.*#+}} xmm6 = mem[0],zero,zero,zero
; CHECK-NEXT:    vfnmadd132ss {{.*#+}} xmm6 = -(xmm6 * mem) + xmm0
; CHECK-NEXT:    vmulss %xmm6, %xmm4, %xmm4
; CHECK-NEXT:    vmulss %xmm4, %xmm2, %xmm2
; CHECK-NEXT:    vmulss {{[0-9]+}}(%rsp), %xmm2, %xmm2
; CHECK-NEXT:    vmovss {{.*#+}} xmm4 = mem[0],zero,zero,zero
; CHECK-NEXT:    vmulss {{[0-9]+}}(%rsp), %xmm9, %xmm1
; CHECK-NEXT:    vmovss %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
; CHECK-NEXT:    vfnmadd213ss {{.*#+}} xmm4 = -(xmm1 * xmm4) + xmm0
; CHECK-NEXT:    vmulss %xmm2, %xmm4, %xmm10
; CHECK-NEXT:    vmulss %xmm0, %xmm12, %xmm6
; CHECK-NEXT:    vmulss {{[0-9]+}}(%rsp), %xmm6, %xmm4
; CHECK-NEXT:    vfnmadd132ss {{.*#+}} xmm4 = -(xmm4 * mem) + xmm0
; CHECK-NEXT:    vmulss {{[0-9]+}}(%rsp), %xmm13, %xmm5
; CHECK-NEXT:    vfnmadd213ss {{.*#+}} xmm5 = -(xmm7 * xmm5) + xmm0
; CHECK-NEXT:    vmulss %xmm5, %xmm4, %xmm4
; CHECK-NEXT:    vmulss {{[0-9]+}}(%rsp), %xmm10, %xmm5
; CHECK-NEXT:    vmulss {{[0-9]+}}(%rsp), %xmm5, %xmm5
; CHECK-NEXT:    vmulss %xmm4, %xmm5, %xmm12
; CHECK-NEXT:    vmovss {{.*#+}} xmm5 = mem[0],zero,zero,zero
; CHECK-NEXT:    vfnmadd213ss {{.*#+}} xmm5 = -(xmm7 * xmm5) + xmm0
; CHECK-NEXT:    vmovss {{.*#+}} xmm3 = mem[0],zero,zero,zero
; CHECK-NEXT:    vmulss %xmm6, %xmm3, %xmm2
; CHECK-NEXT:    vmovss {{.*#+}} xmm10 = mem[0],zero,zero,zero
; CHECK-NEXT:    vfnmadd213ss {{.*#+}} xmm2 = -(xmm10 * xmm2) + xmm0
; CHECK-NEXT:    vmulss {{[0-9]+}}(%rsp), %xmm0, %xmm9
; CHECK-NEXT:    vmulss {{[0-9]+}}(%rsp), %xmm9, %xmm1
; CHECK-NEXT:    vfnmadd132ss {{.*#+}} xmm1 = -(xmm1 * mem) + xmm0
; CHECK-NEXT:    vmulss %xmm2, %xmm5, %xmm2
; CHECK-NEXT:    vmulss {{[0-9]+}}(%rsp), %xmm3, %xmm5
; CHECK-NEXT:    vfnmadd132ss {{.*#+}} xmm5 = -(xmm5 * mem) + xmm0
; CHECK-NEXT:    vmulss %xmm1, %xmm2, %xmm1
; CHECK-NEXT:    vmulss %xmm5, %xmm1, %xmm1
; CHECK-NEXT:    vmulss {{[0-9]+}}(%rsp), %xmm3, %xmm2
; CHECK-NEXT:    vfnmadd213ss {{.*#+}} xmm2 = -(xmm13 * xmm2) + xmm0
; CHECK-NEXT:    vmulss %xmm2, %xmm1, %xmm1
; CHECK-NEXT:    vmulss {{[0-9]+}}(%rsp), %xmm12, %xmm2
; CHECK-NEXT:    vmulss {{[0-9]+}}(%rsp), %xmm2, %xmm2
; CHECK-NEXT:    vmulss %xmm1, %xmm2, %xmm4
; CHECK-NEXT:    vmovss {{.*#+}} xmm13 = mem[0],zero,zero,zero
; CHECK-NEXT:    vmovss {{.*#+}} xmm5 = mem[0],zero,zero,zero
; CHECK-NEXT:    vmulss {{[0-9]+}}(%rsp), %xmm5, %xmm3
; CHECK-NEXT:    vfnmadd213ss {{.*#+}} xmm3 = -(xmm13 * xmm3) + xmm0
; CHECK-NEXT:    vmulss {{[0-9]+}}(%rsp), %xmm6, %xmm2
; CHECK-NEXT:    vfnmadd132ss {{.*#+}} xmm2 = -(xmm2 * mem) + xmm0
; CHECK-NEXT:    vmovss {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 4-byte Reload
; CHECK-NEXT:    # xmm1 = mem[0],zero,zero,zero
; CHECK-NEXT:    vmulss {{[0-9]+}}(%rsp), %xmm1, %xmm1
; CHECK-NEXT:    vmulss %xmm2, %xmm3, %xmm2
; CHECK-NEXT:    vmovss {{.*#+}} xmm3 = mem[0],zero,zero,zero
; CHECK-NEXT:    vfnmadd213ss {{.*#+}} xmm1 = -(xmm3 * xmm1) + xmm0
; CHECK-NEXT:    vmulss %xmm1, %xmm2, %xmm1
; CHECK-NEXT:    vmulss {{[0-9]+}}(%rsp), %xmm4, %xmm2
; CHECK-NEXT:    vmulss {{[0-9]+}}(%rsp), %xmm2, %xmm2
; CHECK-NEXT:    vmulss %xmm1, %xmm2, %xmm1
; CHECK-NEXT:    vmovss {{[-0-9]+}}(%r{{[sb]}}p), %xmm12 # 4-byte Reload
; CHECK-NEXT:    # xmm12 = mem[0],zero,zero,zero
; CHECK-NEXT:    vmulss {{[0-9]+}}(%rsp), %xmm12, %xmm2
; CHECK-NEXT:    vfnmadd132ss {{.*#+}} xmm7 = -(xmm7 * mem) + xmm0
; CHECK-NEXT:    vfnmadd213ss {{.*#+}} xmm2 = -(xmm13 * xmm2) + xmm0
; CHECK-NEXT:    vmulss %xmm7, %xmm2, %xmm2
; CHECK-NEXT:    vmulss {{[0-9]+}}(%rsp), %xmm1, %xmm1
; CHECK-NEXT:    vmulss {{[0-9]+}}(%rsp), %xmm1, %xmm1
; CHECK-NEXT:    vfnmadd132ss {{.*#+}} xmm8 = -(xmm8 * mem) + xmm0
; CHECK-NEXT:    vmulss %xmm2, %xmm8, %xmm2
; CHECK-NEXT:    vmulss %xmm2, %xmm1, %xmm1
; CHECK-NEXT:    vmulss {{[0-9]+}}(%rsp), %xmm1, %xmm1
; CHECK-NEXT:    vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
; CHECK-NEXT:    vfnmadd213ss {{.*#+}} xmm2 = -(xmm15 * xmm2) + xmm0
; CHECK-NEXT:    vmulss %xmm1, %xmm2, %xmm1
; CHECK-NEXT:    vmulss %xmm0, %xmm5, %xmm2
; CHECK-NEXT:    vmulss %xmm3, %xmm2, %xmm2
; CHECK-NEXT:    vfnmadd213ss {{.*#+}} xmm2 = -(xmm10 * xmm2) + xmm0
; CHECK-NEXT:    vmovss {{.*#+}} xmm3 = mem[0],zero,zero,zero
; CHECK-NEXT:    vfnmadd213ss {{.*#+}} xmm3 = -(xmm5 * xmm3) + xmm0
; CHECK-NEXT:    vmulss %xmm2, %xmm3, %xmm2
; CHECK-NEXT:    vmulss {{[0-9]+}}(%rsp), %xmm9, %xmm8
; CHECK-NEXT:    vmulss {{[0-9]+}}(%rsp), %xmm9, %xmm4
; CHECK-NEXT:    vfnmadd132ss {{.*#+}} xmm4 = -(xmm4 * mem) + xmm0
; CHECK-NEXT:    vmulss %xmm4, %xmm2, %xmm2
; CHECK-NEXT:    vmulss {{[0-9]+}}(%rsp), %xmm1, %xmm1
; CHECK-NEXT:    vmulss %xmm2, %xmm1, %xmm10
; CHECK-NEXT:    vfnmadd213ss {{.*#+}} xmm11 = -(xmm5 * xmm11) + xmm0
; CHECK-NEXT:    vmulss {{[0-9]+}}(%rsp), %xmm6, %xmm2
; CHECK-NEXT:    vfnmadd213ss {{.*#+}} xmm2 = -(xmm15 * xmm2) + xmm0
; CHECK-NEXT:    vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; CHECK-NEXT:    vmulss {{[0-9]+}}(%rsp), %xmm1, %xmm4
; CHECK-NEXT:    vfnmadd132ss {{.*#+}} xmm4 = -(xmm4 * mem) + xmm0
; CHECK-NEXT:    vmulss %xmm2, %xmm11, %xmm2
; CHECK-NEXT:    vmulss %xmm4, %xmm2, %xmm2
; CHECK-NEXT:    vfnmadd132ss {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm14 # 4-byte Folded Reload
; CHECK-NEXT:    # xmm14 = -(xmm14 * mem) + xmm0
; CHECK-NEXT:    vmulss %xmm2, %xmm14, %xmm9
; CHECK-NEXT:    vmulss {{[0-9]+}}(%rsp), %xmm0, %xmm2
; CHECK-NEXT:    vmulss {{[0-9]+}}(%rsp), %xmm2, %xmm11
; CHECK-NEXT:    vfnmadd132ss {{.*#+}} xmm11 = -(xmm11 * mem) + xmm0
; CHECK-NEXT:    vmovss {{.*#+}} xmm5 = mem[0],zero,zero,zero
; CHECK-NEXT:    vmulss {{[0-9]+}}(%rsp), %xmm5, %xmm7
; CHECK-NEXT:    vmulss {{[-0-9]+}}(%r{{[sb]}}p), %xmm5, %xmm5 # 4-byte Folded Reload
; CHECK-NEXT:    vmulss {{[0-9]+}}(%rsp), %xmm6, %xmm1
; CHECK-NEXT:    vmulss %xmm6, %xmm15, %xmm6
; CHECK-NEXT:    vmovss {{.*#+}} xmm3 = mem[0],zero,zero,zero
; CHECK-NEXT:    vfnmadd213ss {{.*#+}} xmm6 = -(xmm3 * xmm6) + xmm0
; CHECK-NEXT:    vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
; CHECK-NEXT:    vmulss {{[0-9]+}}(%rsp), %xmm2, %xmm4
; CHECK-NEXT:    vfnmadd213ss {{.*#+}} xmm4 = -(xmm3 * xmm4) + xmm0
; CHECK-NEXT:    vfnmadd213ss {{.*#+}} xmm7 = -(xmm3 * xmm7) + xmm0
; CHECK-NEXT:    vfnmadd213ss {{.*#+}} xmm5 = -(xmm3 * xmm5) + xmm0
; CHECK-NEXT:    vmulss {{[0-9]+}}(%rsp), %xmm12, %xmm2
; CHECK-NEXT:    vmulss %xmm0, %xmm13, %xmm3
; CHECK-NEXT:    vmulss {{[0-9]+}}(%rsp), %xmm3, %xmm3
; CHECK-NEXT:    vmovss {{.*#+}} xmm12 = mem[0],zero,zero,zero
; CHECK-NEXT:    vfnmadd213ss {{.*#+}} xmm3 = -(xmm12 * xmm3) + xmm0
; CHECK-NEXT:    vfnmadd213ss {{.*#+}} xmm2 = -(xmm12 * xmm2) + xmm0
; CHECK-NEXT:    vfmsub213ss {{.*#+}} xmm1 = (xmm15 * xmm1) - xmm0
; CHECK-NEXT:    vfnmadd132ss {{.*#+}} xmm8 = -(xmm8 * mem) + xmm0
; CHECK-NEXT:    vmulss %xmm8, %xmm9, %xmm0
; CHECK-NEXT:    vmulss %xmm6, %xmm0, %xmm0
; CHECK-NEXT:    vmulss %xmm4, %xmm0, %xmm0
; CHECK-NEXT:    vmulss %xmm7, %xmm0, %xmm0
; CHECK-NEXT:    vmulss {{[0-9]+}}(%rsp), %xmm10, %xmm4
; CHECK-NEXT:    vmulss %xmm0, %xmm4, %xmm0
; CHECK-NEXT:    vmulss %xmm5, %xmm11, %xmm4
; CHECK-NEXT:    vmulss %xmm3, %xmm4, %xmm3
; CHECK-NEXT:    vmulss %xmm2, %xmm3, %xmm2
; CHECK-NEXT:    vmulss {{[0-9]+}}(%rsp), %xmm0, %xmm0
; CHECK-NEXT:    vmulss %xmm1, %xmm2, %xmm1
; CHECK-NEXT:    vmulss %xmm1, %xmm0, %xmm0
; CHECK-NEXT:    vmovss %xmm0, (%rdi)
; CHECK-NEXT:    retq
entry:
  %81 = fsub reassoc nsz contract float %0, %1
  %82 = fmul reassoc nsz contract float %1, %2
  %83 = fmul reassoc nsz contract float %3, %82
  %84 = fsub reassoc nsz contract float %0, %83
  %85 = fmul reassoc nsz contract float %84, %4
  %86 = fmul reassoc nsz contract float %81, %5
  %87 = fsub reassoc nsz contract float %0, %86
  %88 = fmul reassoc nsz contract float %87, %85
  %89 = fmul reassoc nsz contract float %81, %6
  %90 = fmul reassoc nsz contract float %89, %7
  %91 = fsub reassoc nsz contract float %0, %90
  %92 = fmul reassoc nsz contract float %91, %88
  %93 = fmul reassoc nsz contract float %8, %0
  %94 = fmul reassoc nsz contract float %93, %9
  %95 = fmul reassoc nsz contract float %94, %10
  %96 = fsub reassoc nsz contract float %0, %95
  %97 = fmul reassoc nsz contract float %96, %92
  %98 = fmul reassoc nsz contract float %11, %7
  %99 = fmul reassoc nsz contract float %98, %12
  %100 = fsub reassoc nsz contract float %0, %99
  %101 = fmul reassoc nsz contract float %100, %97
  %102 = fmul reassoc nsz contract float %13, %0
  %103 = fmul reassoc nsz contract float %102, %14
  %104 = fmul reassoc nsz contract float %103, %15
  %105 = fsub reassoc nsz contract float %0, %104
  %106 = fmul reassoc nsz contract float %105, %101
  %107 = fmul reassoc nsz contract float %16, %17
  %108 = fsub reassoc nsz contract float %0, %107
  %109 = fmul reassoc nsz contract float %108, %106
  %110 = fmul reassoc nsz contract float %18, %19
  %111 = fmul reassoc nsz contract float %110, %9
  %112 = fsub reassoc nsz contract float %0, %111
  %113 = fmul reassoc nsz contract float %112, %109
  %114 = fmul reassoc nsz contract float %20, %0
  %115 = fmul reassoc nsz contract float %114, %15
  %116 = fmul reassoc nsz contract float %81, %115
  %117 = fsub reassoc nsz contract float %0, %116
  %118 = fmul reassoc nsz contract float %117, %113
  %119 = fmul reassoc nsz contract float %8, %21
  %120 = fsub reassoc nsz contract float %0, %119
  %121 = fmul reassoc nsz contract float %120, %118
  %122 = fmul reassoc nsz contract float %102, %22
  %123 = fmul reassoc nsz contract float %122, %23
  %124 = fsub reassoc nsz contract float %0, %123
  %125 = fmul reassoc nsz contract float %124, %121
  %126 = fmul reassoc nsz contract float %125, %24
  %127 = fmul reassoc nsz contract float %3, %25
  %128 = fsub reassoc nsz contract float %0, %127
  %129 = fmul reassoc nsz contract float %128, %126
  %130 = fmul reassoc nsz contract float %129, %26
  %131 = fmul reassoc nsz contract float %27, %1
  %132 = fmul reassoc nsz contract float %131, %28
  %133 = fsub reassoc nsz contract float %0, %132
  %134 = fmul reassoc nsz contract float %133, %130
  %135 = fmul reassoc nsz contract float %29, %30
  %136 = fmul reassoc nsz contract float %135, %31
  %137 = fsub reassoc nsz contract float %0, %136
  %138 = fmul reassoc nsz contract float %137, %134
  %139 = fmul reassoc nsz contract float %138, %32
  %140 = fmul reassoc nsz contract float %139, %33
  %141 = fmul reassoc nsz contract float %140, %34
  %142 = fmul reassoc nsz contract float %35, %9
  %143 = fmul reassoc nsz contract float %142, %36
  %144 = fsub reassoc nsz contract float %0, %143
  %145 = fmul reassoc nsz contract float %144, %141
  %146 = fmul reassoc nsz contract float %145, %37
  %147 = fmul reassoc nsz contract float %1, %38
  %148 = fsub reassoc nsz contract float %0, %147
  %149 = fmul reassoc nsz contract float %148, %146
  %150 = fmul reassoc nsz contract float %39, %40
  %151 = fsub reassoc nsz contract float %0, %150
  %152 = fmul reassoc nsz contract float %151, %149
  %153 = fmul reassoc nsz contract float %152, %41
  %154 = fmul reassoc nsz contract float %20, %42
  %155 = fmul reassoc nsz contract float %154, %43
  %156 = fsub reassoc nsz contract float %0, %155
  %157 = fmul reassoc nsz contract float %156, %153
  %158 = fmul reassoc nsz contract float %157, %44
  %159 = fmul reassoc nsz contract float %158, %45
  %160 = fmul reassoc nsz contract float %81, %0
  %161 = fmul reassoc nsz contract float %160, %46
  %162 = fmul reassoc nsz contract float %161, %14
  %163 = fsub reassoc nsz contract float %0, %162
  %164 = fmul reassoc nsz contract float %163, %159
  %165 = fmul reassoc nsz contract float %8, %47
  %166 = fmul reassoc nsz contract float %18, %165
  %167 = fsub reassoc nsz contract float %0, %166
  %168 = fmul reassoc nsz contract float %167, %164
  %169 = fmul reassoc nsz contract float %168, %48
  %170 = fmul reassoc nsz contract float %169, %49
  %171 = fmul reassoc nsz contract float %18, %50
  %172 = fsub reassoc nsz contract float %0, %171
  %173 = fmul reassoc nsz contract float %172, %170
  %174 = fmul reassoc nsz contract float %16, %160
  %175 = fmul reassoc nsz contract float %174, %12
  %176 = fsub reassoc nsz contract float %0, %175
  %177 = fmul reassoc nsz contract float %176, %173
  %178 = fmul reassoc nsz contract float %51, %0
  %179 = fmul reassoc nsz contract float %178, %22
  %180 = fmul reassoc nsz contract float %179, %52
  %181 = fsub reassoc nsz contract float %0, %180
  %182 = fmul reassoc nsz contract float %181, %177
  %183 = fmul reassoc nsz contract float %27, %16
  %184 = fmul reassoc nsz contract float %183, %53
  %185 = fsub reassoc nsz contract float %0, %184
  %186 = fmul reassoc nsz contract float %185, %182
  %187 = fmul reassoc nsz contract float %16, %54
  %188 = fmul reassoc nsz contract float %8, %187
  %189 = fsub reassoc nsz contract float %0, %188
  %190 = fmul reassoc nsz contract float %189, %186
  %191 = fmul reassoc nsz contract float %190, %55
  %192 = fmul reassoc nsz contract float %191, %56
  %193 = fmul reassoc nsz contract float %57, %58
  %194 = fmul reassoc nsz contract float %193, %59
  %195 = fsub reassoc nsz contract float %0, %194
  %196 = fmul reassoc nsz contract float %195, %192
  %197 = fmul reassoc nsz contract float %13, %160
  %198 = fmul reassoc nsz contract float %197, %36
  %199 = fsub reassoc nsz contract float %0, %198
  %200 = fmul reassoc nsz contract float %199, %196
  %201 = fmul reassoc nsz contract float %93, %60
  %202 = fmul reassoc nsz contract float %201, %61
  %203 = fsub reassoc nsz contract float %0, %202
  %204 = fmul reassoc nsz contract float %203, %200
  %205 = fmul reassoc nsz contract float %204, %62
  %206 = fmul reassoc nsz contract float %205, %63
  %207 = fmul reassoc nsz contract float %114, %9
  %208 = fmul reassoc nsz contract float %207, %59
  %209 = fsub reassoc nsz contract float %0, %208
  %210 = fmul reassoc nsz contract float %209, %206
  %211 = fmul reassoc nsz contract float %18, %64
  %212 = fsub reassoc nsz contract float %0, %211
  %213 = fmul reassoc nsz contract float %212, %210
  %214 = fmul reassoc nsz contract float %29, %65
  %215 = fsub reassoc nsz contract float %0, %214
  %216 = fmul reassoc nsz contract float %215, %213
  %217 = fmul reassoc nsz contract float %216, %66
  %218 = fmul reassoc nsz contract float %3, %67
  %219 = fsub reassoc nsz contract float %0, %218
  %220 = fmul reassoc nsz contract float %219, %217
  %221 = fmul reassoc nsz contract float %220, %68
  %222 = fmul reassoc nsz contract float %57, %69
  %223 = fsub reassoc nsz contract float %0, %222
  %224 = fmul reassoc nsz contract float %223, %221
  %225 = fmul reassoc nsz contract float %57, %0
  %226 = fmul reassoc nsz contract float %225, %61
  %227 = fmul reassoc nsz contract float %226, %12
  %228 = fsub reassoc nsz contract float %0, %227
  %229 = fmul reassoc nsz contract float %228, %224
  %230 = fmul reassoc nsz contract float %178, %70
  %231 = fmul reassoc nsz contract float %230, %46
  %232 = fsub reassoc nsz contract float %0, %231
  %233 = fmul reassoc nsz contract float %232, %229
  %234 = fmul reassoc nsz contract float %233, %71
  %235 = fmul reassoc nsz contract float %57, %122
  %236 = fsub reassoc nsz contract float %0, %235
  %237 = fmul reassoc nsz contract float %236, %234
  %238 = fmul reassoc nsz contract float %20, %160
  %239 = fmul reassoc nsz contract float %3, %238
  %240 = fsub reassoc nsz contract float %0, %239
  %241 = fmul reassoc nsz contract float %240, %237
  %242 = fmul reassoc nsz contract float %16, %72
  %243 = fmul reassoc nsz contract float %242, %73
  %244 = fsub reassoc nsz contract float %0, %243
  %245 = fmul reassoc nsz contract float %244, %241
  %246 = fmul reassoc nsz contract float %154, %15
  %247 = fsub reassoc nsz contract float %0, %246
  %248 = fmul reassoc nsz contract float %247, %245
  %249 = fmul reassoc nsz contract float %178, %23
  %250 = fmul reassoc nsz contract float %249, %74
  %251 = fsub reassoc nsz contract float %0, %250
  %252 = fmul reassoc nsz contract float %251, %248
  %253 = fmul reassoc nsz contract float %3, %160
  %254 = fmul reassoc nsz contract float %51, %253
  %255 = fsub reassoc nsz contract float %0, %254
  %256 = fmul reassoc nsz contract float %255, %252
  %257 = fmul reassoc nsz contract float %13, %75
  %258 = fmul reassoc nsz contract float %257, %51
  %259 = fsub reassoc nsz contract float %0, %258
  %260 = fmul reassoc nsz contract float %259, %256
  %261 = fmul reassoc nsz contract float %8, %76
  %262 = fmul reassoc nsz contract float %51, %261
  %263 = fsub reassoc nsz contract float %0, %262
  %264 = fmul reassoc nsz contract float %263, %260
  %265 = fmul reassoc nsz contract float %264, %77
  %266 = fmul reassoc nsz contract float %39, %0
  %267 = fmul reassoc nsz contract float %266, %78
  %268 = fmul reassoc nsz contract float %267, %14
  %269 = fsub reassoc nsz contract float %0, %268
  %270 = fmul reassoc nsz contract float %269, %265
  %271 = fmul reassoc nsz contract float %1, %76
  %272 = fmul reassoc nsz contract float %51, %271
  %273 = fsub reassoc nsz contract float %0, %272
  %274 = fmul reassoc nsz contract float %273, %270
  %275 = fmul reassoc nsz contract float %0, %59
  %276 = fmul reassoc nsz contract float %275, %79
  %277 = fmul reassoc nsz contract float %276, %36
  %278 = fsub reassoc nsz contract float %0, %277
  %279 = fmul reassoc nsz contract float %278, %274
  %280 = fmul reassoc nsz contract float %114, %22
  %281 = fmul reassoc nsz contract float %280, %36
  %282 = fsub reassoc nsz contract float %0, %281
  %283 = fmul reassoc nsz contract float %282, %279
  %284 = fmul reassoc nsz contract float %0, %43
  %285 = fmul reassoc nsz contract float %284, %81
  %286 = fmul reassoc nsz contract float %3, %285
  %287 = fsub reassoc nsz contract float %0, %286
  %288 = fmul reassoc nsz contract float %287, %283
  store float %288, float* %80, align 4
  ret void
}