1 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefix=CHECK --check-prefix=SSE --check-prefix=SSE-RECIP
3 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=CHECK --check-prefix=AVX --check-prefix=AVX-RECIP
4 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx,+fma | FileCheck %s --check-prefix=CHECK --check-prefix=AVX --check-prefix=FMA-RECIP
5 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=bdver2 | FileCheck %s --check-prefix=CHECK --check-prefix=AVX --check-prefix=BDVER2
6 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=btver2 | FileCheck %s --check-prefix=CHECK --check-prefix=AVX --check-prefix=BTVER2
7 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=sandybridge | FileCheck %s --check-prefix=CHECK --check-prefix=AVX --check-prefix=SANDY
8 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=haswell | FileCheck %s --check-prefix=CHECK --check-prefix=AVX --check-prefix=HASWELL
9 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=haswell -mattr=-fma | FileCheck %s --check-prefix=CHECK --check-prefix=AVX --check-prefix=HASWELL-NO-FMA
10 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=knl | FileCheck %s --check-prefix=CHECK --check-prefix=AVX --check-prefix=AVX512 --check-prefix=KNL
11 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=skx | FileCheck %s --check-prefix=CHECK --check-prefix=AVX --check-prefix=AVX512 --check-prefix=SKX
13 ; These provide extra test coverage for reciprocal-estimate codegen, as discussed in D26855.
15 define float @f32_no_step_2(float %x) #3 {
16 ; SSE-LABEL: f32_no_step_2:
18 ; SSE-NEXT: rcpss %xmm0, %xmm0
19 ; SSE-NEXT: mulss {{.*}}(%rip), %xmm0
22 ; AVX-LABEL: f32_no_step_2:
24 ; AVX-NEXT: vrcpss %xmm0, %xmm0, %xmm0
25 ; AVX-NEXT: vmulss {{.*}}(%rip), %xmm0, %xmm0
27 %div = fdiv fast float 1234.0, %x
31 define float @f32_one_step_2(float %x) #1 {
32 ; SSE-LABEL: f32_one_step_2:
34 ; SSE-NEXT: rcpss %xmm0, %xmm2
35 ; SSE-NEXT: mulss %xmm2, %xmm0
36 ; SSE-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
37 ; SSE-NEXT: subss %xmm0, %xmm1
38 ; SSE-NEXT: mulss %xmm2, %xmm1
39 ; SSE-NEXT: addss %xmm2, %xmm1
40 ; SSE-NEXT: mulss {{.*}}(%rip), %xmm1
41 ; SSE-NEXT: movaps %xmm1, %xmm0
44 ; AVX-RECIP-LABEL: f32_one_step_2:
46 ; AVX-RECIP-NEXT: vrcpss %xmm0, %xmm0, %xmm1
47 ; AVX-RECIP-NEXT: vmulss %xmm1, %xmm0, %xmm0
48 ; AVX-RECIP-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
49 ; AVX-RECIP-NEXT: vsubss %xmm0, %xmm2, %xmm0
50 ; AVX-RECIP-NEXT: vmulss %xmm0, %xmm1, %xmm0
51 ; AVX-RECIP-NEXT: vaddss %xmm0, %xmm1, %xmm0
52 ; AVX-RECIP-NEXT: vmulss {{.*}}(%rip), %xmm0, %xmm0
53 ; AVX-RECIP-NEXT: retq
55 ; FMA-RECIP-LABEL: f32_one_step_2:
57 ; FMA-RECIP-NEXT: vrcpss %xmm0, %xmm0, %xmm1
58 ; FMA-RECIP-NEXT: vfnmadd213ss {{.*#+}} xmm0 = -(xmm1 * xmm0) + mem
59 ; FMA-RECIP-NEXT: vfmadd132ss {{.*#+}} xmm0 = (xmm0 * xmm1) + xmm1
60 ; FMA-RECIP-NEXT: vmulss {{.*}}(%rip), %xmm0, %xmm0
61 ; FMA-RECIP-NEXT: retq
63 ; BDVER2-LABEL: f32_one_step_2:
65 ; BDVER2-NEXT: vrcpss %xmm0, %xmm0, %xmm1
66 ; BDVER2-NEXT: vfnmaddss {{.*}}(%rip), %xmm1, %xmm0, %xmm0
67 ; BDVER2-NEXT: vfmaddss %xmm1, %xmm0, %xmm1, %xmm0
68 ; BDVER2-NEXT: vmulss {{.*}}(%rip), %xmm0, %xmm0
71 ; BTVER2-LABEL: f32_one_step_2:
73 ; BTVER2-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
74 ; BTVER2-NEXT: vrcpss %xmm0, %xmm0, %xmm1
75 ; BTVER2-NEXT: vmulss %xmm1, %xmm0, %xmm0
76 ; BTVER2-NEXT: vsubss %xmm0, %xmm2, %xmm0
77 ; BTVER2-NEXT: vmulss %xmm0, %xmm1, %xmm0
78 ; BTVER2-NEXT: vaddss %xmm0, %xmm1, %xmm0
79 ; BTVER2-NEXT: vmulss {{.*}}(%rip), %xmm0, %xmm0
82 ; SANDY-LABEL: f32_one_step_2:
84 ; SANDY-NEXT: vrcpss %xmm0, %xmm0, %xmm1
85 ; SANDY-NEXT: vmulss %xmm1, %xmm0, %xmm0
86 ; SANDY-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
87 ; SANDY-NEXT: vsubss %xmm0, %xmm2, %xmm0
88 ; SANDY-NEXT: vmulss %xmm0, %xmm1, %xmm0
89 ; SANDY-NEXT: vaddss %xmm0, %xmm1, %xmm0
90 ; SANDY-NEXT: vmulss {{.*}}(%rip), %xmm0, %xmm0
93 ; HASWELL-LABEL: f32_one_step_2:
95 ; HASWELL-NEXT: vrcpss %xmm0, %xmm0, %xmm1
96 ; HASWELL-NEXT: vfnmadd213ss {{.*#+}} xmm0 = -(xmm1 * xmm0) + mem
97 ; HASWELL-NEXT: vfmadd132ss {{.*#+}} xmm0 = (xmm0 * xmm1) + xmm1
98 ; HASWELL-NEXT: vmulss {{.*}}(%rip), %xmm0, %xmm0
101 ; HASWELL-NO-FMA-LABEL: f32_one_step_2:
102 ; HASWELL-NO-FMA: # %bb.0:
103 ; HASWELL-NO-FMA-NEXT: vrcpss %xmm0, %xmm0, %xmm1
104 ; HASWELL-NO-FMA-NEXT: vmulss %xmm1, %xmm0, %xmm0
105 ; HASWELL-NO-FMA-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
106 ; HASWELL-NO-FMA-NEXT: vsubss %xmm0, %xmm2, %xmm0
107 ; HASWELL-NO-FMA-NEXT: vmulss %xmm0, %xmm1, %xmm0
108 ; HASWELL-NO-FMA-NEXT: vaddss %xmm0, %xmm1, %xmm0
109 ; HASWELL-NO-FMA-NEXT: vmulss {{.*}}(%rip), %xmm0, %xmm0
110 ; HASWELL-NO-FMA-NEXT: retq
112 ; AVX512-LABEL: f32_one_step_2:
114 ; AVX512-NEXT: vrcpss %xmm0, %xmm0, %xmm1
115 ; AVX512-NEXT: vfnmadd213ss {{.*#+}} xmm0 = -(xmm1 * xmm0) + mem
116 ; AVX512-NEXT: vfmadd132ss {{.*#+}} xmm0 = (xmm0 * xmm1) + xmm1
117 ; AVX512-NEXT: vmulss {{.*}}(%rip), %xmm0, %xmm0
119 %div = fdiv fast float 3456.0, %x
123 define float @f32_one_step_2_divs(float %x) #1 {
124 ; SSE-LABEL: f32_one_step_2_divs:
126 ; SSE-NEXT: rcpss %xmm0, %xmm1
127 ; SSE-NEXT: mulss %xmm1, %xmm0
128 ; SSE-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
129 ; SSE-NEXT: subss %xmm0, %xmm2
130 ; SSE-NEXT: mulss %xmm1, %xmm2
131 ; SSE-NEXT: addss %xmm1, %xmm2
132 ; SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
133 ; SSE-NEXT: mulss %xmm2, %xmm0
134 ; SSE-NEXT: mulss %xmm2, %xmm0
137 ; AVX-RECIP-LABEL: f32_one_step_2_divs:
138 ; AVX-RECIP: # %bb.0:
139 ; AVX-RECIP-NEXT: vrcpss %xmm0, %xmm0, %xmm1
140 ; AVX-RECIP-NEXT: vmulss %xmm1, %xmm0, %xmm0
141 ; AVX-RECIP-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
142 ; AVX-RECIP-NEXT: vsubss %xmm0, %xmm2, %xmm0
143 ; AVX-RECIP-NEXT: vmulss %xmm0, %xmm1, %xmm0
144 ; AVX-RECIP-NEXT: vaddss %xmm0, %xmm1, %xmm0
145 ; AVX-RECIP-NEXT: vmulss {{.*}}(%rip), %xmm0, %xmm1
146 ; AVX-RECIP-NEXT: vmulss %xmm0, %xmm1, %xmm0
147 ; AVX-RECIP-NEXT: retq
149 ; FMA-RECIP-LABEL: f32_one_step_2_divs:
150 ; FMA-RECIP: # %bb.0:
151 ; FMA-RECIP-NEXT: vrcpss %xmm0, %xmm0, %xmm1
152 ; FMA-RECIP-NEXT: vfnmadd213ss {{.*#+}} xmm0 = -(xmm1 * xmm0) + mem
153 ; FMA-RECIP-NEXT: vfmadd132ss {{.*#+}} xmm0 = (xmm0 * xmm1) + xmm1
154 ; FMA-RECIP-NEXT: vmulss {{.*}}(%rip), %xmm0, %xmm1
155 ; FMA-RECIP-NEXT: vmulss %xmm0, %xmm1, %xmm0
156 ; FMA-RECIP-NEXT: retq
158 ; BDVER2-LABEL: f32_one_step_2_divs:
160 ; BDVER2-NEXT: vrcpss %xmm0, %xmm0, %xmm1
161 ; BDVER2-NEXT: vfnmaddss {{.*}}(%rip), %xmm1, %xmm0, %xmm0
162 ; BDVER2-NEXT: vfmaddss %xmm1, %xmm0, %xmm1, %xmm0
163 ; BDVER2-NEXT: vmulss {{.*}}(%rip), %xmm0, %xmm1
164 ; BDVER2-NEXT: vmulss %xmm0, %xmm1, %xmm0
167 ; BTVER2-LABEL: f32_one_step_2_divs:
169 ; BTVER2-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
170 ; BTVER2-NEXT: vrcpss %xmm0, %xmm0, %xmm1
171 ; BTVER2-NEXT: vmulss %xmm1, %xmm0, %xmm0
172 ; BTVER2-NEXT: vsubss %xmm0, %xmm2, %xmm0
173 ; BTVER2-NEXT: vmulss %xmm0, %xmm1, %xmm0
174 ; BTVER2-NEXT: vaddss %xmm0, %xmm1, %xmm0
175 ; BTVER2-NEXT: vmulss {{.*}}(%rip), %xmm0, %xmm1
176 ; BTVER2-NEXT: vmulss %xmm0, %xmm1, %xmm0
179 ; SANDY-LABEL: f32_one_step_2_divs:
181 ; SANDY-NEXT: vrcpss %xmm0, %xmm0, %xmm1
182 ; SANDY-NEXT: vmulss %xmm1, %xmm0, %xmm0
183 ; SANDY-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
184 ; SANDY-NEXT: vsubss %xmm0, %xmm2, %xmm0
185 ; SANDY-NEXT: vmulss %xmm0, %xmm1, %xmm0
186 ; SANDY-NEXT: vaddss %xmm0, %xmm1, %xmm0
187 ; SANDY-NEXT: vmulss {{.*}}(%rip), %xmm0, %xmm1
188 ; SANDY-NEXT: vmulss %xmm0, %xmm1, %xmm0
191 ; HASWELL-LABEL: f32_one_step_2_divs:
193 ; HASWELL-NEXT: vrcpss %xmm0, %xmm0, %xmm1
194 ; HASWELL-NEXT: vfnmadd213ss {{.*#+}} xmm0 = -(xmm1 * xmm0) + mem
195 ; HASWELL-NEXT: vfmadd132ss {{.*#+}} xmm0 = (xmm0 * xmm1) + xmm1
196 ; HASWELL-NEXT: vmulss {{.*}}(%rip), %xmm0, %xmm1
197 ; HASWELL-NEXT: vmulss %xmm0, %xmm1, %xmm0
200 ; HASWELL-NO-FMA-LABEL: f32_one_step_2_divs:
201 ; HASWELL-NO-FMA: # %bb.0:
202 ; HASWELL-NO-FMA-NEXT: vrcpss %xmm0, %xmm0, %xmm1
203 ; HASWELL-NO-FMA-NEXT: vmulss %xmm1, %xmm0, %xmm0
204 ; HASWELL-NO-FMA-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
205 ; HASWELL-NO-FMA-NEXT: vsubss %xmm0, %xmm2, %xmm0
206 ; HASWELL-NO-FMA-NEXT: vmulss %xmm0, %xmm1, %xmm0
207 ; HASWELL-NO-FMA-NEXT: vaddss %xmm0, %xmm1, %xmm0
208 ; HASWELL-NO-FMA-NEXT: vmulss {{.*}}(%rip), %xmm0, %xmm1
209 ; HASWELL-NO-FMA-NEXT: vmulss %xmm0, %xmm1, %xmm0
210 ; HASWELL-NO-FMA-NEXT: retq
212 ; AVX512-LABEL: f32_one_step_2_divs:
214 ; AVX512-NEXT: vrcpss %xmm0, %xmm0, %xmm1
215 ; AVX512-NEXT: vfnmadd213ss {{.*#+}} xmm0 = -(xmm1 * xmm0) + mem
216 ; AVX512-NEXT: vfmadd132ss {{.*#+}} xmm0 = (xmm0 * xmm1) + xmm1
217 ; AVX512-NEXT: vmulss {{.*}}(%rip), %xmm0, %xmm1
218 ; AVX512-NEXT: vmulss %xmm0, %xmm1, %xmm0
220 %div = fdiv fast float 3456.0, %x
221 %div2 = fdiv fast float %div, %x
225 define float @f32_two_step_2(float %x) #2 {
226 ; SSE-LABEL: f32_two_step_2:
228 ; SSE-NEXT: rcpss %xmm0, %xmm2
229 ; SSE-NEXT: movaps %xmm0, %xmm3
230 ; SSE-NEXT: mulss %xmm2, %xmm3
231 ; SSE-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
232 ; SSE-NEXT: movaps %xmm1, %xmm4
233 ; SSE-NEXT: subss %xmm3, %xmm4
234 ; SSE-NEXT: mulss %xmm2, %xmm4
235 ; SSE-NEXT: addss %xmm2, %xmm4
236 ; SSE-NEXT: mulss %xmm4, %xmm0
237 ; SSE-NEXT: subss %xmm0, %xmm1
238 ; SSE-NEXT: mulss %xmm4, %xmm1
239 ; SSE-NEXT: addss %xmm4, %xmm1
240 ; SSE-NEXT: mulss {{.*}}(%rip), %xmm1
241 ; SSE-NEXT: movaps %xmm1, %xmm0
244 ; AVX-RECIP-LABEL: f32_two_step_2:
245 ; AVX-RECIP: # %bb.0:
246 ; AVX-RECIP-NEXT: vrcpss %xmm0, %xmm0, %xmm1
247 ; AVX-RECIP-NEXT: vmulss %xmm1, %xmm0, %xmm2
248 ; AVX-RECIP-NEXT: vmovss {{.*#+}} xmm3 = mem[0],zero,zero,zero
249 ; AVX-RECIP-NEXT: vsubss %xmm2, %xmm3, %xmm2
250 ; AVX-RECIP-NEXT: vmulss %xmm2, %xmm1, %xmm2
251 ; AVX-RECIP-NEXT: vaddss %xmm2, %xmm1, %xmm1
252 ; AVX-RECIP-NEXT: vmulss %xmm1, %xmm0, %xmm0
253 ; AVX-RECIP-NEXT: vsubss %xmm0, %xmm3, %xmm0
254 ; AVX-RECIP-NEXT: vmulss %xmm0, %xmm1, %xmm0
255 ; AVX-RECIP-NEXT: vaddss %xmm0, %xmm1, %xmm0
256 ; AVX-RECIP-NEXT: vmulss {{.*}}(%rip), %xmm0, %xmm0
257 ; AVX-RECIP-NEXT: retq
259 ; FMA-RECIP-LABEL: f32_two_step_2:
260 ; FMA-RECIP: # %bb.0:
261 ; FMA-RECIP-NEXT: vrcpss %xmm0, %xmm0, %xmm1
262 ; FMA-RECIP-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
263 ; FMA-RECIP-NEXT: vmovaps %xmm1, %xmm3
264 ; FMA-RECIP-NEXT: vfnmadd213ss {{.*#+}} xmm3 = -(xmm0 * xmm3) + xmm2
265 ; FMA-RECIP-NEXT: vfmadd132ss {{.*#+}} xmm3 = (xmm3 * xmm1) + xmm1
266 ; FMA-RECIP-NEXT: vfnmadd213ss {{.*#+}} xmm0 = -(xmm3 * xmm0) + xmm2
267 ; FMA-RECIP-NEXT: vfmadd132ss {{.*#+}} xmm0 = (xmm0 * xmm3) + xmm3
268 ; FMA-RECIP-NEXT: vmulss {{.*}}(%rip), %xmm0, %xmm0
269 ; FMA-RECIP-NEXT: retq
271 ; BDVER2-LABEL: f32_two_step_2:
273 ; BDVER2-NEXT: vrcpss %xmm0, %xmm0, %xmm1
274 ; BDVER2-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
275 ; BDVER2-NEXT: vfnmaddss %xmm2, %xmm1, %xmm0, %xmm3
276 ; BDVER2-NEXT: vfmaddss %xmm1, %xmm3, %xmm1, %xmm1
277 ; BDVER2-NEXT: vfnmaddss %xmm2, %xmm1, %xmm0, %xmm0
278 ; BDVER2-NEXT: vfmaddss %xmm1, %xmm0, %xmm1, %xmm0
279 ; BDVER2-NEXT: vmulss {{.*}}(%rip), %xmm0, %xmm0
282 ; BTVER2-LABEL: f32_two_step_2:
284 ; BTVER2-NEXT: vmovss {{.*#+}} xmm3 = mem[0],zero,zero,zero
285 ; BTVER2-NEXT: vrcpss %xmm0, %xmm0, %xmm1
286 ; BTVER2-NEXT: vmulss %xmm1, %xmm0, %xmm2
287 ; BTVER2-NEXT: vsubss %xmm2, %xmm3, %xmm2
288 ; BTVER2-NEXT: vmulss %xmm2, %xmm1, %xmm2
289 ; BTVER2-NEXT: vaddss %xmm2, %xmm1, %xmm1
290 ; BTVER2-NEXT: vmulss %xmm1, %xmm0, %xmm0
291 ; BTVER2-NEXT: vsubss %xmm0, %xmm3, %xmm0
292 ; BTVER2-NEXT: vmulss %xmm0, %xmm1, %xmm0
293 ; BTVER2-NEXT: vaddss %xmm0, %xmm1, %xmm0
294 ; BTVER2-NEXT: vmulss {{.*}}(%rip), %xmm0, %xmm0
297 ; SANDY-LABEL: f32_two_step_2:
299 ; SANDY-NEXT: vrcpss %xmm0, %xmm0, %xmm1
300 ; SANDY-NEXT: vmulss %xmm1, %xmm0, %xmm2
301 ; SANDY-NEXT: vmovss {{.*#+}} xmm3 = mem[0],zero,zero,zero
302 ; SANDY-NEXT: vsubss %xmm2, %xmm3, %xmm2
303 ; SANDY-NEXT: vmulss %xmm2, %xmm1, %xmm2
304 ; SANDY-NEXT: vaddss %xmm2, %xmm1, %xmm1
305 ; SANDY-NEXT: vmulss %xmm1, %xmm0, %xmm0
306 ; SANDY-NEXT: vsubss %xmm0, %xmm3, %xmm0
307 ; SANDY-NEXT: vmulss %xmm0, %xmm1, %xmm0
308 ; SANDY-NEXT: vaddss %xmm0, %xmm1, %xmm0
309 ; SANDY-NEXT: vmulss {{.*}}(%rip), %xmm0, %xmm0
312 ; HASWELL-LABEL: f32_two_step_2:
314 ; HASWELL-NEXT: vrcpss %xmm0, %xmm0, %xmm1
315 ; HASWELL-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
316 ; HASWELL-NEXT: vmovaps %xmm1, %xmm3
317 ; HASWELL-NEXT: vfnmadd213ss {{.*#+}} xmm3 = -(xmm0 * xmm3) + xmm2
318 ; HASWELL-NEXT: vfmadd132ss {{.*#+}} xmm3 = (xmm3 * xmm1) + xmm1
319 ; HASWELL-NEXT: vfnmadd213ss {{.*#+}} xmm0 = -(xmm3 * xmm0) + xmm2
320 ; HASWELL-NEXT: vfmadd132ss {{.*#+}} xmm0 = (xmm0 * xmm3) + xmm3
321 ; HASWELL-NEXT: vmulss {{.*}}(%rip), %xmm0, %xmm0
324 ; HASWELL-NO-FMA-LABEL: f32_two_step_2:
325 ; HASWELL-NO-FMA: # %bb.0:
326 ; HASWELL-NO-FMA-NEXT: vrcpss %xmm0, %xmm0, %xmm1
327 ; HASWELL-NO-FMA-NEXT: vmulss %xmm1, %xmm0, %xmm2
328 ; HASWELL-NO-FMA-NEXT: vmovss {{.*#+}} xmm3 = mem[0],zero,zero,zero
329 ; HASWELL-NO-FMA-NEXT: vsubss %xmm2, %xmm3, %xmm2
330 ; HASWELL-NO-FMA-NEXT: vmulss %xmm2, %xmm1, %xmm2
331 ; HASWELL-NO-FMA-NEXT: vaddss %xmm2, %xmm1, %xmm1
332 ; HASWELL-NO-FMA-NEXT: vmulss %xmm1, %xmm0, %xmm0
333 ; HASWELL-NO-FMA-NEXT: vsubss %xmm0, %xmm3, %xmm0
334 ; HASWELL-NO-FMA-NEXT: vmulss %xmm0, %xmm1, %xmm0
335 ; HASWELL-NO-FMA-NEXT: vaddss %xmm0, %xmm1, %xmm0
336 ; HASWELL-NO-FMA-NEXT: vmulss {{.*}}(%rip), %xmm0, %xmm0
337 ; HASWELL-NO-FMA-NEXT: retq
339 ; AVX512-LABEL: f32_two_step_2:
341 ; AVX512-NEXT: vrcpss %xmm0, %xmm0, %xmm1
342 ; AVX512-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
343 ; AVX512-NEXT: vmovaps %xmm1, %xmm3
344 ; AVX512-NEXT: vfnmadd213ss {{.*#+}} xmm3 = -(xmm0 * xmm3) + xmm2
345 ; AVX512-NEXT: vfmadd132ss {{.*#+}} xmm3 = (xmm3 * xmm1) + xmm1
346 ; AVX512-NEXT: vfnmadd213ss {{.*#+}} xmm0 = -(xmm3 * xmm0) + xmm2
347 ; AVX512-NEXT: vfmadd132ss {{.*#+}} xmm0 = (xmm0 * xmm3) + xmm3
348 ; AVX512-NEXT: vmulss {{.*}}(%rip), %xmm0, %xmm0
350 %div = fdiv fast float 6789.0, %x
354 define <4 x float> @v4f32_one_step2(<4 x float> %x) #1 {
355 ; SSE-LABEL: v4f32_one_step2:
357 ; SSE-NEXT: rcpps %xmm0, %xmm2
358 ; SSE-NEXT: mulps %xmm2, %xmm0
359 ; SSE-NEXT: movaps {{.*#+}} xmm1 = [1.0E+0,1.0E+0,1.0E+0,1.0E+0]
360 ; SSE-NEXT: subps %xmm0, %xmm1
361 ; SSE-NEXT: mulps %xmm2, %xmm1
362 ; SSE-NEXT: addps %xmm2, %xmm1
363 ; SSE-NEXT: mulps {{.*}}(%rip), %xmm1
364 ; SSE-NEXT: movaps %xmm1, %xmm0
367 ; AVX-RECIP-LABEL: v4f32_one_step2:
368 ; AVX-RECIP: # %bb.0:
369 ; AVX-RECIP-NEXT: vrcpps %xmm0, %xmm1
370 ; AVX-RECIP-NEXT: vmulps %xmm1, %xmm0, %xmm0
371 ; AVX-RECIP-NEXT: vmovaps {{.*#+}} xmm2 = [1.0E+0,1.0E+0,1.0E+0,1.0E+0]
372 ; AVX-RECIP-NEXT: vsubps %xmm0, %xmm2, %xmm0
373 ; AVX-RECIP-NEXT: vmulps %xmm0, %xmm1, %xmm0
374 ; AVX-RECIP-NEXT: vaddps %xmm0, %xmm1, %xmm0
375 ; AVX-RECIP-NEXT: vmulps {{.*}}(%rip), %xmm0, %xmm0
376 ; AVX-RECIP-NEXT: retq
378 ; FMA-RECIP-LABEL: v4f32_one_step2:
379 ; FMA-RECIP: # %bb.0:
380 ; FMA-RECIP-NEXT: vrcpps %xmm0, %xmm1
381 ; FMA-RECIP-NEXT: vfnmadd213ps {{.*#+}} xmm0 = -(xmm1 * xmm0) + mem
382 ; FMA-RECIP-NEXT: vfmadd132ps {{.*#+}} xmm0 = (xmm0 * xmm1) + xmm1
383 ; FMA-RECIP-NEXT: vmulps {{.*}}(%rip), %xmm0, %xmm0
384 ; FMA-RECIP-NEXT: retq
386 ; BDVER2-LABEL: v4f32_one_step2:
388 ; BDVER2-NEXT: vrcpps %xmm0, %xmm1
389 ; BDVER2-NEXT: vfnmaddps {{.*}}(%rip), %xmm1, %xmm0, %xmm0
390 ; BDVER2-NEXT: vfmaddps %xmm1, %xmm0, %xmm1, %xmm0
391 ; BDVER2-NEXT: vmulps {{.*}}(%rip), %xmm0, %xmm0
394 ; BTVER2-LABEL: v4f32_one_step2:
396 ; BTVER2-NEXT: vmovaps {{.*#+}} xmm2 = [1.0E+0,1.0E+0,1.0E+0,1.0E+0]
397 ; BTVER2-NEXT: vrcpps %xmm0, %xmm1
398 ; BTVER2-NEXT: vmulps %xmm1, %xmm0, %xmm0
399 ; BTVER2-NEXT: vsubps %xmm0, %xmm2, %xmm0
400 ; BTVER2-NEXT: vmulps %xmm0, %xmm1, %xmm0
401 ; BTVER2-NEXT: vaddps %xmm0, %xmm1, %xmm0
402 ; BTVER2-NEXT: vmulps {{.*}}(%rip), %xmm0, %xmm0
405 ; SANDY-LABEL: v4f32_one_step2:
407 ; SANDY-NEXT: vrcpps %xmm0, %xmm1
408 ; SANDY-NEXT: vmulps %xmm1, %xmm0, %xmm0
409 ; SANDY-NEXT: vmovaps {{.*#+}} xmm2 = [1.0E+0,1.0E+0,1.0E+0,1.0E+0]
410 ; SANDY-NEXT: vsubps %xmm0, %xmm2, %xmm0
411 ; SANDY-NEXT: vmulps %xmm0, %xmm1, %xmm0
412 ; SANDY-NEXT: vaddps %xmm0, %xmm1, %xmm0
413 ; SANDY-NEXT: vmulps {{.*}}(%rip), %xmm0, %xmm0
416 ; HASWELL-LABEL: v4f32_one_step2:
418 ; HASWELL-NEXT: vrcpps %xmm0, %xmm1
419 ; HASWELL-NEXT: vbroadcastss {{.*#+}} xmm2 = [1.0E+0,1.0E+0,1.0E+0,1.0E+0]
420 ; HASWELL-NEXT: vfnmadd213ps {{.*#+}} xmm0 = -(xmm1 * xmm0) + xmm2
421 ; HASWELL-NEXT: vfmadd132ps {{.*#+}} xmm0 = (xmm0 * xmm1) + xmm1
422 ; HASWELL-NEXT: vmulps {{.*}}(%rip), %xmm0, %xmm0
425 ; HASWELL-NO-FMA-LABEL: v4f32_one_step2:
426 ; HASWELL-NO-FMA: # %bb.0:
427 ; HASWELL-NO-FMA-NEXT: vrcpps %xmm0, %xmm1
428 ; HASWELL-NO-FMA-NEXT: vmulps %xmm1, %xmm0, %xmm0
429 ; HASWELL-NO-FMA-NEXT: vbroadcastss {{.*#+}} xmm2 = [1.0E+0,1.0E+0,1.0E+0,1.0E+0]
430 ; HASWELL-NO-FMA-NEXT: vsubps %xmm0, %xmm2, %xmm0
431 ; HASWELL-NO-FMA-NEXT: vmulps %xmm0, %xmm1, %xmm0
432 ; HASWELL-NO-FMA-NEXT: vaddps %xmm0, %xmm1, %xmm0
433 ; HASWELL-NO-FMA-NEXT: vmulps {{.*}}(%rip), %xmm0, %xmm0
434 ; HASWELL-NO-FMA-NEXT: retq
436 ; KNL-LABEL: v4f32_one_step2:
438 ; KNL-NEXT: vrcpps %xmm0, %xmm1
439 ; KNL-NEXT: vbroadcastss {{.*#+}} xmm2 = [1.0E+0,1.0E+0,1.0E+0,1.0E+0]
440 ; KNL-NEXT: vfnmadd213ps {{.*#+}} xmm0 = -(xmm1 * xmm0) + xmm2
441 ; KNL-NEXT: vfmadd132ps {{.*#+}} xmm0 = (xmm0 * xmm1) + xmm1
442 ; KNL-NEXT: vmulps {{.*}}(%rip), %xmm0, %xmm0
445 ; SKX-LABEL: v4f32_one_step2:
447 ; SKX-NEXT: vrcpps %xmm0, %xmm1
448 ; SKX-NEXT: vfnmadd213ps {{.*#+}} xmm0 = -(xmm1 * xmm0) + mem
449 ; SKX-NEXT: vfmadd132ps {{.*#+}} xmm0 = (xmm0 * xmm1) + xmm1
450 ; SKX-NEXT: vmulps {{.*}}(%rip), %xmm0, %xmm0
452 %div = fdiv fast <4 x float> <float 1.0, float 2.0, float 3.0, float 4.0>, %x
456 define <4 x float> @v4f32_one_step_2_divs(<4 x float> %x) #1 {
457 ; SSE-LABEL: v4f32_one_step_2_divs:
459 ; SSE-NEXT: rcpps %xmm0, %xmm1
460 ; SSE-NEXT: mulps %xmm1, %xmm0
461 ; SSE-NEXT: movaps {{.*#+}} xmm2 = [1.0E+0,1.0E+0,1.0E+0,1.0E+0]
462 ; SSE-NEXT: subps %xmm0, %xmm2
463 ; SSE-NEXT: mulps %xmm1, %xmm2
464 ; SSE-NEXT: addps %xmm1, %xmm2
465 ; SSE-NEXT: movaps {{.*#+}} xmm0 = [1.0E+0,2.0E+0,3.0E+0,4.0E+0]
466 ; SSE-NEXT: mulps %xmm2, %xmm0
467 ; SSE-NEXT: mulps %xmm2, %xmm0
470 ; AVX-RECIP-LABEL: v4f32_one_step_2_divs:
471 ; AVX-RECIP: # %bb.0:
472 ; AVX-RECIP-NEXT: vrcpps %xmm0, %xmm1
473 ; AVX-RECIP-NEXT: vmulps %xmm1, %xmm0, %xmm0
474 ; AVX-RECIP-NEXT: vmovaps {{.*#+}} xmm2 = [1.0E+0,1.0E+0,1.0E+0,1.0E+0]
475 ; AVX-RECIP-NEXT: vsubps %xmm0, %xmm2, %xmm0
476 ; AVX-RECIP-NEXT: vmulps %xmm0, %xmm1, %xmm0
477 ; AVX-RECIP-NEXT: vaddps %xmm0, %xmm1, %xmm0
478 ; AVX-RECIP-NEXT: vmulps {{.*}}(%rip), %xmm0, %xmm1
479 ; AVX-RECIP-NEXT: vmulps %xmm0, %xmm1, %xmm0
480 ; AVX-RECIP-NEXT: retq
482 ; FMA-RECIP-LABEL: v4f32_one_step_2_divs:
483 ; FMA-RECIP: # %bb.0:
484 ; FMA-RECIP-NEXT: vrcpps %xmm0, %xmm1
485 ; FMA-RECIP-NEXT: vfnmadd213ps {{.*#+}} xmm0 = -(xmm1 * xmm0) + mem
486 ; FMA-RECIP-NEXT: vfmadd132ps {{.*#+}} xmm0 = (xmm0 * xmm1) + xmm1
487 ; FMA-RECIP-NEXT: vmulps {{.*}}(%rip), %xmm0, %xmm1
488 ; FMA-RECIP-NEXT: vmulps %xmm0, %xmm1, %xmm0
489 ; FMA-RECIP-NEXT: retq
491 ; BDVER2-LABEL: v4f32_one_step_2_divs:
493 ; BDVER2-NEXT: vrcpps %xmm0, %xmm1
494 ; BDVER2-NEXT: vfnmaddps {{.*}}(%rip), %xmm1, %xmm0, %xmm0
495 ; BDVER2-NEXT: vfmaddps %xmm1, %xmm0, %xmm1, %xmm0
496 ; BDVER2-NEXT: vmulps {{.*}}(%rip), %xmm0, %xmm1
497 ; BDVER2-NEXT: vmulps %xmm0, %xmm1, %xmm0
500 ; BTVER2-LABEL: v4f32_one_step_2_divs:
502 ; BTVER2-NEXT: vmovaps {{.*#+}} xmm2 = [1.0E+0,1.0E+0,1.0E+0,1.0E+0]
503 ; BTVER2-NEXT: vrcpps %xmm0, %xmm1
504 ; BTVER2-NEXT: vmulps %xmm1, %xmm0, %xmm0
505 ; BTVER2-NEXT: vsubps %xmm0, %xmm2, %xmm0
506 ; BTVER2-NEXT: vmulps %xmm0, %xmm1, %xmm0
507 ; BTVER2-NEXT: vaddps %xmm0, %xmm1, %xmm0
508 ; BTVER2-NEXT: vmulps {{.*}}(%rip), %xmm0, %xmm1
509 ; BTVER2-NEXT: vmulps %xmm0, %xmm1, %xmm0
512 ; SANDY-LABEL: v4f32_one_step_2_divs:
514 ; SANDY-NEXT: vrcpps %xmm0, %xmm1
515 ; SANDY-NEXT: vmulps %xmm1, %xmm0, %xmm0
516 ; SANDY-NEXT: vmovaps {{.*#+}} xmm2 = [1.0E+0,1.0E+0,1.0E+0,1.0E+0]
517 ; SANDY-NEXT: vsubps %xmm0, %xmm2, %xmm0
518 ; SANDY-NEXT: vmulps %xmm0, %xmm1, %xmm0
519 ; SANDY-NEXT: vaddps %xmm0, %xmm1, %xmm0
520 ; SANDY-NEXT: vmulps {{.*}}(%rip), %xmm0, %xmm1
521 ; SANDY-NEXT: vmulps %xmm0, %xmm1, %xmm0
524 ; HASWELL-LABEL: v4f32_one_step_2_divs:
526 ; HASWELL-NEXT: vrcpps %xmm0, %xmm1
527 ; HASWELL-NEXT: vbroadcastss {{.*#+}} xmm2 = [1.0E+0,1.0E+0,1.0E+0,1.0E+0]
528 ; HASWELL-NEXT: vfnmadd213ps {{.*#+}} xmm0 = -(xmm1 * xmm0) + xmm2
529 ; HASWELL-NEXT: vfmadd132ps {{.*#+}} xmm0 = (xmm0 * xmm1) + xmm1
530 ; HASWELL-NEXT: vmulps {{.*}}(%rip), %xmm0, %xmm1
531 ; HASWELL-NEXT: vmulps %xmm0, %xmm1, %xmm0
534 ; HASWELL-NO-FMA-LABEL: v4f32_one_step_2_divs:
535 ; HASWELL-NO-FMA: # %bb.0:
536 ; HASWELL-NO-FMA-NEXT: vrcpps %xmm0, %xmm1
537 ; HASWELL-NO-FMA-NEXT: vmulps %xmm1, %xmm0, %xmm0
538 ; HASWELL-NO-FMA-NEXT: vbroadcastss {{.*#+}} xmm2 = [1.0E+0,1.0E+0,1.0E+0,1.0E+0]
539 ; HASWELL-NO-FMA-NEXT: vsubps %xmm0, %xmm2, %xmm0
540 ; HASWELL-NO-FMA-NEXT: vmulps %xmm0, %xmm1, %xmm0
541 ; HASWELL-NO-FMA-NEXT: vaddps %xmm0, %xmm1, %xmm0
542 ; HASWELL-NO-FMA-NEXT: vmulps {{.*}}(%rip), %xmm0, %xmm1
543 ; HASWELL-NO-FMA-NEXT: vmulps %xmm0, %xmm1, %xmm0
544 ; HASWELL-NO-FMA-NEXT: retq
546 ; KNL-LABEL: v4f32_one_step_2_divs:
548 ; KNL-NEXT: vrcpps %xmm0, %xmm1
549 ; KNL-NEXT: vbroadcastss {{.*#+}} xmm2 = [1.0E+0,1.0E+0,1.0E+0,1.0E+0]
550 ; KNL-NEXT: vfnmadd213ps {{.*#+}} xmm0 = -(xmm1 * xmm0) + xmm2
551 ; KNL-NEXT: vfmadd132ps {{.*#+}} xmm0 = (xmm0 * xmm1) + xmm1
552 ; KNL-NEXT: vmulps {{.*}}(%rip), %xmm0, %xmm1
553 ; KNL-NEXT: vmulps %xmm0, %xmm1, %xmm0
556 ; SKX-LABEL: v4f32_one_step_2_divs:
558 ; SKX-NEXT: vrcpps %xmm0, %xmm1
559 ; SKX-NEXT: vfnmadd213ps {{.*#+}} xmm0 = -(xmm1 * xmm0) + mem
560 ; SKX-NEXT: vfmadd132ps {{.*#+}} xmm0 = (xmm0 * xmm1) + xmm1
561 ; SKX-NEXT: vmulps {{.*}}(%rip), %xmm0, %xmm1
562 ; SKX-NEXT: vmulps %xmm0, %xmm1, %xmm0
564 %div = fdiv fast <4 x float> <float 1.0, float 2.0, float 3.0, float 4.0>, %x
565 %div2 = fdiv fast <4 x float> %div, %x
566 ret <4 x float> %div2
569 define <4 x float> @v4f32_two_step2(<4 x float> %x) #2 {
570 ; SSE-LABEL: v4f32_two_step2:
572 ; SSE-NEXT: rcpps %xmm0, %xmm2
573 ; SSE-NEXT: movaps %xmm0, %xmm3
574 ; SSE-NEXT: mulps %xmm2, %xmm3
575 ; SSE-NEXT: movaps {{.*#+}} xmm1 = [1.0E+0,1.0E+0,1.0E+0,1.0E+0]
576 ; SSE-NEXT: movaps %xmm1, %xmm4
577 ; SSE-NEXT: subps %xmm3, %xmm4
578 ; SSE-NEXT: mulps %xmm2, %xmm4
579 ; SSE-NEXT: addps %xmm2, %xmm4
580 ; SSE-NEXT: mulps %xmm4, %xmm0
581 ; SSE-NEXT: subps %xmm0, %xmm1
582 ; SSE-NEXT: mulps %xmm4, %xmm1
583 ; SSE-NEXT: addps %xmm4, %xmm1
584 ; SSE-NEXT: mulps {{.*}}(%rip), %xmm1
585 ; SSE-NEXT: movaps %xmm1, %xmm0
588 ; AVX-RECIP-LABEL: v4f32_two_step2:
589 ; AVX-RECIP: # %bb.0:
590 ; AVX-RECIP-NEXT: vrcpps %xmm0, %xmm1
591 ; AVX-RECIP-NEXT: vmulps %xmm1, %xmm0, %xmm2
592 ; AVX-RECIP-NEXT: vmovaps {{.*#+}} xmm3 = [1.0E+0,1.0E+0,1.0E+0,1.0E+0]
593 ; AVX-RECIP-NEXT: vsubps %xmm2, %xmm3, %xmm2
594 ; AVX-RECIP-NEXT: vmulps %xmm2, %xmm1, %xmm2
595 ; AVX-RECIP-NEXT: vaddps %xmm2, %xmm1, %xmm1
596 ; AVX-RECIP-NEXT: vmulps %xmm1, %xmm0, %xmm0
597 ; AVX-RECIP-NEXT: vsubps %xmm0, %xmm3, %xmm0
598 ; AVX-RECIP-NEXT: vmulps %xmm0, %xmm1, %xmm0
599 ; AVX-RECIP-NEXT: vaddps %xmm0, %xmm1, %xmm0
600 ; AVX-RECIP-NEXT: vmulps {{.*}}(%rip), %xmm0, %xmm0
601 ; AVX-RECIP-NEXT: retq
603 ; FMA-RECIP-LABEL: v4f32_two_step2:
604 ; FMA-RECIP: # %bb.0:
605 ; FMA-RECIP-NEXT: vrcpps %xmm0, %xmm1
606 ; FMA-RECIP-NEXT: vmovaps {{.*#+}} xmm2 = [1.0E+0,1.0E+0,1.0E+0,1.0E+0]
607 ; FMA-RECIP-NEXT: vmovaps %xmm1, %xmm3
608 ; FMA-RECIP-NEXT: vfnmadd213ps {{.*#+}} xmm3 = -(xmm0 * xmm3) + xmm2
609 ; FMA-RECIP-NEXT: vfmadd132ps {{.*#+}} xmm3 = (xmm3 * xmm1) + xmm1
610 ; FMA-RECIP-NEXT: vfnmadd213ps {{.*#+}} xmm0 = -(xmm3 * xmm0) + xmm2
611 ; FMA-RECIP-NEXT: vfmadd132ps {{.*#+}} xmm0 = (xmm0 * xmm3) + xmm3
612 ; FMA-RECIP-NEXT: vmulps {{.*}}(%rip), %xmm0, %xmm0
613 ; FMA-RECIP-NEXT: retq
615 ; BDVER2-LABEL: v4f32_two_step2:
617 ; BDVER2-NEXT: vrcpps %xmm0, %xmm1
618 ; BDVER2-NEXT: vmovaps {{.*#+}} xmm2 = [1.0E+0,1.0E+0,1.0E+0,1.0E+0]
619 ; BDVER2-NEXT: vfnmaddps %xmm2, %xmm1, %xmm0, %xmm3
620 ; BDVER2-NEXT: vfmaddps %xmm1, %xmm3, %xmm1, %xmm1
621 ; BDVER2-NEXT: vfnmaddps %xmm2, %xmm1, %xmm0, %xmm0
622 ; BDVER2-NEXT: vfmaddps %xmm1, %xmm0, %xmm1, %xmm0
623 ; BDVER2-NEXT: vmulps {{.*}}(%rip), %xmm0, %xmm0
626 ; BTVER2-LABEL: v4f32_two_step2:
628 ; BTVER2-NEXT: vmovaps {{.*#+}} xmm3 = [1.0E+0,1.0E+0,1.0E+0,1.0E+0]
629 ; BTVER2-NEXT: vrcpps %xmm0, %xmm1
630 ; BTVER2-NEXT: vmulps %xmm1, %xmm0, %xmm2
631 ; BTVER2-NEXT: vsubps %xmm2, %xmm3, %xmm2
632 ; BTVER2-NEXT: vmulps %xmm2, %xmm1, %xmm2
633 ; BTVER2-NEXT: vaddps %xmm2, %xmm1, %xmm1
634 ; BTVER2-NEXT: vmulps %xmm1, %xmm0, %xmm0
635 ; BTVER2-NEXT: vsubps %xmm0, %xmm3, %xmm0
636 ; BTVER2-NEXT: vmulps %xmm0, %xmm1, %xmm0
637 ; BTVER2-NEXT: vaddps %xmm0, %xmm1, %xmm0
638 ; BTVER2-NEXT: vmulps {{.*}}(%rip), %xmm0, %xmm0
641 ; SANDY-LABEL: v4f32_two_step2:
643 ; SANDY-NEXT: vrcpps %xmm0, %xmm1
644 ; SANDY-NEXT: vmulps %xmm1, %xmm0, %xmm2
645 ; SANDY-NEXT: vmovaps {{.*#+}} xmm3 = [1.0E+0,1.0E+0,1.0E+0,1.0E+0]
646 ; SANDY-NEXT: vsubps %xmm2, %xmm3, %xmm2
647 ; SANDY-NEXT: vmulps %xmm2, %xmm1, %xmm2
648 ; SANDY-NEXT: vaddps %xmm2, %xmm1, %xmm1
649 ; SANDY-NEXT: vmulps %xmm1, %xmm0, %xmm0
650 ; SANDY-NEXT: vsubps %xmm0, %xmm3, %xmm0
651 ; SANDY-NEXT: vmulps %xmm0, %xmm1, %xmm0
652 ; SANDY-NEXT: vaddps %xmm0, %xmm1, %xmm0
653 ; SANDY-NEXT: vmulps {{.*}}(%rip), %xmm0, %xmm0
656 ; HASWELL-LABEL: v4f32_two_step2:
658 ; HASWELL-NEXT: vrcpps %xmm0, %xmm1
659 ; HASWELL-NEXT: vbroadcastss {{.*#+}} xmm2 = [1.0E+0,1.0E+0,1.0E+0,1.0E+0]
660 ; HASWELL-NEXT: vmovaps %xmm1, %xmm3
661 ; HASWELL-NEXT: vfnmadd213ps {{.*#+}} xmm3 = -(xmm0 * xmm3) + xmm2
662 ; HASWELL-NEXT: vfmadd132ps {{.*#+}} xmm3 = (xmm3 * xmm1) + xmm1
663 ; HASWELL-NEXT: vfnmadd213ps {{.*#+}} xmm0 = -(xmm3 * xmm0) + xmm2
664 ; HASWELL-NEXT: vfmadd132ps {{.*#+}} xmm0 = (xmm0 * xmm3) + xmm3
665 ; HASWELL-NEXT: vmulps {{.*}}(%rip), %xmm0, %xmm0
668 ; HASWELL-NO-FMA-LABEL: v4f32_two_step2:
669 ; HASWELL-NO-FMA: # %bb.0:
670 ; HASWELL-NO-FMA-NEXT: vrcpps %xmm0, %xmm1
671 ; HASWELL-NO-FMA-NEXT: vmulps %xmm1, %xmm0, %xmm2
672 ; HASWELL-NO-FMA-NEXT: vbroadcastss {{.*#+}} xmm3 = [1.0E+0,1.0E+0,1.0E+0,1.0E+0]
673 ; HASWELL-NO-FMA-NEXT: vsubps %xmm2, %xmm3, %xmm2
674 ; HASWELL-NO-FMA-NEXT: vmulps %xmm2, %xmm1, %xmm2
675 ; HASWELL-NO-FMA-NEXT: vaddps %xmm2, %xmm1, %xmm1
676 ; HASWELL-NO-FMA-NEXT: vmulps %xmm1, %xmm0, %xmm0
677 ; HASWELL-NO-FMA-NEXT: vsubps %xmm0, %xmm3, %xmm0
678 ; HASWELL-NO-FMA-NEXT: vmulps %xmm0, %xmm1, %xmm0
679 ; HASWELL-NO-FMA-NEXT: vaddps %xmm0, %xmm1, %xmm0
680 ; HASWELL-NO-FMA-NEXT: vmulps {{.*}}(%rip), %xmm0, %xmm0
681 ; HASWELL-NO-FMA-NEXT: retq
683 ; AVX512-LABEL: v4f32_two_step2:
685 ; AVX512-NEXT: vrcpps %xmm0, %xmm1
686 ; AVX512-NEXT: vbroadcastss {{.*#+}} xmm2 = [1.0E+0,1.0E+0,1.0E+0,1.0E+0]
687 ; AVX512-NEXT: vmovaps %xmm1, %xmm3
688 ; AVX512-NEXT: vfnmadd213ps {{.*#+}} xmm3 = -(xmm0 * xmm3) + xmm2
689 ; AVX512-NEXT: vfmadd132ps {{.*#+}} xmm3 = (xmm3 * xmm1) + xmm1
690 ; AVX512-NEXT: vfnmadd213ps {{.*#+}} xmm0 = -(xmm3 * xmm0) + xmm2
691 ; AVX512-NEXT: vfmadd132ps {{.*#+}} xmm0 = (xmm0 * xmm3) + xmm3
692 ; AVX512-NEXT: vmulps {{.*}}(%rip), %xmm0, %xmm0
694 %div = fdiv fast <4 x float> <float 1.0, float 2.0, float 3.0, float 4.0>, %x
698 define <8 x float> @v8f32_one_step2(<8 x float> %x) #1 {
699 ; SSE-LABEL: v8f32_one_step2:
701 ; SSE-NEXT: rcpps %xmm1, %xmm4
702 ; SSE-NEXT: mulps %xmm4, %xmm1
703 ; SSE-NEXT: movaps {{.*#+}} xmm2 = [1.0E+0,1.0E+0,1.0E+0,1.0E+0]
704 ; SSE-NEXT: movaps %xmm2, %xmm3
705 ; SSE-NEXT: subps %xmm1, %xmm3
706 ; SSE-NEXT: mulps %xmm4, %xmm3
707 ; SSE-NEXT: addps %xmm4, %xmm3
708 ; SSE-NEXT: rcpps %xmm0, %xmm1
709 ; SSE-NEXT: mulps %xmm1, %xmm0
710 ; SSE-NEXT: subps %xmm0, %xmm2
711 ; SSE-NEXT: mulps %xmm1, %xmm2
712 ; SSE-NEXT: addps %xmm1, %xmm2
713 ; SSE-NEXT: mulps {{.*}}(%rip), %xmm2
714 ; SSE-NEXT: mulps {{.*}}(%rip), %xmm3
715 ; SSE-NEXT: movaps %xmm2, %xmm0
716 ; SSE-NEXT: movaps %xmm3, %xmm1
719 ; AVX-RECIP-LABEL: v8f32_one_step2:
720 ; AVX-RECIP: # %bb.0:
721 ; AVX-RECIP-NEXT: vrcpps %ymm0, %ymm1
722 ; AVX-RECIP-NEXT: vmulps %ymm1, %ymm0, %ymm0
723 ; AVX-RECIP-NEXT: vmovaps {{.*#+}} ymm2 = [1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0]
724 ; AVX-RECIP-NEXT: vsubps %ymm0, %ymm2, %ymm0
725 ; AVX-RECIP-NEXT: vmulps %ymm0, %ymm1, %ymm0
726 ; AVX-RECIP-NEXT: vaddps %ymm0, %ymm1, %ymm0
727 ; AVX-RECIP-NEXT: vmulps {{.*}}(%rip), %ymm0, %ymm0
728 ; AVX-RECIP-NEXT: retq
730 ; FMA-RECIP-LABEL: v8f32_one_step2:
731 ; FMA-RECIP: # %bb.0:
732 ; FMA-RECIP-NEXT: vrcpps %ymm0, %ymm1
733 ; FMA-RECIP-NEXT: vfnmadd213ps {{.*#+}} ymm0 = -(ymm1 * ymm0) + mem
734 ; FMA-RECIP-NEXT: vfmadd132ps {{.*#+}} ymm0 = (ymm0 * ymm1) + ymm1
735 ; FMA-RECIP-NEXT: vmulps {{.*}}(%rip), %ymm0, %ymm0
736 ; FMA-RECIP-NEXT: retq
738 ; BDVER2-LABEL: v8f32_one_step2:
740 ; BDVER2-NEXT: vrcpps %ymm0, %ymm1
741 ; BDVER2-NEXT: vfnmaddps {{.*}}(%rip), %ymm1, %ymm0, %ymm0
742 ; BDVER2-NEXT: vfmaddps %ymm1, %ymm0, %ymm1, %ymm0
743 ; BDVER2-NEXT: vmulps {{.*}}(%rip), %ymm0, %ymm0
746 ; BTVER2-LABEL: v8f32_one_step2:
748 ; BTVER2-NEXT: vmovaps {{.*#+}} ymm2 = [1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0]
749 ; BTVER2-NEXT: vrcpps %ymm0, %ymm1
750 ; BTVER2-NEXT: vmulps %ymm1, %ymm0, %ymm0
751 ; BTVER2-NEXT: vsubps %ymm0, %ymm2, %ymm0
752 ; BTVER2-NEXT: vmulps %ymm0, %ymm1, %ymm0
753 ; BTVER2-NEXT: vaddps %ymm0, %ymm1, %ymm0
754 ; BTVER2-NEXT: vmulps {{.*}}(%rip), %ymm0, %ymm0
757 ; SANDY-LABEL: v8f32_one_step2:
759 ; SANDY-NEXT: vrcpps %ymm0, %ymm1
760 ; SANDY-NEXT: vmulps %ymm1, %ymm0, %ymm0
761 ; SANDY-NEXT: vmovaps {{.*#+}} ymm2 = [1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0]
762 ; SANDY-NEXT: vsubps %ymm0, %ymm2, %ymm0
763 ; SANDY-NEXT: vmulps %ymm0, %ymm1, %ymm0
764 ; SANDY-NEXT: vaddps %ymm0, %ymm1, %ymm0
765 ; SANDY-NEXT: vmulps {{.*}}(%rip), %ymm0, %ymm0
768 ; HASWELL-LABEL: v8f32_one_step2:
770 ; HASWELL-NEXT: vrcpps %ymm0, %ymm1
771 ; HASWELL-NEXT: vbroadcastss {{.*#+}} ymm2 = [1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0]
772 ; HASWELL-NEXT: vfnmadd213ps {{.*#+}} ymm0 = -(ymm1 * ymm0) + ymm2
773 ; HASWELL-NEXT: vfmadd132ps {{.*#+}} ymm0 = (ymm0 * ymm1) + ymm1
774 ; HASWELL-NEXT: vmulps {{.*}}(%rip), %ymm0, %ymm0
777 ; HASWELL-NO-FMA-LABEL: v8f32_one_step2:
778 ; HASWELL-NO-FMA: # %bb.0:
779 ; HASWELL-NO-FMA-NEXT: vrcpps %ymm0, %ymm1
780 ; HASWELL-NO-FMA-NEXT: vmulps %ymm1, %ymm0, %ymm0
781 ; HASWELL-NO-FMA-NEXT: vbroadcastss {{.*#+}} ymm2 = [1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0]
782 ; HASWELL-NO-FMA-NEXT: vsubps %ymm0, %ymm2, %ymm0
783 ; HASWELL-NO-FMA-NEXT: vmulps %ymm0, %ymm1, %ymm0
784 ; HASWELL-NO-FMA-NEXT: vaddps %ymm0, %ymm1, %ymm0
785 ; HASWELL-NO-FMA-NEXT: vmulps {{.*}}(%rip), %ymm0, %ymm0
786 ; HASWELL-NO-FMA-NEXT: retq
788 ; KNL-LABEL: v8f32_one_step2:
790 ; KNL-NEXT: vrcpps %ymm0, %ymm1
791 ; KNL-NEXT: vbroadcastss {{.*#+}} ymm2 = [1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0]
792 ; KNL-NEXT: vfnmadd213ps {{.*#+}} ymm0 = -(ymm1 * ymm0) + ymm2
793 ; KNL-NEXT: vfmadd132ps {{.*#+}} ymm0 = (ymm0 * ymm1) + ymm1
794 ; KNL-NEXT: vmulps {{.*}}(%rip), %ymm0, %ymm0
797 ; SKX-LABEL: v8f32_one_step2:
799 ; SKX-NEXT: vrcpps %ymm0, %ymm1
800 ; SKX-NEXT: vfnmadd213ps {{.*#+}} ymm0 = -(ymm1 * ymm0) + mem
801 ; SKX-NEXT: vfmadd132ps {{.*#+}} ymm0 = (ymm0 * ymm1) + ymm1
802 ; SKX-NEXT: vmulps {{.*}}(%rip), %ymm0, %ymm0
804 %div = fdiv fast <8 x float> <float 1.0, float 2.0, float 3.0, float 4.0, float 5.0, float 6.0, float 7.0, float 8.0>, %x
808 define <8 x float> @v8f32_one_step_2_divs(<8 x float> %x) #1 {
809 ; SSE-LABEL: v8f32_one_step_2_divs:
811 ; SSE-NEXT: rcpps %xmm0, %xmm2
812 ; SSE-NEXT: mulps %xmm2, %xmm0
813 ; SSE-NEXT: movaps {{.*#+}} xmm3 = [1.0E+0,1.0E+0,1.0E+0,1.0E+0]
814 ; SSE-NEXT: movaps %xmm3, %xmm4
815 ; SSE-NEXT: subps %xmm0, %xmm4
816 ; SSE-NEXT: mulps %xmm2, %xmm4
817 ; SSE-NEXT: addps %xmm2, %xmm4
818 ; SSE-NEXT: rcpps %xmm1, %xmm0
819 ; SSE-NEXT: mulps %xmm0, %xmm1
820 ; SSE-NEXT: subps %xmm1, %xmm3
821 ; SSE-NEXT: mulps %xmm0, %xmm3
822 ; SSE-NEXT: addps %xmm0, %xmm3
823 ; SSE-NEXT: movaps {{.*#+}} xmm1 = [5.0E+0,6.0E+0,7.0E+0,8.0E+0]
824 ; SSE-NEXT: mulps %xmm3, %xmm1
825 ; SSE-NEXT: movaps {{.*#+}} xmm0 = [1.0E+0,2.0E+0,3.0E+0,4.0E+0]
826 ; SSE-NEXT: mulps %xmm4, %xmm0
827 ; SSE-NEXT: mulps %xmm4, %xmm0
828 ; SSE-NEXT: mulps %xmm3, %xmm1
831 ; AVX-RECIP-LABEL: v8f32_one_step_2_divs:
832 ; AVX-RECIP: # %bb.0:
833 ; AVX-RECIP-NEXT: vrcpps %ymm0, %ymm1
834 ; AVX-RECIP-NEXT: vmulps %ymm1, %ymm0, %ymm0
835 ; AVX-RECIP-NEXT: vmovaps {{.*#+}} ymm2 = [1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0]
836 ; AVX-RECIP-NEXT: vsubps %ymm0, %ymm2, %ymm0
837 ; AVX-RECIP-NEXT: vmulps %ymm0, %ymm1, %ymm0
838 ; AVX-RECIP-NEXT: vaddps %ymm0, %ymm1, %ymm0
839 ; AVX-RECIP-NEXT: vmulps {{.*}}(%rip), %ymm0, %ymm1
840 ; AVX-RECIP-NEXT: vmulps %ymm0, %ymm1, %ymm0
841 ; AVX-RECIP-NEXT: retq
843 ; FMA-RECIP-LABEL: v8f32_one_step_2_divs:
844 ; FMA-RECIP: # %bb.0:
845 ; FMA-RECIP-NEXT: vrcpps %ymm0, %ymm1
846 ; FMA-RECIP-NEXT: vfnmadd213ps {{.*#+}} ymm0 = -(ymm1 * ymm0) + mem
847 ; FMA-RECIP-NEXT: vfmadd132ps {{.*#+}} ymm0 = (ymm0 * ymm1) + ymm1
848 ; FMA-RECIP-NEXT: vmulps {{.*}}(%rip), %ymm0, %ymm1
849 ; FMA-RECIP-NEXT: vmulps %ymm0, %ymm1, %ymm0
850 ; FMA-RECIP-NEXT: retq
852 ; BDVER2-LABEL: v8f32_one_step_2_divs:
854 ; BDVER2-NEXT: vrcpps %ymm0, %ymm1
855 ; BDVER2-NEXT: vfnmaddps {{.*}}(%rip), %ymm1, %ymm0, %ymm0
856 ; BDVER2-NEXT: vfmaddps %ymm1, %ymm0, %ymm1, %ymm0
857 ; BDVER2-NEXT: vmulps {{.*}}(%rip), %ymm0, %ymm1
858 ; BDVER2-NEXT: vmulps %ymm0, %ymm1, %ymm0
861 ; BTVER2-LABEL: v8f32_one_step_2_divs:
863 ; BTVER2-NEXT: vmovaps {{.*#+}} ymm2 = [1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0]
864 ; BTVER2-NEXT: vrcpps %ymm0, %ymm1
865 ; BTVER2-NEXT: vmulps %ymm1, %ymm0, %ymm0
866 ; BTVER2-NEXT: vsubps %ymm0, %ymm2, %ymm0
867 ; BTVER2-NEXT: vmulps %ymm0, %ymm1, %ymm0
868 ; BTVER2-NEXT: vaddps %ymm0, %ymm1, %ymm0
869 ; BTVER2-NEXT: vmulps {{.*}}(%rip), %ymm0, %ymm1
870 ; BTVER2-NEXT: vmulps %ymm0, %ymm1, %ymm0
873 ; SANDY-LABEL: v8f32_one_step_2_divs:
875 ; SANDY-NEXT: vrcpps %ymm0, %ymm1
876 ; SANDY-NEXT: vmulps %ymm1, %ymm0, %ymm0
877 ; SANDY-NEXT: vmovaps {{.*#+}} ymm2 = [1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0]
878 ; SANDY-NEXT: vsubps %ymm0, %ymm2, %ymm0
879 ; SANDY-NEXT: vmulps %ymm0, %ymm1, %ymm0
880 ; SANDY-NEXT: vaddps %ymm0, %ymm1, %ymm0
881 ; SANDY-NEXT: vmulps {{.*}}(%rip), %ymm0, %ymm1
882 ; SANDY-NEXT: vmulps %ymm0, %ymm1, %ymm0
885 ; HASWELL-LABEL: v8f32_one_step_2_divs:
887 ; HASWELL-NEXT: vrcpps %ymm0, %ymm1
888 ; HASWELL-NEXT: vbroadcastss {{.*#+}} ymm2 = [1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0]
889 ; HASWELL-NEXT: vfnmadd213ps {{.*#+}} ymm0 = -(ymm1 * ymm0) + ymm2
890 ; HASWELL-NEXT: vfmadd132ps {{.*#+}} ymm0 = (ymm0 * ymm1) + ymm1
891 ; HASWELL-NEXT: vmulps {{.*}}(%rip), %ymm0, %ymm1
892 ; HASWELL-NEXT: vmulps %ymm0, %ymm1, %ymm0
895 ; HASWELL-NO-FMA-LABEL: v8f32_one_step_2_divs:
896 ; HASWELL-NO-FMA: # %bb.0:
897 ; HASWELL-NO-FMA-NEXT: vrcpps %ymm0, %ymm1
898 ; HASWELL-NO-FMA-NEXT: vmulps %ymm1, %ymm0, %ymm0
899 ; HASWELL-NO-FMA-NEXT: vbroadcastss {{.*#+}} ymm2 = [1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0]
900 ; HASWELL-NO-FMA-NEXT: vsubps %ymm0, %ymm2, %ymm0
901 ; HASWELL-NO-FMA-NEXT: vmulps %ymm0, %ymm1, %ymm0
902 ; HASWELL-NO-FMA-NEXT: vaddps %ymm0, %ymm1, %ymm0
903 ; HASWELL-NO-FMA-NEXT: vmulps {{.*}}(%rip), %ymm0, %ymm1
904 ; HASWELL-NO-FMA-NEXT: vmulps %ymm0, %ymm1, %ymm0
905 ; HASWELL-NO-FMA-NEXT: retq
907 ; KNL-LABEL: v8f32_one_step_2_divs:
909 ; KNL-NEXT: vrcpps %ymm0, %ymm1
910 ; KNL-NEXT: vbroadcastss {{.*#+}} ymm2 = [1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0]
911 ; KNL-NEXT: vfnmadd213ps {{.*#+}} ymm0 = -(ymm1 * ymm0) + ymm2
912 ; KNL-NEXT: vfmadd132ps {{.*#+}} ymm0 = (ymm0 * ymm1) + ymm1
913 ; KNL-NEXT: vmulps {{.*}}(%rip), %ymm0, %ymm1
914 ; KNL-NEXT: vmulps %ymm0, %ymm1, %ymm0
917 ; SKX-LABEL: v8f32_one_step_2_divs:
919 ; SKX-NEXT: vrcpps %ymm0, %ymm1
920 ; SKX-NEXT: vfnmadd213ps {{.*#+}} ymm0 = -(ymm1 * ymm0) + mem
921 ; SKX-NEXT: vfmadd132ps {{.*#+}} ymm0 = (ymm0 * ymm1) + ymm1
922 ; SKX-NEXT: vmulps {{.*}}(%rip), %ymm0, %ymm1
923 ; SKX-NEXT: vmulps %ymm0, %ymm1, %ymm0
925 %div = fdiv fast <8 x float> <float 1.0, float 2.0, float 3.0, float 4.0, float 5.0, float 6.0, float 7.0, float 8.0>, %x
926 %div2 = fdiv fast <8 x float> %div, %x
927 ret <8 x float> %div2
930 define <8 x float> @v8f32_two_step2(<8 x float> %x) #2 {
931 ; SSE-LABEL: v8f32_two_step2:
933 ; SSE-NEXT: movaps %xmm0, %xmm2
934 ; SSE-NEXT: rcpps %xmm1, %xmm3
935 ; SSE-NEXT: movaps %xmm1, %xmm4
936 ; SSE-NEXT: mulps %xmm3, %xmm4
937 ; SSE-NEXT: movaps {{.*#+}} xmm0 = [1.0E+0,1.0E+0,1.0E+0,1.0E+0]
938 ; SSE-NEXT: movaps %xmm0, %xmm5
939 ; SSE-NEXT: subps %xmm4, %xmm5
940 ; SSE-NEXT: mulps %xmm3, %xmm5
941 ; SSE-NEXT: addps %xmm3, %xmm5
942 ; SSE-NEXT: mulps %xmm5, %xmm1
943 ; SSE-NEXT: movaps %xmm0, %xmm3
944 ; SSE-NEXT: subps %xmm1, %xmm3
945 ; SSE-NEXT: mulps %xmm5, %xmm3
946 ; SSE-NEXT: addps %xmm5, %xmm3
947 ; SSE-NEXT: rcpps %xmm2, %xmm1
948 ; SSE-NEXT: movaps %xmm2, %xmm4
949 ; SSE-NEXT: mulps %xmm1, %xmm4
950 ; SSE-NEXT: movaps %xmm0, %xmm5
951 ; SSE-NEXT: subps %xmm4, %xmm5
952 ; SSE-NEXT: mulps %xmm1, %xmm5
953 ; SSE-NEXT: addps %xmm1, %xmm5
954 ; SSE-NEXT: mulps %xmm5, %xmm2
955 ; SSE-NEXT: subps %xmm2, %xmm0
956 ; SSE-NEXT: mulps %xmm5, %xmm0
957 ; SSE-NEXT: addps %xmm5, %xmm0
958 ; SSE-NEXT: mulps {{.*}}(%rip), %xmm0
959 ; SSE-NEXT: mulps {{.*}}(%rip), %xmm3
960 ; SSE-NEXT: movaps %xmm3, %xmm1
963 ; AVX-RECIP-LABEL: v8f32_two_step2:
964 ; AVX-RECIP: # %bb.0:
965 ; AVX-RECIP-NEXT: vrcpps %ymm0, %ymm1
966 ; AVX-RECIP-NEXT: vmulps %ymm1, %ymm0, %ymm2
967 ; AVX-RECIP-NEXT: vmovaps {{.*#+}} ymm3 = [1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0]
968 ; AVX-RECIP-NEXT: vsubps %ymm2, %ymm3, %ymm2
969 ; AVX-RECIP-NEXT: vmulps %ymm2, %ymm1, %ymm2
970 ; AVX-RECIP-NEXT: vaddps %ymm2, %ymm1, %ymm1
971 ; AVX-RECIP-NEXT: vmulps %ymm1, %ymm0, %ymm0
972 ; AVX-RECIP-NEXT: vsubps %ymm0, %ymm3, %ymm0
973 ; AVX-RECIP-NEXT: vmulps %ymm0, %ymm1, %ymm0
974 ; AVX-RECIP-NEXT: vaddps %ymm0, %ymm1, %ymm0
975 ; AVX-RECIP-NEXT: vmulps {{.*}}(%rip), %ymm0, %ymm0
976 ; AVX-RECIP-NEXT: retq
978 ; FMA-RECIP-LABEL: v8f32_two_step2:
979 ; FMA-RECIP: # %bb.0:
980 ; FMA-RECIP-NEXT: vrcpps %ymm0, %ymm1
981 ; FMA-RECIP-NEXT: vmovaps {{.*#+}} ymm2 = [1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0]
982 ; FMA-RECIP-NEXT: vmovaps %ymm1, %ymm3
983 ; FMA-RECIP-NEXT: vfnmadd213ps {{.*#+}} ymm3 = -(ymm0 * ymm3) + ymm2
984 ; FMA-RECIP-NEXT: vfmadd132ps {{.*#+}} ymm3 = (ymm3 * ymm1) + ymm1
985 ; FMA-RECIP-NEXT: vfnmadd213ps {{.*#+}} ymm0 = -(ymm3 * ymm0) + ymm2
986 ; FMA-RECIP-NEXT: vfmadd132ps {{.*#+}} ymm0 = (ymm0 * ymm3) + ymm3
987 ; FMA-RECIP-NEXT: vmulps {{.*}}(%rip), %ymm0, %ymm0
988 ; FMA-RECIP-NEXT: retq
990 ; BDVER2-LABEL: v8f32_two_step2:
992 ; BDVER2-NEXT: vrcpps %ymm0, %ymm1
993 ; BDVER2-NEXT: vmovaps {{.*#+}} ymm2 = [1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0]
994 ; BDVER2-NEXT: vfnmaddps %ymm2, %ymm1, %ymm0, %ymm3
995 ; BDVER2-NEXT: vfmaddps %ymm1, %ymm3, %ymm1, %ymm1
996 ; BDVER2-NEXT: vfnmaddps %ymm2, %ymm1, %ymm0, %ymm0
997 ; BDVER2-NEXT: vfmaddps %ymm1, %ymm0, %ymm1, %ymm0
998 ; BDVER2-NEXT: vmulps {{.*}}(%rip), %ymm0, %ymm0
1001 ; BTVER2-LABEL: v8f32_two_step2:
1003 ; BTVER2-NEXT: vmovaps {{.*#+}} ymm3 = [1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0]
1004 ; BTVER2-NEXT: vrcpps %ymm0, %ymm1
1005 ; BTVER2-NEXT: vmulps %ymm1, %ymm0, %ymm2
1006 ; BTVER2-NEXT: vsubps %ymm2, %ymm3, %ymm2
1007 ; BTVER2-NEXT: vmulps %ymm2, %ymm1, %ymm2
1008 ; BTVER2-NEXT: vaddps %ymm2, %ymm1, %ymm1
1009 ; BTVER2-NEXT: vmulps %ymm1, %ymm0, %ymm0
1010 ; BTVER2-NEXT: vsubps %ymm0, %ymm3, %ymm0
1011 ; BTVER2-NEXT: vmulps %ymm0, %ymm1, %ymm0
1012 ; BTVER2-NEXT: vaddps %ymm0, %ymm1, %ymm0
1013 ; BTVER2-NEXT: vmulps {{.*}}(%rip), %ymm0, %ymm0
1016 ; SANDY-LABEL: v8f32_two_step2:
1018 ; SANDY-NEXT: vrcpps %ymm0, %ymm1
1019 ; SANDY-NEXT: vmulps %ymm1, %ymm0, %ymm2
1020 ; SANDY-NEXT: vmovaps {{.*#+}} ymm3 = [1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0]
1021 ; SANDY-NEXT: vsubps %ymm2, %ymm3, %ymm2
1022 ; SANDY-NEXT: vmulps %ymm2, %ymm1, %ymm2
1023 ; SANDY-NEXT: vaddps %ymm2, %ymm1, %ymm1
1024 ; SANDY-NEXT: vmulps %ymm1, %ymm0, %ymm0
1025 ; SANDY-NEXT: vsubps %ymm0, %ymm3, %ymm0
1026 ; SANDY-NEXT: vmulps %ymm0, %ymm1, %ymm0
1027 ; SANDY-NEXT: vaddps %ymm0, %ymm1, %ymm0
1028 ; SANDY-NEXT: vmulps {{.*}}(%rip), %ymm0, %ymm0
1031 ; HASWELL-LABEL: v8f32_two_step2:
1033 ; HASWELL-NEXT: vrcpps %ymm0, %ymm1
1034 ; HASWELL-NEXT: vbroadcastss {{.*#+}} ymm2 = [1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0]
1035 ; HASWELL-NEXT: vmovaps %ymm1, %ymm3
1036 ; HASWELL-NEXT: vfnmadd213ps {{.*#+}} ymm3 = -(ymm0 * ymm3) + ymm2
1037 ; HASWELL-NEXT: vfmadd132ps {{.*#+}} ymm3 = (ymm3 * ymm1) + ymm1
1038 ; HASWELL-NEXT: vfnmadd213ps {{.*#+}} ymm0 = -(ymm3 * ymm0) + ymm2
1039 ; HASWELL-NEXT: vfmadd132ps {{.*#+}} ymm0 = (ymm0 * ymm3) + ymm3
1040 ; HASWELL-NEXT: vmulps {{.*}}(%rip), %ymm0, %ymm0
1041 ; HASWELL-NEXT: retq
1043 ; HASWELL-NO-FMA-LABEL: v8f32_two_step2:
1044 ; HASWELL-NO-FMA: # %bb.0:
1045 ; HASWELL-NO-FMA-NEXT: vrcpps %ymm0, %ymm1
1046 ; HASWELL-NO-FMA-NEXT: vmulps %ymm1, %ymm0, %ymm2
1047 ; HASWELL-NO-FMA-NEXT: vbroadcastss {{.*#+}} ymm3 = [1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0]
1048 ; HASWELL-NO-FMA-NEXT: vsubps %ymm2, %ymm3, %ymm2
1049 ; HASWELL-NO-FMA-NEXT: vmulps %ymm2, %ymm1, %ymm2
1050 ; HASWELL-NO-FMA-NEXT: vaddps %ymm2, %ymm1, %ymm1
1051 ; HASWELL-NO-FMA-NEXT: vmulps %ymm1, %ymm0, %ymm0
1052 ; HASWELL-NO-FMA-NEXT: vsubps %ymm0, %ymm3, %ymm0
1053 ; HASWELL-NO-FMA-NEXT: vmulps %ymm0, %ymm1, %ymm0
1054 ; HASWELL-NO-FMA-NEXT: vaddps %ymm0, %ymm1, %ymm0
1055 ; HASWELL-NO-FMA-NEXT: vmulps {{.*}}(%rip), %ymm0, %ymm0
1056 ; HASWELL-NO-FMA-NEXT: retq
1058 ; AVX512-LABEL: v8f32_two_step2:
1060 ; AVX512-NEXT: vrcpps %ymm0, %ymm1
1061 ; AVX512-NEXT: vbroadcastss {{.*#+}} ymm2 = [1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0]
1062 ; AVX512-NEXT: vmovaps %ymm1, %ymm3
1063 ; AVX512-NEXT: vfnmadd213ps {{.*#+}} ymm3 = -(ymm0 * ymm3) + ymm2
1064 ; AVX512-NEXT: vfmadd132ps {{.*#+}} ymm3 = (ymm3 * ymm1) + ymm1
1065 ; AVX512-NEXT: vfnmadd213ps {{.*#+}} ymm0 = -(ymm3 * ymm0) + ymm2
1066 ; AVX512-NEXT: vfmadd132ps {{.*#+}} ymm0 = (ymm0 * ymm3) + ymm3
1067 ; AVX512-NEXT: vmulps {{.*}}(%rip), %ymm0, %ymm0
1069 %div = fdiv fast <8 x float> <float 1.0, float 2.0, float 3.0, float 4.0, float 5.0, float 6.0, float 7.0, float 8.0>, %x
1070 ret <8 x float> %div
1073 define <8 x float> @v8f32_no_step(<8 x float> %x) #3 {
1074 ; SSE-LABEL: v8f32_no_step:
1076 ; SSE-NEXT: rcpps %xmm0, %xmm0
1077 ; SSE-NEXT: rcpps %xmm1, %xmm1
1080 ; AVX-LABEL: v8f32_no_step:
1082 ; AVX-NEXT: vrcpps %ymm0, %ymm0
1084 %div = fdiv fast <8 x float> <float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0>, %x
1085 ret <8 x float> %div
1088 define <8 x float> @v8f32_no_step2(<8 x float> %x) #3 {
1089 ; SSE-LABEL: v8f32_no_step2:
1091 ; SSE-NEXT: rcpps %xmm1, %xmm1
1092 ; SSE-NEXT: rcpps %xmm0, %xmm0
1093 ; SSE-NEXT: mulps {{.*}}(%rip), %xmm0
1094 ; SSE-NEXT: mulps {{.*}}(%rip), %xmm1
1097 ; AVX-LABEL: v8f32_no_step2:
1099 ; AVX-NEXT: vrcpps %ymm0, %ymm0
1100 ; AVX-NEXT: vmulps {{.*}}(%rip), %ymm0, %ymm0
1102 %div = fdiv fast <8 x float> <float 1.0, float 2.0, float 3.0, float 4.0, float 5.0, float 6.0, float 7.0, float 8.0>, %x
1103 ret <8 x float> %div
1106 define <16 x float> @v16f32_one_step2(<16 x float> %x) #1 {
1107 ; SSE-LABEL: v16f32_one_step2:
1109 ; SSE-NEXT: movaps %xmm3, %xmm4
1110 ; SSE-NEXT: movaps %xmm2, %xmm5
1111 ; SSE-NEXT: movaps %xmm0, %xmm6
1112 ; SSE-NEXT: rcpps %xmm3, %xmm2
1113 ; SSE-NEXT: mulps %xmm2, %xmm4
1114 ; SSE-NEXT: movaps {{.*#+}} xmm0 = [1.0E+0,1.0E+0,1.0E+0,1.0E+0]
1115 ; SSE-NEXT: movaps %xmm0, %xmm3
1116 ; SSE-NEXT: subps %xmm4, %xmm3
1117 ; SSE-NEXT: mulps %xmm2, %xmm3
1118 ; SSE-NEXT: addps %xmm2, %xmm3
1119 ; SSE-NEXT: rcpps %xmm5, %xmm4
1120 ; SSE-NEXT: mulps %xmm4, %xmm5
1121 ; SSE-NEXT: movaps %xmm0, %xmm2
1122 ; SSE-NEXT: subps %xmm5, %xmm2
1123 ; SSE-NEXT: mulps %xmm4, %xmm2
1124 ; SSE-NEXT: addps %xmm4, %xmm2
1125 ; SSE-NEXT: rcpps %xmm1, %xmm5
1126 ; SSE-NEXT: mulps %xmm5, %xmm1
1127 ; SSE-NEXT: movaps %xmm0, %xmm4
1128 ; SSE-NEXT: subps %xmm1, %xmm4
1129 ; SSE-NEXT: mulps %xmm5, %xmm4
1130 ; SSE-NEXT: addps %xmm5, %xmm4
1131 ; SSE-NEXT: rcpps %xmm6, %xmm1
1132 ; SSE-NEXT: mulps %xmm1, %xmm6
1133 ; SSE-NEXT: subps %xmm6, %xmm0
1134 ; SSE-NEXT: mulps %xmm1, %xmm0
1135 ; SSE-NEXT: addps %xmm1, %xmm0
1136 ; SSE-NEXT: mulps {{.*}}(%rip), %xmm0
1137 ; SSE-NEXT: mulps {{.*}}(%rip), %xmm4
1138 ; SSE-NEXT: mulps {{.*}}(%rip), %xmm2
1139 ; SSE-NEXT: mulps {{.*}}(%rip), %xmm3
1140 ; SSE-NEXT: movaps %xmm4, %xmm1
1143 ; AVX-RECIP-LABEL: v16f32_one_step2:
1144 ; AVX-RECIP: # %bb.0:
1145 ; AVX-RECIP-NEXT: vrcpps %ymm1, %ymm2
1146 ; AVX-RECIP-NEXT: vmulps %ymm2, %ymm1, %ymm1
1147 ; AVX-RECIP-NEXT: vmovaps {{.*#+}} ymm3 = [1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0]
1148 ; AVX-RECIP-NEXT: vsubps %ymm1, %ymm3, %ymm1
1149 ; AVX-RECIP-NEXT: vmulps %ymm1, %ymm2, %ymm1
1150 ; AVX-RECIP-NEXT: vaddps %ymm1, %ymm2, %ymm1
1151 ; AVX-RECIP-NEXT: vrcpps %ymm0, %ymm2
1152 ; AVX-RECIP-NEXT: vmulps %ymm2, %ymm0, %ymm0
1153 ; AVX-RECIP-NEXT: vsubps %ymm0, %ymm3, %ymm0
1154 ; AVX-RECIP-NEXT: vmulps %ymm0, %ymm2, %ymm0
1155 ; AVX-RECIP-NEXT: vaddps %ymm0, %ymm2, %ymm0
1156 ; AVX-RECIP-NEXT: vmulps {{.*}}(%rip), %ymm0, %ymm0
1157 ; AVX-RECIP-NEXT: vmulps {{.*}}(%rip), %ymm1, %ymm1
1158 ; AVX-RECIP-NEXT: retq
1160 ; FMA-RECIP-LABEL: v16f32_one_step2:
1161 ; FMA-RECIP: # %bb.0:
1162 ; FMA-RECIP-NEXT: vrcpps %ymm1, %ymm2
1163 ; FMA-RECIP-NEXT: vmovaps {{.*#+}} ymm3 = [1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0]
1164 ; FMA-RECIP-NEXT: vfnmadd213ps {{.*#+}} ymm1 = -(ymm2 * ymm1) + ymm3
1165 ; FMA-RECIP-NEXT: vfmadd132ps {{.*#+}} ymm1 = (ymm1 * ymm2) + ymm2
1166 ; FMA-RECIP-NEXT: vrcpps %ymm0, %ymm2
1167 ; FMA-RECIP-NEXT: vfnmadd213ps {{.*#+}} ymm0 = -(ymm2 * ymm0) + ymm3
1168 ; FMA-RECIP-NEXT: vfmadd132ps {{.*#+}} ymm0 = (ymm0 * ymm2) + ymm2
1169 ; FMA-RECIP-NEXT: vmulps {{.*}}(%rip), %ymm0, %ymm0
1170 ; FMA-RECIP-NEXT: vmulps {{.*}}(%rip), %ymm1, %ymm1
1171 ; FMA-RECIP-NEXT: retq
1173 ; BDVER2-LABEL: v16f32_one_step2:
1175 ; BDVER2-NEXT: vrcpps %ymm1, %ymm2
1176 ; BDVER2-NEXT: vmovaps {{.*#+}} ymm3 = [1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0]
1177 ; BDVER2-NEXT: vrcpps %ymm0, %ymm4
1178 ; BDVER2-NEXT: vfnmaddps %ymm3, %ymm2, %ymm1, %ymm1
1179 ; BDVER2-NEXT: vfnmaddps %ymm3, %ymm4, %ymm0, %ymm0
1180 ; BDVER2-NEXT: vfmaddps %ymm2, %ymm1, %ymm2, %ymm1
1181 ; BDVER2-NEXT: vfmaddps %ymm4, %ymm0, %ymm4, %ymm0
1182 ; BDVER2-NEXT: vmulps {{.*}}(%rip), %ymm0, %ymm0
1183 ; BDVER2-NEXT: vmulps {{.*}}(%rip), %ymm1, %ymm1
1186 ; BTVER2-LABEL: v16f32_one_step2:
1188 ; BTVER2-NEXT: vmovaps {{.*#+}} ymm3 = [1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0]
1189 ; BTVER2-NEXT: vrcpps %ymm1, %ymm2
1190 ; BTVER2-NEXT: vrcpps %ymm0, %ymm4
1191 ; BTVER2-NEXT: vmulps %ymm2, %ymm1, %ymm1
1192 ; BTVER2-NEXT: vmulps %ymm4, %ymm0, %ymm0
1193 ; BTVER2-NEXT: vsubps %ymm1, %ymm3, %ymm1
1194 ; BTVER2-NEXT: vsubps %ymm0, %ymm3, %ymm0
1195 ; BTVER2-NEXT: vmulps %ymm1, %ymm2, %ymm1
1196 ; BTVER2-NEXT: vmulps %ymm0, %ymm4, %ymm0
1197 ; BTVER2-NEXT: vaddps %ymm1, %ymm2, %ymm1
1198 ; BTVER2-NEXT: vaddps %ymm0, %ymm4, %ymm0
1199 ; BTVER2-NEXT: vmulps {{.*}}(%rip), %ymm0, %ymm0
1200 ; BTVER2-NEXT: vmulps {{.*}}(%rip), %ymm1, %ymm1
1203 ; SANDY-LABEL: v16f32_one_step2:
1205 ; SANDY-NEXT: vrcpps %ymm1, %ymm2
1206 ; SANDY-NEXT: vmulps %ymm2, %ymm1, %ymm1
1207 ; SANDY-NEXT: vmovaps {{.*#+}} ymm3 = [1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0]
1208 ; SANDY-NEXT: vsubps %ymm1, %ymm3, %ymm1
1209 ; SANDY-NEXT: vmulps %ymm1, %ymm2, %ymm1
1210 ; SANDY-NEXT: vaddps %ymm1, %ymm2, %ymm1
1211 ; SANDY-NEXT: vrcpps %ymm0, %ymm2
1212 ; SANDY-NEXT: vmulps %ymm2, %ymm0, %ymm0
1213 ; SANDY-NEXT: vsubps %ymm0, %ymm3, %ymm0
1214 ; SANDY-NEXT: vmulps %ymm0, %ymm2, %ymm0
1215 ; SANDY-NEXT: vaddps %ymm0, %ymm2, %ymm0
1216 ; SANDY-NEXT: vmulps {{.*}}(%rip), %ymm0, %ymm0
1217 ; SANDY-NEXT: vmulps {{.*}}(%rip), %ymm1, %ymm1
1220 ; HASWELL-LABEL: v16f32_one_step2:
1222 ; HASWELL-NEXT: vrcpps %ymm1, %ymm2
1223 ; HASWELL-NEXT: vbroadcastss {{.*#+}} ymm3 = [1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0]
1224 ; HASWELL-NEXT: vrcpps %ymm0, %ymm4
1225 ; HASWELL-NEXT: vfnmadd213ps {{.*#+}} ymm1 = -(ymm2 * ymm1) + ymm3
1226 ; HASWELL-NEXT: vfmadd132ps {{.*#+}} ymm1 = (ymm1 * ymm2) + ymm2
1227 ; HASWELL-NEXT: vfnmadd213ps {{.*#+}} ymm0 = -(ymm4 * ymm0) + ymm3
1228 ; HASWELL-NEXT: vfmadd132ps {{.*#+}} ymm0 = (ymm0 * ymm4) + ymm4
1229 ; HASWELL-NEXT: vmulps {{.*}}(%rip), %ymm0, %ymm0
1230 ; HASWELL-NEXT: vmulps {{.*}}(%rip), %ymm1, %ymm1
1231 ; HASWELL-NEXT: retq
1233 ; HASWELL-NO-FMA-LABEL: v16f32_one_step2:
1234 ; HASWELL-NO-FMA: # %bb.0:
1235 ; HASWELL-NO-FMA-NEXT: vrcpps %ymm1, %ymm2
1236 ; HASWELL-NO-FMA-NEXT: vmulps %ymm2, %ymm1, %ymm1
1237 ; HASWELL-NO-FMA-NEXT: vbroadcastss {{.*#+}} ymm3 = [1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0]
1238 ; HASWELL-NO-FMA-NEXT: vsubps %ymm1, %ymm3, %ymm1
1239 ; HASWELL-NO-FMA-NEXT: vmulps %ymm1, %ymm2, %ymm1
1240 ; HASWELL-NO-FMA-NEXT: vaddps %ymm1, %ymm2, %ymm1
1241 ; HASWELL-NO-FMA-NEXT: vrcpps %ymm0, %ymm2
1242 ; HASWELL-NO-FMA-NEXT: vmulps %ymm2, %ymm0, %ymm0
1243 ; HASWELL-NO-FMA-NEXT: vsubps %ymm0, %ymm3, %ymm0
1244 ; HASWELL-NO-FMA-NEXT: vmulps %ymm0, %ymm2, %ymm0
1245 ; HASWELL-NO-FMA-NEXT: vaddps %ymm0, %ymm2, %ymm0
1246 ; HASWELL-NO-FMA-NEXT: vmulps {{.*}}(%rip), %ymm0, %ymm0
1247 ; HASWELL-NO-FMA-NEXT: vmulps {{.*}}(%rip), %ymm1, %ymm1
1248 ; HASWELL-NO-FMA-NEXT: retq
1250 ; AVX512-LABEL: v16f32_one_step2:
1252 ; AVX512-NEXT: vrcp14ps %zmm0, %zmm1
1253 ; AVX512-NEXT: vfnmadd213ps {{.*#+}} zmm0 = -(zmm1 * zmm0) + mem
1254 ; AVX512-NEXT: vfmadd132ps {{.*#+}} zmm0 = (zmm0 * zmm1) + zmm1
1255 ; AVX512-NEXT: vmulps {{.*}}(%rip), %zmm0, %zmm0
1257 %div = fdiv fast <16 x float> <float 1.0, float 2.0, float 3.0, float 4.0, float 5.0, float 6.0, float 7.0, float 8.0, float 9.0, float 10.0, float 11.0, float 12.0, float 13.0, float 14.0, float 15.0, float 16.0>, %x
1258 ret <16 x float> %div
1261 define <16 x float> @v16f32_one_step_2_divs(<16 x float> %x) #1 {
1262 ; SSE-LABEL: v16f32_one_step_2_divs:
1264 ; SSE-NEXT: rcpps %xmm0, %xmm6
1265 ; SSE-NEXT: mulps %xmm6, %xmm0
1266 ; SSE-NEXT: movaps {{.*#+}} xmm4 = [1.0E+0,1.0E+0,1.0E+0,1.0E+0]
1267 ; SSE-NEXT: movaps %xmm4, %xmm5
1268 ; SSE-NEXT: subps %xmm0, %xmm5
1269 ; SSE-NEXT: mulps %xmm6, %xmm5
1270 ; SSE-NEXT: addps %xmm6, %xmm5
1271 ; SSE-NEXT: rcpps %xmm1, %xmm0
1272 ; SSE-NEXT: mulps %xmm0, %xmm1
1273 ; SSE-NEXT: movaps %xmm4, %xmm6
1274 ; SSE-NEXT: subps %xmm1, %xmm6
1275 ; SSE-NEXT: mulps %xmm0, %xmm6
1276 ; SSE-NEXT: addps %xmm0, %xmm6
1277 ; SSE-NEXT: rcpps %xmm2, %xmm0
1278 ; SSE-NEXT: mulps %xmm0, %xmm2
1279 ; SSE-NEXT: movaps %xmm4, %xmm7
1280 ; SSE-NEXT: subps %xmm2, %xmm7
1281 ; SSE-NEXT: mulps %xmm0, %xmm7
1282 ; SSE-NEXT: addps %xmm0, %xmm7
1283 ; SSE-NEXT: rcpps %xmm3, %xmm0
1284 ; SSE-NEXT: mulps %xmm0, %xmm3
1285 ; SSE-NEXT: subps %xmm3, %xmm4
1286 ; SSE-NEXT: mulps %xmm0, %xmm4
1287 ; SSE-NEXT: addps %xmm0, %xmm4
1288 ; SSE-NEXT: movaps {{.*#+}} xmm3 = [1.3E+1,1.4E+1,1.5E+1,1.6E+1]
1289 ; SSE-NEXT: mulps %xmm4, %xmm3
1290 ; SSE-NEXT: movaps {{.*#+}} xmm2 = [9.0E+0,1.0E+1,1.1E+1,1.2E+1]
1291 ; SSE-NEXT: mulps %xmm7, %xmm2
1292 ; SSE-NEXT: movaps {{.*#+}} xmm1 = [5.0E+0,6.0E+0,7.0E+0,8.0E+0]
1293 ; SSE-NEXT: mulps %xmm6, %xmm1
1294 ; SSE-NEXT: movaps {{.*#+}} xmm0 = [1.0E+0,2.0E+0,3.0E+0,4.0E+0]
1295 ; SSE-NEXT: mulps %xmm5, %xmm0
1296 ; SSE-NEXT: mulps %xmm5, %xmm0
1297 ; SSE-NEXT: mulps %xmm6, %xmm1
1298 ; SSE-NEXT: mulps %xmm7, %xmm2
1299 ; SSE-NEXT: mulps %xmm4, %xmm3
1302 ; AVX-RECIP-LABEL: v16f32_one_step_2_divs:
1303 ; AVX-RECIP: # %bb.0:
1304 ; AVX-RECIP-NEXT: vrcpps %ymm0, %ymm2
1305 ; AVX-RECIP-NEXT: vmulps %ymm2, %ymm0, %ymm0
1306 ; AVX-RECIP-NEXT: vmovaps {{.*#+}} ymm3 = [1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0]
1307 ; AVX-RECIP-NEXT: vsubps %ymm0, %ymm3, %ymm0
1308 ; AVX-RECIP-NEXT: vmulps %ymm0, %ymm2, %ymm0
1309 ; AVX-RECIP-NEXT: vaddps %ymm0, %ymm2, %ymm0
1310 ; AVX-RECIP-NEXT: vrcpps %ymm1, %ymm2
1311 ; AVX-RECIP-NEXT: vmulps %ymm2, %ymm1, %ymm1
1312 ; AVX-RECIP-NEXT: vsubps %ymm1, %ymm3, %ymm1
1313 ; AVX-RECIP-NEXT: vmulps %ymm1, %ymm2, %ymm1
1314 ; AVX-RECIP-NEXT: vaddps %ymm1, %ymm2, %ymm1
1315 ; AVX-RECIP-NEXT: vmulps {{.*}}(%rip), %ymm1, %ymm2
1316 ; AVX-RECIP-NEXT: vmulps {{.*}}(%rip), %ymm0, %ymm3
1317 ; AVX-RECIP-NEXT: vmulps %ymm0, %ymm3, %ymm0
1318 ; AVX-RECIP-NEXT: vmulps %ymm1, %ymm2, %ymm1
1319 ; AVX-RECIP-NEXT: retq
1321 ; FMA-RECIP-LABEL: v16f32_one_step_2_divs:
1322 ; FMA-RECIP: # %bb.0:
1323 ; FMA-RECIP-NEXT: vrcpps %ymm0, %ymm2
1324 ; FMA-RECIP-NEXT: vmovaps {{.*#+}} ymm3 = [1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0]
1325 ; FMA-RECIP-NEXT: vfnmadd213ps {{.*#+}} ymm0 = -(ymm2 * ymm0) + ymm3
1326 ; FMA-RECIP-NEXT: vfmadd132ps {{.*#+}} ymm0 = (ymm0 * ymm2) + ymm2
1327 ; FMA-RECIP-NEXT: vrcpps %ymm1, %ymm2
1328 ; FMA-RECIP-NEXT: vfnmadd213ps {{.*#+}} ymm1 = -(ymm2 * ymm1) + ymm3
1329 ; FMA-RECIP-NEXT: vfmadd132ps {{.*#+}} ymm1 = (ymm1 * ymm2) + ymm2
1330 ; FMA-RECIP-NEXT: vmulps {{.*}}(%rip), %ymm1, %ymm2
1331 ; FMA-RECIP-NEXT: vmulps {{.*}}(%rip), %ymm0, %ymm3
1332 ; FMA-RECIP-NEXT: vmulps %ymm0, %ymm3, %ymm0
1333 ; FMA-RECIP-NEXT: vmulps %ymm1, %ymm2, %ymm1
1334 ; FMA-RECIP-NEXT: retq
1336 ; BDVER2-LABEL: v16f32_one_step_2_divs:
1338 ; BDVER2-NEXT: vrcpps %ymm0, %ymm2
1339 ; BDVER2-NEXT: vmovaps {{.*#+}} ymm3 = [1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0]
1340 ; BDVER2-NEXT: vfnmaddps %ymm3, %ymm2, %ymm0, %ymm0
1341 ; BDVER2-NEXT: vfmaddps %ymm2, %ymm0, %ymm2, %ymm0
1342 ; BDVER2-NEXT: vrcpps %ymm1, %ymm2
1343 ; BDVER2-NEXT: vfnmaddps %ymm3, %ymm2, %ymm1, %ymm1
1344 ; BDVER2-NEXT: vmulps {{.*}}(%rip), %ymm0, %ymm3
1345 ; BDVER2-NEXT: vfmaddps %ymm2, %ymm1, %ymm2, %ymm1
1346 ; BDVER2-NEXT: vmulps {{.*}}(%rip), %ymm1, %ymm2
1347 ; BDVER2-NEXT: vmulps %ymm0, %ymm3, %ymm0
1348 ; BDVER2-NEXT: vmulps %ymm1, %ymm2, %ymm1
1351 ; BTVER2-LABEL: v16f32_one_step_2_divs:
1353 ; BTVER2-NEXT: vmovaps {{.*#+}} ymm3 = [1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0]
1354 ; BTVER2-NEXT: vrcpps %ymm0, %ymm2
1355 ; BTVER2-NEXT: vmulps %ymm2, %ymm0, %ymm0
1356 ; BTVER2-NEXT: vsubps %ymm0, %ymm3, %ymm0
1357 ; BTVER2-NEXT: vmulps %ymm0, %ymm2, %ymm0
1358 ; BTVER2-NEXT: vaddps %ymm0, %ymm2, %ymm0
1359 ; BTVER2-NEXT: vrcpps %ymm1, %ymm2
1360 ; BTVER2-NEXT: vmulps %ymm2, %ymm1, %ymm1
1361 ; BTVER2-NEXT: vsubps %ymm1, %ymm3, %ymm1
1362 ; BTVER2-NEXT: vmulps {{.*}}(%rip), %ymm0, %ymm3
1363 ; BTVER2-NEXT: vmulps %ymm1, %ymm2, %ymm1
1364 ; BTVER2-NEXT: vaddps %ymm1, %ymm2, %ymm1
1365 ; BTVER2-NEXT: vmulps {{.*}}(%rip), %ymm1, %ymm2
1366 ; BTVER2-NEXT: vmulps %ymm0, %ymm3, %ymm0
1367 ; BTVER2-NEXT: vmulps %ymm1, %ymm2, %ymm1
1370 ; SANDY-LABEL: v16f32_one_step_2_divs:
1372 ; SANDY-NEXT: vrcpps %ymm0, %ymm2
1373 ; SANDY-NEXT: vmulps %ymm2, %ymm0, %ymm0
1374 ; SANDY-NEXT: vmovaps {{.*#+}} ymm3 = [1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0]
1375 ; SANDY-NEXT: vsubps %ymm0, %ymm3, %ymm0
1376 ; SANDY-NEXT: vrcpps %ymm1, %ymm4
1377 ; SANDY-NEXT: vmulps %ymm0, %ymm2, %ymm0
1378 ; SANDY-NEXT: vaddps %ymm0, %ymm2, %ymm0
1379 ; SANDY-NEXT: vmulps %ymm4, %ymm1, %ymm1
1380 ; SANDY-NEXT: vsubps %ymm1, %ymm3, %ymm1
1381 ; SANDY-NEXT: vmulps %ymm1, %ymm4, %ymm1
1382 ; SANDY-NEXT: vaddps %ymm1, %ymm4, %ymm1
1383 ; SANDY-NEXT: vmulps {{.*}}(%rip), %ymm1, %ymm2
1384 ; SANDY-NEXT: vmulps {{.*}}(%rip), %ymm0, %ymm3
1385 ; SANDY-NEXT: vmulps %ymm0, %ymm3, %ymm0
1386 ; SANDY-NEXT: vmulps %ymm1, %ymm2, %ymm1
1389 ; HASWELL-LABEL: v16f32_one_step_2_divs:
1391 ; HASWELL-NEXT: vrcpps %ymm0, %ymm2
1392 ; HASWELL-NEXT: vbroadcastss {{.*#+}} ymm3 = [1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0]
1393 ; HASWELL-NEXT: vfnmadd213ps {{.*#+}} ymm0 = -(ymm2 * ymm0) + ymm3
1394 ; HASWELL-NEXT: vfmadd132ps {{.*#+}} ymm0 = (ymm0 * ymm2) + ymm2
1395 ; HASWELL-NEXT: vrcpps %ymm1, %ymm2
1396 ; HASWELL-NEXT: vfnmadd213ps {{.*#+}} ymm1 = -(ymm2 * ymm1) + ymm3
1397 ; HASWELL-NEXT: vfmadd132ps {{.*#+}} ymm1 = (ymm1 * ymm2) + ymm2
1398 ; HASWELL-NEXT: vmulps {{.*}}(%rip), %ymm1, %ymm2
1399 ; HASWELL-NEXT: vmulps {{.*}}(%rip), %ymm0, %ymm3
1400 ; HASWELL-NEXT: vmulps %ymm0, %ymm3, %ymm0
1401 ; HASWELL-NEXT: vmulps %ymm1, %ymm2, %ymm1
1402 ; HASWELL-NEXT: retq
1404 ; HASWELL-NO-FMA-LABEL: v16f32_one_step_2_divs:
1405 ; HASWELL-NO-FMA: # %bb.0:
1406 ; HASWELL-NO-FMA-NEXT: vrcpps %ymm0, %ymm2
1407 ; HASWELL-NO-FMA-NEXT: vmulps %ymm2, %ymm0, %ymm0
1408 ; HASWELL-NO-FMA-NEXT: vbroadcastss {{.*#+}} ymm3 = [1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0]
1409 ; HASWELL-NO-FMA-NEXT: vsubps %ymm0, %ymm3, %ymm0
1410 ; HASWELL-NO-FMA-NEXT: vrcpps %ymm1, %ymm4
1411 ; HASWELL-NO-FMA-NEXT: vmulps %ymm0, %ymm2, %ymm0
1412 ; HASWELL-NO-FMA-NEXT: vaddps %ymm0, %ymm2, %ymm0
1413 ; HASWELL-NO-FMA-NEXT: vmulps %ymm4, %ymm1, %ymm1
1414 ; HASWELL-NO-FMA-NEXT: vsubps %ymm1, %ymm3, %ymm1
1415 ; HASWELL-NO-FMA-NEXT: vmulps %ymm1, %ymm4, %ymm1
1416 ; HASWELL-NO-FMA-NEXT: vaddps %ymm1, %ymm4, %ymm1
1417 ; HASWELL-NO-FMA-NEXT: vmulps {{.*}}(%rip), %ymm1, %ymm2
1418 ; HASWELL-NO-FMA-NEXT: vmulps {{.*}}(%rip), %ymm0, %ymm3
1419 ; HASWELL-NO-FMA-NEXT: vmulps %ymm0, %ymm3, %ymm0
1420 ; HASWELL-NO-FMA-NEXT: vmulps %ymm1, %ymm2, %ymm1
1421 ; HASWELL-NO-FMA-NEXT: retq
1423 ; AVX512-LABEL: v16f32_one_step_2_divs:
1425 ; AVX512-NEXT: vrcp14ps %zmm0, %zmm1
1426 ; AVX512-NEXT: vfnmadd213ps {{.*#+}} zmm0 = -(zmm1 * zmm0) + mem
1427 ; AVX512-NEXT: vfmadd132ps {{.*#+}} zmm0 = (zmm0 * zmm1) + zmm1
1428 ; AVX512-NEXT: vmulps {{.*}}(%rip), %zmm0, %zmm1
1429 ; AVX512-NEXT: vmulps %zmm0, %zmm1, %zmm0
1431 %div = fdiv fast <16 x float> <float 1.0, float 2.0, float 3.0, float 4.0, float 5.0, float 6.0, float 7.0, float 8.0, float 9.0, float 10.0, float 11.0, float 12.0, float 13.0, float 14.0, float 15.0, float 16.0>, %x
1432 %div2 = fdiv fast <16 x float> %div, %x
1433 ret <16 x float> %div2
1436 define <16 x float> @v16f32_two_step2(<16 x float> %x) #2 {
1437 ; SSE-LABEL: v16f32_two_step2:
1439 ; SSE-NEXT: movaps %xmm3, %xmm6
1440 ; SSE-NEXT: movaps %xmm2, %xmm5
1441 ; SSE-NEXT: movaps %xmm0, %xmm4
1442 ; SSE-NEXT: rcpps %xmm3, %xmm2
1443 ; SSE-NEXT: mulps %xmm2, %xmm3
1444 ; SSE-NEXT: movaps {{.*#+}} xmm0 = [1.0E+0,1.0E+0,1.0E+0,1.0E+0]
1445 ; SSE-NEXT: movaps %xmm0, %xmm7
1446 ; SSE-NEXT: subps %xmm3, %xmm7
1447 ; SSE-NEXT: mulps %xmm2, %xmm7
1448 ; SSE-NEXT: addps %xmm2, %xmm7
1449 ; SSE-NEXT: mulps %xmm7, %xmm6
1450 ; SSE-NEXT: movaps %xmm0, %xmm3
1451 ; SSE-NEXT: subps %xmm6, %xmm3
1452 ; SSE-NEXT: mulps %xmm7, %xmm3
1453 ; SSE-NEXT: addps %xmm7, %xmm3
1454 ; SSE-NEXT: rcpps %xmm5, %xmm2
1455 ; SSE-NEXT: movaps %xmm5, %xmm6
1456 ; SSE-NEXT: mulps %xmm2, %xmm6
1457 ; SSE-NEXT: movaps %xmm0, %xmm7
1458 ; SSE-NEXT: subps %xmm6, %xmm7
1459 ; SSE-NEXT: mulps %xmm2, %xmm7
1460 ; SSE-NEXT: addps %xmm2, %xmm7
1461 ; SSE-NEXT: mulps %xmm7, %xmm5
1462 ; SSE-NEXT: movaps %xmm0, %xmm2
1463 ; SSE-NEXT: subps %xmm5, %xmm2
1464 ; SSE-NEXT: mulps %xmm7, %xmm2
1465 ; SSE-NEXT: addps %xmm7, %xmm2
1466 ; SSE-NEXT: rcpps %xmm1, %xmm5
1467 ; SSE-NEXT: movaps %xmm1, %xmm6
1468 ; SSE-NEXT: mulps %xmm5, %xmm6
1469 ; SSE-NEXT: movaps %xmm0, %xmm7
1470 ; SSE-NEXT: subps %xmm6, %xmm7
1471 ; SSE-NEXT: mulps %xmm5, %xmm7
1472 ; SSE-NEXT: addps %xmm5, %xmm7
1473 ; SSE-NEXT: mulps %xmm7, %xmm1
1474 ; SSE-NEXT: movaps %xmm0, %xmm5
1475 ; SSE-NEXT: subps %xmm1, %xmm5
1476 ; SSE-NEXT: mulps %xmm7, %xmm5
1477 ; SSE-NEXT: addps %xmm7, %xmm5
1478 ; SSE-NEXT: rcpps %xmm4, %xmm1
1479 ; SSE-NEXT: movaps %xmm4, %xmm6
1480 ; SSE-NEXT: mulps %xmm1, %xmm6
1481 ; SSE-NEXT: movaps %xmm0, %xmm7
1482 ; SSE-NEXT: subps %xmm6, %xmm7
1483 ; SSE-NEXT: mulps %xmm1, %xmm7
1484 ; SSE-NEXT: addps %xmm1, %xmm7
1485 ; SSE-NEXT: mulps %xmm7, %xmm4
1486 ; SSE-NEXT: subps %xmm4, %xmm0
1487 ; SSE-NEXT: mulps %xmm7, %xmm0
1488 ; SSE-NEXT: addps %xmm7, %xmm0
1489 ; SSE-NEXT: mulps {{.*}}(%rip), %xmm0
1490 ; SSE-NEXT: mulps {{.*}}(%rip), %xmm5
1491 ; SSE-NEXT: mulps {{.*}}(%rip), %xmm2
1492 ; SSE-NEXT: mulps {{.*}}(%rip), %xmm3
1493 ; SSE-NEXT: movaps %xmm5, %xmm1
1496 ; AVX-RECIP-LABEL: v16f32_two_step2:
1497 ; AVX-RECIP: # %bb.0:
1498 ; AVX-RECIP-NEXT: vrcpps %ymm1, %ymm2
1499 ; AVX-RECIP-NEXT: vmulps %ymm2, %ymm1, %ymm3
1500 ; AVX-RECIP-NEXT: vmovaps {{.*#+}} ymm4 = [1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0]
1501 ; AVX-RECIP-NEXT: vsubps %ymm3, %ymm4, %ymm3
1502 ; AVX-RECIP-NEXT: vmulps %ymm3, %ymm2, %ymm3
1503 ; AVX-RECIP-NEXT: vaddps %ymm3, %ymm2, %ymm2
1504 ; AVX-RECIP-NEXT: vmulps %ymm2, %ymm1, %ymm1
1505 ; AVX-RECIP-NEXT: vsubps %ymm1, %ymm4, %ymm1
1506 ; AVX-RECIP-NEXT: vmulps %ymm1, %ymm2, %ymm1
1507 ; AVX-RECIP-NEXT: vaddps %ymm1, %ymm2, %ymm1
1508 ; AVX-RECIP-NEXT: vrcpps %ymm0, %ymm2
1509 ; AVX-RECIP-NEXT: vmulps %ymm2, %ymm0, %ymm3
1510 ; AVX-RECIP-NEXT: vsubps %ymm3, %ymm4, %ymm3
1511 ; AVX-RECIP-NEXT: vmulps %ymm3, %ymm2, %ymm3
1512 ; AVX-RECIP-NEXT: vaddps %ymm3, %ymm2, %ymm2
1513 ; AVX-RECIP-NEXT: vmulps %ymm2, %ymm0, %ymm0
1514 ; AVX-RECIP-NEXT: vsubps %ymm0, %ymm4, %ymm0
1515 ; AVX-RECIP-NEXT: vmulps %ymm0, %ymm2, %ymm0
1516 ; AVX-RECIP-NEXT: vaddps %ymm0, %ymm2, %ymm0
1517 ; AVX-RECIP-NEXT: vmulps {{.*}}(%rip), %ymm0, %ymm0
1518 ; AVX-RECIP-NEXT: vmulps {{.*}}(%rip), %ymm1, %ymm1
1519 ; AVX-RECIP-NEXT: retq
1521 ; FMA-RECIP-LABEL: v16f32_two_step2:
1522 ; FMA-RECIP: # %bb.0:
1523 ; FMA-RECIP-NEXT: vrcpps %ymm1, %ymm2
1524 ; FMA-RECIP-NEXT: vmovaps {{.*#+}} ymm3 = [1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0]
1525 ; FMA-RECIP-NEXT: vmovaps %ymm2, %ymm4
1526 ; FMA-RECIP-NEXT: vfnmadd213ps {{.*#+}} ymm4 = -(ymm1 * ymm4) + ymm3
1527 ; FMA-RECIP-NEXT: vfmadd132ps {{.*#+}} ymm4 = (ymm4 * ymm2) + ymm2
1528 ; FMA-RECIP-NEXT: vfnmadd213ps {{.*#+}} ymm1 = -(ymm4 * ymm1) + ymm3
1529 ; FMA-RECIP-NEXT: vfmadd132ps {{.*#+}} ymm1 = (ymm1 * ymm4) + ymm4
1530 ; FMA-RECIP-NEXT: vrcpps %ymm0, %ymm2
1531 ; FMA-RECIP-NEXT: vmovaps %ymm2, %ymm4
1532 ; FMA-RECIP-NEXT: vfnmadd213ps {{.*#+}} ymm4 = -(ymm0 * ymm4) + ymm3
1533 ; FMA-RECIP-NEXT: vfmadd132ps {{.*#+}} ymm4 = (ymm4 * ymm2) + ymm2
1534 ; FMA-RECIP-NEXT: vfnmadd213ps {{.*#+}} ymm0 = -(ymm4 * ymm0) + ymm3
1535 ; FMA-RECIP-NEXT: vfmadd132ps {{.*#+}} ymm0 = (ymm0 * ymm4) + ymm4
1536 ; FMA-RECIP-NEXT: vmulps {{.*}}(%rip), %ymm0, %ymm0
1537 ; FMA-RECIP-NEXT: vmulps {{.*}}(%rip), %ymm1, %ymm1
1538 ; FMA-RECIP-NEXT: retq
1540 ; BDVER2-LABEL: v16f32_two_step2:
1542 ; BDVER2-NEXT: vrcpps %ymm1, %ymm2
1543 ; BDVER2-NEXT: vmovaps {{.*#+}} ymm3 = [1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0]
1544 ; BDVER2-NEXT: vfnmaddps %ymm3, %ymm2, %ymm1, %ymm4
1545 ; BDVER2-NEXT: vfmaddps %ymm2, %ymm4, %ymm2, %ymm2
1546 ; BDVER2-NEXT: vfnmaddps %ymm3, %ymm2, %ymm1, %ymm1
1547 ; BDVER2-NEXT: vfmaddps %ymm2, %ymm1, %ymm2, %ymm1
1548 ; BDVER2-NEXT: vrcpps %ymm0, %ymm2
1549 ; BDVER2-NEXT: vmulps {{.*}}(%rip), %ymm1, %ymm1
1550 ; BDVER2-NEXT: vfnmaddps %ymm3, %ymm2, %ymm0, %ymm4
1551 ; BDVER2-NEXT: vfmaddps %ymm2, %ymm4, %ymm2, %ymm2
1552 ; BDVER2-NEXT: vfnmaddps %ymm3, %ymm2, %ymm0, %ymm0
1553 ; BDVER2-NEXT: vfmaddps %ymm2, %ymm0, %ymm2, %ymm0
1554 ; BDVER2-NEXT: vmulps {{.*}}(%rip), %ymm0, %ymm0
1557 ; BTVER2-LABEL: v16f32_two_step2:
1559 ; BTVER2-NEXT: vmovaps {{.*#+}} ymm4 = [1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0]
1560 ; BTVER2-NEXT: vrcpps %ymm1, %ymm2
1561 ; BTVER2-NEXT: vmulps %ymm2, %ymm1, %ymm3
1562 ; BTVER2-NEXT: vsubps %ymm3, %ymm4, %ymm3
1563 ; BTVER2-NEXT: vmulps %ymm3, %ymm2, %ymm3
1564 ; BTVER2-NEXT: vaddps %ymm3, %ymm2, %ymm2
1565 ; BTVER2-NEXT: vmulps %ymm2, %ymm1, %ymm1
1566 ; BTVER2-NEXT: vsubps %ymm1, %ymm4, %ymm1
1567 ; BTVER2-NEXT: vmulps %ymm1, %ymm2, %ymm1
1568 ; BTVER2-NEXT: vaddps %ymm1, %ymm2, %ymm1
1569 ; BTVER2-NEXT: vrcpps %ymm0, %ymm2
1570 ; BTVER2-NEXT: vmulps {{.*}}(%rip), %ymm1, %ymm1
1571 ; BTVER2-NEXT: vmulps %ymm2, %ymm0, %ymm3
1572 ; BTVER2-NEXT: vsubps %ymm3, %ymm4, %ymm3
1573 ; BTVER2-NEXT: vmulps %ymm3, %ymm2, %ymm3
1574 ; BTVER2-NEXT: vaddps %ymm3, %ymm2, %ymm2
1575 ; BTVER2-NEXT: vmulps %ymm2, %ymm0, %ymm0
1576 ; BTVER2-NEXT: vsubps %ymm0, %ymm4, %ymm0
1577 ; BTVER2-NEXT: vmulps %ymm0, %ymm2, %ymm0
1578 ; BTVER2-NEXT: vaddps %ymm0, %ymm2, %ymm0
1579 ; BTVER2-NEXT: vmulps {{.*}}(%rip), %ymm0, %ymm0
1582 ; SANDY-LABEL: v16f32_two_step2:
1584 ; SANDY-NEXT: vrcpps %ymm1, %ymm2
1585 ; SANDY-NEXT: vmulps %ymm2, %ymm1, %ymm3
1586 ; SANDY-NEXT: vmovaps {{.*#+}} ymm4 = [1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0]
1587 ; SANDY-NEXT: vsubps %ymm3, %ymm4, %ymm3
1588 ; SANDY-NEXT: vmulps %ymm3, %ymm2, %ymm3
1589 ; SANDY-NEXT: vaddps %ymm3, %ymm2, %ymm2
1590 ; SANDY-NEXT: vmulps %ymm2, %ymm1, %ymm1
1591 ; SANDY-NEXT: vsubps %ymm1, %ymm4, %ymm1
1592 ; SANDY-NEXT: vmulps %ymm1, %ymm2, %ymm1
1593 ; SANDY-NEXT: vaddps %ymm1, %ymm2, %ymm1
1594 ; SANDY-NEXT: vrcpps %ymm0, %ymm2
1595 ; SANDY-NEXT: vmulps %ymm2, %ymm0, %ymm3
1596 ; SANDY-NEXT: vsubps %ymm3, %ymm4, %ymm3
1597 ; SANDY-NEXT: vmulps %ymm3, %ymm2, %ymm3
1598 ; SANDY-NEXT: vaddps %ymm3, %ymm2, %ymm2
1599 ; SANDY-NEXT: vmulps %ymm2, %ymm0, %ymm0
1600 ; SANDY-NEXT: vsubps %ymm0, %ymm4, %ymm0
1601 ; SANDY-NEXT: vmulps %ymm0, %ymm2, %ymm0
1602 ; SANDY-NEXT: vaddps %ymm0, %ymm2, %ymm0
1603 ; SANDY-NEXT: vmulps {{.*}}(%rip), %ymm0, %ymm0
1604 ; SANDY-NEXT: vmulps {{.*}}(%rip), %ymm1, %ymm1
1607 ; HASWELL-LABEL: v16f32_two_step2:
1609 ; HASWELL-NEXT: vrcpps %ymm1, %ymm2
1610 ; HASWELL-NEXT: vbroadcastss {{.*#+}} ymm3 = [1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0]
1611 ; HASWELL-NEXT: vmovaps %ymm2, %ymm4
1612 ; HASWELL-NEXT: vfnmadd213ps {{.*#+}} ymm4 = -(ymm1 * ymm4) + ymm3
1613 ; HASWELL-NEXT: vfmadd132ps {{.*#+}} ymm4 = (ymm4 * ymm2) + ymm2
1614 ; HASWELL-NEXT: vfnmadd213ps {{.*#+}} ymm1 = -(ymm4 * ymm1) + ymm3
1615 ; HASWELL-NEXT: vfmadd132ps {{.*#+}} ymm1 = (ymm1 * ymm4) + ymm4
1616 ; HASWELL-NEXT: vrcpps %ymm0, %ymm2
1617 ; HASWELL-NEXT: vmovaps %ymm2, %ymm4
1618 ; HASWELL-NEXT: vfnmadd213ps {{.*#+}} ymm4 = -(ymm0 * ymm4) + ymm3
1619 ; HASWELL-NEXT: vfmadd132ps {{.*#+}} ymm4 = (ymm4 * ymm2) + ymm2
1620 ; HASWELL-NEXT: vfnmadd213ps {{.*#+}} ymm0 = -(ymm4 * ymm0) + ymm3
1621 ; HASWELL-NEXT: vfmadd132ps {{.*#+}} ymm0 = (ymm0 * ymm4) + ymm4
1622 ; HASWELL-NEXT: vmulps {{.*}}(%rip), %ymm0, %ymm0
1623 ; HASWELL-NEXT: vmulps {{.*}}(%rip), %ymm1, %ymm1
1624 ; HASWELL-NEXT: retq
1626 ; HASWELL-NO-FMA-LABEL: v16f32_two_step2:
1627 ; HASWELL-NO-FMA: # %bb.0:
1628 ; HASWELL-NO-FMA-NEXT: vrcpps %ymm1, %ymm2
1629 ; HASWELL-NO-FMA-NEXT: vmulps %ymm2, %ymm1, %ymm3
1630 ; HASWELL-NO-FMA-NEXT: vbroadcastss {{.*#+}} ymm4 = [1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0]
1631 ; HASWELL-NO-FMA-NEXT: vsubps %ymm3, %ymm4, %ymm3
1632 ; HASWELL-NO-FMA-NEXT: vmulps %ymm3, %ymm2, %ymm3
1633 ; HASWELL-NO-FMA-NEXT: vaddps %ymm3, %ymm2, %ymm2
1634 ; HASWELL-NO-FMA-NEXT: vmulps %ymm2, %ymm1, %ymm1
1635 ; HASWELL-NO-FMA-NEXT: vsubps %ymm1, %ymm4, %ymm1
1636 ; HASWELL-NO-FMA-NEXT: vmulps %ymm1, %ymm2, %ymm1
1637 ; HASWELL-NO-FMA-NEXT: vaddps %ymm1, %ymm2, %ymm1
1638 ; HASWELL-NO-FMA-NEXT: vrcpps %ymm0, %ymm2
1639 ; HASWELL-NO-FMA-NEXT: vmulps %ymm2, %ymm0, %ymm3
1640 ; HASWELL-NO-FMA-NEXT: vsubps %ymm3, %ymm4, %ymm3
1641 ; HASWELL-NO-FMA-NEXT: vmulps %ymm3, %ymm2, %ymm3
1642 ; HASWELL-NO-FMA-NEXT: vaddps %ymm3, %ymm2, %ymm2
1643 ; HASWELL-NO-FMA-NEXT: vmulps %ymm2, %ymm0, %ymm0
1644 ; HASWELL-NO-FMA-NEXT: vsubps %ymm0, %ymm4, %ymm0
1645 ; HASWELL-NO-FMA-NEXT: vmulps %ymm0, %ymm2, %ymm0
1646 ; HASWELL-NO-FMA-NEXT: vaddps %ymm0, %ymm2, %ymm0
1647 ; HASWELL-NO-FMA-NEXT: vmulps {{.*}}(%rip), %ymm0, %ymm0
1648 ; HASWELL-NO-FMA-NEXT: vmulps {{.*}}(%rip), %ymm1, %ymm1
1649 ; HASWELL-NO-FMA-NEXT: retq
1651 ; AVX512-LABEL: v16f32_two_step2:
1653 ; AVX512-NEXT: vrcp14ps %zmm0, %zmm1
1654 ; AVX512-NEXT: vbroadcastss {{.*#+}} zmm2 = [1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0]
1655 ; AVX512-NEXT: vmovaps %zmm1, %zmm3
1656 ; AVX512-NEXT: vfnmadd213ps {{.*#+}} zmm3 = -(zmm0 * zmm3) + zmm2
1657 ; AVX512-NEXT: vfmadd132ps {{.*#+}} zmm3 = (zmm3 * zmm1) + zmm1
1658 ; AVX512-NEXT: vfnmadd213ps {{.*#+}} zmm0 = -(zmm3 * zmm0) + zmm2
1659 ; AVX512-NEXT: vfmadd132ps {{.*#+}} zmm0 = (zmm0 * zmm3) + zmm3
1660 ; AVX512-NEXT: vmulps {{.*}}(%rip), %zmm0, %zmm0
1662 %div = fdiv fast <16 x float> <float 1.0, float 2.0, float 3.0, float 4.0, float 5.0, float 6.0, float 7.0, float 8.0, float 9.0, float 10.0, float 11.0, float 12.0, float 13.0, float 14.0, float 15.0, float 16.0>, %x
1663 ret <16 x float> %div
1666 define <16 x float> @v16f32_no_step(<16 x float> %x) #3 {
; Attribute #3 carries "reciprocal-estimates"="divf:0,vec-divf:0": use the
; hardware reciprocal estimate with zero Newton-Raphson refinement steps.
; Every target therefore lowers the fast fdiv to bare estimate instructions
; (rcpps on SSE, vrcpps on AVX targets, vrcp14ps on AVX-512) with no
; refinement mul/sub/add sequence.
1667 ; SSE-LABEL: v16f32_no_step:
1669 ; SSE-NEXT: rcpps %xmm0, %xmm0
1670 ; SSE-NEXT: rcpps %xmm1, %xmm1
1671 ; SSE-NEXT: rcpps %xmm2, %xmm2
1672 ; SSE-NEXT: rcpps %xmm3, %xmm3
1675 ; AVX-RECIP-LABEL: v16f32_no_step:
1676 ; AVX-RECIP: # %bb.0:
1677 ; AVX-RECIP-NEXT: vrcpps %ymm0, %ymm0
1678 ; AVX-RECIP-NEXT: vrcpps %ymm1, %ymm1
1679 ; AVX-RECIP-NEXT: retq
1681 ; FMA-RECIP-LABEL: v16f32_no_step:
1682 ; FMA-RECIP: # %bb.0:
1683 ; FMA-RECIP-NEXT: vrcpps %ymm0, %ymm0
1684 ; FMA-RECIP-NEXT: vrcpps %ymm1, %ymm1
1685 ; FMA-RECIP-NEXT: retq
1687 ; BDVER2-LABEL: v16f32_no_step:
1689 ; BDVER2-NEXT: vrcpps %ymm0, %ymm0
1690 ; BDVER2-NEXT: vrcpps %ymm1, %ymm1
1693 ; BTVER2-LABEL: v16f32_no_step:
1695 ; BTVER2-NEXT: vrcpps %ymm0, %ymm0
1696 ; BTVER2-NEXT: vrcpps %ymm1, %ymm1
1699 ; SANDY-LABEL: v16f32_no_step:
1701 ; SANDY-NEXT: vrcpps %ymm0, %ymm0
1702 ; SANDY-NEXT: vrcpps %ymm1, %ymm1
1705 ; HASWELL-LABEL: v16f32_no_step:
1707 ; HASWELL-NEXT: vrcpps %ymm0, %ymm0
1708 ; HASWELL-NEXT: vrcpps %ymm1, %ymm1
1709 ; HASWELL-NEXT: retq
1711 ; HASWELL-NO-FMA-LABEL: v16f32_no_step:
1712 ; HASWELL-NO-FMA: # %bb.0:
1713 ; HASWELL-NO-FMA-NEXT: vrcpps %ymm0, %ymm0
1714 ; HASWELL-NO-FMA-NEXT: vrcpps %ymm1, %ymm1
1715 ; HASWELL-NO-FMA-NEXT: retq
1717 ; AVX512-LABEL: v16f32_no_step:
1719 ; AVX512-NEXT: vrcp14ps %zmm0, %zmm0
; The numerator is a splat of 1.0, so the raw estimate IS the result and no
; trailing multiply by a constant-pool vector is needed (contrast with
; v16f32_no_step2 below, whose numerator is <1.0 .. 16.0>).
1721 %div = fdiv fast <16 x float> <float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0>, %x
1722 ret <16 x float> %div
1725 define <16 x float> @v16f32_no_step2(<16 x float> %x) #3 {
; Same zero-refinement-step configuration as v16f32_no_step (attribute #3,
; "reciprocal-estimates"="divf:0,vec-divf:0"), but the numerator is the
; non-uniform constant <1.0, 2.0, ..., 16.0>. The lowering must therefore
; follow each bare reciprocal estimate with one multiply by that constant
; vector loaded from the constant pool (the {{.*}}(%rip) operands below).
1726 ; SSE-LABEL: v16f32_no_step2:
1728 ; SSE-NEXT: rcpps %xmm3, %xmm3
1729 ; SSE-NEXT: rcpps %xmm2, %xmm2
1730 ; SSE-NEXT: rcpps %xmm1, %xmm1
1731 ; SSE-NEXT: rcpps %xmm0, %xmm0
1732 ; SSE-NEXT: mulps {{.*}}(%rip), %xmm0
1733 ; SSE-NEXT: mulps {{.*}}(%rip), %xmm1
1734 ; SSE-NEXT: mulps {{.*}}(%rip), %xmm2
1735 ; SSE-NEXT: mulps {{.*}}(%rip), %xmm3
1738 ; AVX-RECIP-LABEL: v16f32_no_step2:
1739 ; AVX-RECIP: # %bb.0:
1740 ; AVX-RECIP-NEXT: vrcpps %ymm1, %ymm1
1741 ; AVX-RECIP-NEXT: vrcpps %ymm0, %ymm0
1742 ; AVX-RECIP-NEXT: vmulps {{.*}}(%rip), %ymm0, %ymm0
1743 ; AVX-RECIP-NEXT: vmulps {{.*}}(%rip), %ymm1, %ymm1
1744 ; AVX-RECIP-NEXT: retq
1746 ; FMA-RECIP-LABEL: v16f32_no_step2:
1747 ; FMA-RECIP: # %bb.0:
1748 ; FMA-RECIP-NEXT: vrcpps %ymm1, %ymm1
1749 ; FMA-RECIP-NEXT: vrcpps %ymm0, %ymm0
1750 ; FMA-RECIP-NEXT: vmulps {{.*}}(%rip), %ymm0, %ymm0
1751 ; FMA-RECIP-NEXT: vmulps {{.*}}(%rip), %ymm1, %ymm1
1752 ; FMA-RECIP-NEXT: retq
1754 ; BDVER2-LABEL: v16f32_no_step2:
1756 ; BDVER2-NEXT: vrcpps %ymm1, %ymm1
1757 ; BDVER2-NEXT: vrcpps %ymm0, %ymm0
1758 ; BDVER2-NEXT: vmulps {{.*}}(%rip), %ymm0, %ymm0
1759 ; BDVER2-NEXT: vmulps {{.*}}(%rip), %ymm1, %ymm1
1762 ; BTVER2-LABEL: v16f32_no_step2:
1764 ; BTVER2-NEXT: vrcpps %ymm1, %ymm1
1765 ; BTVER2-NEXT: vrcpps %ymm0, %ymm0
1766 ; BTVER2-NEXT: vmulps {{.*}}(%rip), %ymm0, %ymm0
1767 ; BTVER2-NEXT: vmulps {{.*}}(%rip), %ymm1, %ymm1
1770 ; SANDY-LABEL: v16f32_no_step2:
1772 ; SANDY-NEXT: vrcpps %ymm1, %ymm1
1773 ; SANDY-NEXT: vrcpps %ymm0, %ymm0
1774 ; SANDY-NEXT: vmulps {{.*}}(%rip), %ymm0, %ymm0
1775 ; SANDY-NEXT: vmulps {{.*}}(%rip), %ymm1, %ymm1
1778 ; HASWELL-LABEL: v16f32_no_step2:
1780 ; HASWELL-NEXT: vrcpps %ymm1, %ymm1
1781 ; HASWELL-NEXT: vrcpps %ymm0, %ymm0
1782 ; HASWELL-NEXT: vmulps {{.*}}(%rip), %ymm0, %ymm0
1783 ; HASWELL-NEXT: vmulps {{.*}}(%rip), %ymm1, %ymm1
1784 ; HASWELL-NEXT: retq
1786 ; HASWELL-NO-FMA-LABEL: v16f32_no_step2:
1787 ; HASWELL-NO-FMA: # %bb.0:
1788 ; HASWELL-NO-FMA-NEXT: vrcpps %ymm1, %ymm1
1789 ; HASWELL-NO-FMA-NEXT: vrcpps %ymm0, %ymm0
1790 ; HASWELL-NO-FMA-NEXT: vmulps {{.*}}(%rip), %ymm0, %ymm0
1791 ; HASWELL-NO-FMA-NEXT: vmulps {{.*}}(%rip), %ymm1, %ymm1
1792 ; HASWELL-NO-FMA-NEXT: retq
1794 ; AVX512-LABEL: v16f32_no_step2:
1796 ; AVX512-NEXT: vrcp14ps %zmm0, %zmm0
1797 ; AVX512-NEXT: vmulps {{.*}}(%rip), %zmm0, %zmm0
1799 %div = fdiv fast <16 x float> <float 1.0, float 2.0, float 3.0, float 4.0, float 5.0, float 6.0, float 7.0, float 8.0, float 9.0, float 10.0, float 11.0, float 12.0, float 13.0, float 14.0, float 15.0, float 16.0>, %x
1800 ret <16 x float> %div
; The "reciprocal-estimates" attribute controls how fast-math fdiv is lowered
; (see D26855 referenced in the file header): "!name" disables the estimate
; for that op class, a bare "name" enables it with the target's default
; refinement count, and "name:N" requests exactly N Newton-Raphson iterations
; (N=0 means emit the raw hardware estimate, as the *_no_step* tests above
; demonstrate). "divf" covers scalar divides, "vec-divf" vector divides.
1803 attributes #0 = { "unsafe-fp-math"="true" "reciprocal-estimates"="!divf,!vec-divf" }
1804 attributes #1 = { "unsafe-fp-math"="true" "reciprocal-estimates"="divf,vec-divf" }
1805 attributes #2 = { "unsafe-fp-math"="true" "reciprocal-estimates"="divf:2,vec-divf:2" }
1806 attributes #3 = { "unsafe-fp-math"="true" "reciprocal-estimates"="divf:0,vec-divf:0" }