1 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2 ; RUN: llc < %s -mtriple=x86_64-- -mattr=avx2,fma | FileCheck %s --check-prefixes=CHECK,X64
3 ; RUN: llc < %s -mtriple=i686-- -mattr=avx2,fma | FileCheck %s --check-prefixes=CHECK,X86
; fneg of a v4f32 feeding extractelement 0: expect a single sign-bit xor (vxorps), no full vector lowering beyond that.
5 define float @fneg_v4f32(<4 x float> %x) nounwind {
6 ; X64-LABEL: fneg_v4f32:
8 ; X64-NEXT: vbroadcastss {{.*#+}} xmm1 = [-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0]
9 ; X64-NEXT: vxorps %xmm1, %xmm0, %xmm0
12 ; X86-LABEL: fneg_v4f32:
14 ; X86-NEXT: pushl %eax
15 ; X86-NEXT: vbroadcastss {{.*#+}} xmm1 = [-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0]
16 ; X86-NEXT: vxorps %xmm1, %xmm0, %xmm0
17 ; X86-NEXT: vmovss %xmm0, (%esp)
18 ; X86-NEXT: flds (%esp)
21 %v = fneg <4 x float> %x
22 %r = extractelement <4 x float> %v, i32 0
; fneg of a v4f64 + extract lane 0: xor with a broadcast -0.0 mask; i686 returns the double through the x87 stack (fldl).
26 define double @fneg_v4f64(<4 x double> %x) nounwind {
27 ; X64-LABEL: fneg_v4f64:
29 ; X64-NEXT: vmovddup {{.*#+}} xmm1 = [-0.0E+0,-0.0E+0]
30 ; X64-NEXT: # xmm1 = mem[0,0]
31 ; X64-NEXT: vxorps %xmm1, %xmm0, %xmm0
32 ; X64-NEXT: vzeroupper
35 ; X86-LABEL: fneg_v4f64:
37 ; X86-NEXT: pushl %ebp
38 ; X86-NEXT: movl %esp, %ebp
39 ; X86-NEXT: andl $-8, %esp
40 ; X86-NEXT: subl $8, %esp
41 ; X86-NEXT: vmovddup {{.*#+}} xmm1 = [-0.0E+0,-0.0E+0]
42 ; X86-NEXT: # xmm1 = mem[0,0]
43 ; X86-NEXT: vxorps %xmm1, %xmm0, %xmm0
44 ; X86-NEXT: vmovlps %xmm0, (%esp)
45 ; X86-NEXT: fldl (%esp)
46 ; X86-NEXT: movl %ebp, %esp
48 ; X86-NEXT: vzeroupper
50 %v = fneg <4 x double> %x
51 %r = extractelement <4 x double> %v, i32 0
; Vector fadd + extract lane 0 should scalarize to a single vaddss.
55 define float @fadd_v4f32(<4 x float> %x, <4 x float> %y) nounwind {
56 ; X64-LABEL: fadd_v4f32:
58 ; X64-NEXT: vaddss %xmm1, %xmm0, %xmm0
61 ; X86-LABEL: fadd_v4f32:
63 ; X86-NEXT: pushl %eax
64 ; X86-NEXT: vaddss %xmm1, %xmm0, %xmm0
65 ; X86-NEXT: vmovss %xmm0, (%esp)
66 ; X86-NEXT: flds (%esp)
69 %v = fadd <4 x float> %x, %y
70 %r = extractelement <4 x float> %v, i32 0
; v4f64 fadd + extract lane 0 scalarizes to vaddsd; vzeroupper is emitted because the ymm args touched upper state.
74 define double @fadd_v4f64(<4 x double> %x, <4 x double> %y) nounwind {
75 ; X64-LABEL: fadd_v4f64:
77 ; X64-NEXT: vaddsd %xmm1, %xmm0, %xmm0
78 ; X64-NEXT: vzeroupper
81 ; X86-LABEL: fadd_v4f64:
83 ; X86-NEXT: pushl %ebp
84 ; X86-NEXT: movl %esp, %ebp
85 ; X86-NEXT: andl $-8, %esp
86 ; X86-NEXT: subl $8, %esp
87 ; X86-NEXT: vaddsd %xmm1, %xmm0, %xmm0
88 ; X86-NEXT: vmovsd %xmm0, (%esp)
89 ; X86-NEXT: fldl (%esp)
90 ; X86-NEXT: movl %ebp, %esp
92 ; X86-NEXT: vzeroupper
94 %v = fadd <4 x double> %x, %y
95 %r = extractelement <4 x double> %v, i32 0
; Vector fsub + extract lane 0 scalarizes to vsubss.
99 define float @fsub_v4f32(<4 x float> %x, <4 x float> %y) nounwind {
100 ; X64-LABEL: fsub_v4f32:
102 ; X64-NEXT: vsubss %xmm1, %xmm0, %xmm0
105 ; X86-LABEL: fsub_v4f32:
107 ; X86-NEXT: pushl %eax
108 ; X86-NEXT: vsubss %xmm1, %xmm0, %xmm0
109 ; X86-NEXT: vmovss %xmm0, (%esp)
110 ; X86-NEXT: flds (%esp)
111 ; X86-NEXT: popl %eax
113 %v = fsub <4 x float> %x, %y
114 %r = extractelement <4 x float> %v, i32 0
; v4f64 fsub + extract lane 0 scalarizes to vsubsd.
118 define double @fsub_v4f64(<4 x double> %x, <4 x double> %y) nounwind {
119 ; X64-LABEL: fsub_v4f64:
121 ; X64-NEXT: vsubsd %xmm1, %xmm0, %xmm0
122 ; X64-NEXT: vzeroupper
125 ; X86-LABEL: fsub_v4f64:
127 ; X86-NEXT: pushl %ebp
128 ; X86-NEXT: movl %esp, %ebp
129 ; X86-NEXT: andl $-8, %esp
130 ; X86-NEXT: subl $8, %esp
131 ; X86-NEXT: vsubsd %xmm1, %xmm0, %xmm0
132 ; X86-NEXT: vmovsd %xmm0, (%esp)
133 ; X86-NEXT: fldl (%esp)
134 ; X86-NEXT: movl %ebp, %esp
135 ; X86-NEXT: popl %ebp
136 ; X86-NEXT: vzeroupper
138 %v = fsub <4 x double> %x, %y
139 %r = extractelement <4 x double> %v, i32 0
; Vector fmul + extract lane 0 scalarizes to vmulss.
143 define float @fmul_v4f32(<4 x float> %x, <4 x float> %y) nounwind {
144 ; X64-LABEL: fmul_v4f32:
146 ; X64-NEXT: vmulss %xmm1, %xmm0, %xmm0
149 ; X86-LABEL: fmul_v4f32:
151 ; X86-NEXT: pushl %eax
152 ; X86-NEXT: vmulss %xmm1, %xmm0, %xmm0
153 ; X86-NEXT: vmovss %xmm0, (%esp)
154 ; X86-NEXT: flds (%esp)
155 ; X86-NEXT: popl %eax
157 %v = fmul <4 x float> %x, %y
158 %r = extractelement <4 x float> %v, i32 0
; v4f64 fmul + extract lane 0 scalarizes to vmulsd.
162 define double @fmul_v4f64(<4 x double> %x, <4 x double> %y) nounwind {
163 ; X64-LABEL: fmul_v4f64:
165 ; X64-NEXT: vmulsd %xmm1, %xmm0, %xmm0
166 ; X64-NEXT: vzeroupper
169 ; X86-LABEL: fmul_v4f64:
171 ; X86-NEXT: pushl %ebp
172 ; X86-NEXT: movl %esp, %ebp
173 ; X86-NEXT: andl $-8, %esp
174 ; X86-NEXT: subl $8, %esp
175 ; X86-NEXT: vmulsd %xmm1, %xmm0, %xmm0
176 ; X86-NEXT: vmovsd %xmm0, (%esp)
177 ; X86-NEXT: fldl (%esp)
178 ; X86-NEXT: movl %ebp, %esp
179 ; X86-NEXT: popl %ebp
180 ; X86-NEXT: vzeroupper
182 %v = fmul <4 x double> %x, %y
183 %r = extractelement <4 x double> %v, i32 0
; Vector fdiv + extract lane 0 scalarizes to vdivss (avoids the full-width divide).
187 define float @fdiv_v4f32(<4 x float> %x, <4 x float> %y) nounwind {
188 ; X64-LABEL: fdiv_v4f32:
190 ; X64-NEXT: vdivss %xmm1, %xmm0, %xmm0
193 ; X86-LABEL: fdiv_v4f32:
195 ; X86-NEXT: pushl %eax
196 ; X86-NEXT: vdivss %xmm1, %xmm0, %xmm0
197 ; X86-NEXT: vmovss %xmm0, (%esp)
198 ; X86-NEXT: flds (%esp)
199 ; X86-NEXT: popl %eax
201 %v = fdiv <4 x float> %x, %y
202 %r = extractelement <4 x float> %v, i32 0
; v4f64 fdiv + extract lane 0 scalarizes to vdivsd.
206 define double @fdiv_v4f64(<4 x double> %x, <4 x double> %y) nounwind {
207 ; X64-LABEL: fdiv_v4f64:
209 ; X64-NEXT: vdivsd %xmm1, %xmm0, %xmm0
210 ; X64-NEXT: vzeroupper
213 ; X86-LABEL: fdiv_v4f64:
215 ; X86-NEXT: pushl %ebp
216 ; X86-NEXT: movl %esp, %ebp
217 ; X86-NEXT: andl $-8, %esp
218 ; X86-NEXT: subl $8, %esp
219 ; X86-NEXT: vdivsd %xmm1, %xmm0, %xmm0
220 ; X86-NEXT: vmovsd %xmm0, (%esp)
221 ; X86-NEXT: fldl (%esp)
222 ; X86-NEXT: movl %ebp, %esp
223 ; X86-NEXT: popl %ebp
224 ; X86-NEXT: vzeroupper
226 %v = fdiv <4 x double> %x, %y
227 %r = extractelement <4 x double> %v, i32 0
; frem has no instruction: the scalarized lane becomes an fmodf libcall (tail call on x86-64).
231 define float @frem_v4f32(<4 x float> %x, <4 x float> %y) nounwind {
232 ; X64-LABEL: frem_v4f32:
234 ; X64-NEXT: jmp fmodf # TAILCALL
236 ; X86-LABEL: frem_v4f32:
238 ; X86-NEXT: subl $8, %esp
239 ; X86-NEXT: vmovss %xmm1, {{[0-9]+}}(%esp)
240 ; X86-NEXT: vmovss %xmm0, (%esp)
241 ; X86-NEXT: calll fmodf
242 ; X86-NEXT: addl $8, %esp
244 %v = frem <4 x float> %x, %y
245 %r = extractelement <4 x float> %v, i32 0
; v4f64 frem lane 0 becomes an fmod libcall; the ymm inputs are narrowed to xmm first.
249 define double @frem_v4f64(<4 x double> %x, <4 x double> %y) nounwind {
250 ; X64-LABEL: frem_v4f64:
252 ; X64-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
253 ; X64-NEXT: # kill: def $xmm1 killed $xmm1 killed $ymm1
254 ; X64-NEXT: vzeroupper
255 ; X64-NEXT: jmp fmod # TAILCALL
257 ; X86-LABEL: frem_v4f64:
259 ; X86-NEXT: subl $16, %esp
260 ; X86-NEXT: vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
261 ; X86-NEXT: vmovups %xmm0, (%esp)
262 ; X86-NEXT: vzeroupper
263 ; X86-NEXT: calll fmod
264 ; X86-NEXT: addl $16, %esp
266 %v = frem <4 x double> %x, %y
267 %r = extractelement <4 x double> %v, i32 0
; Extracting lane 0 of a vector fcmp (ogt) folds to a scalar vucomiss + seta; same code on both targets.
271 define i1 @fcmp_v4f32(<4 x float> %x, <4 x float> %y) nounwind {
272 ; CHECK-LABEL: fcmp_v4f32:
274 ; CHECK-NEXT: vucomiss %xmm1, %xmm0
275 ; CHECK-NEXT: seta %al
276 ; CHECK-NEXT: ret{{[l|q]}}
277 %v = fcmp ogt <4 x float> %x, %y
278 %r = extractelement <4 x i1> %v, i32 0
; ugt lane-0 compare: operands are swapped so setb implements the unordered-greater-than predicate.
282 define i1 @fcmp_v4f64(<4 x double> %x, <4 x double> %y) nounwind {
283 ; CHECK-LABEL: fcmp_v4f64:
285 ; CHECK-NEXT: vucomisd %xmm0, %xmm1
286 ; CHECK-NEXT: setb %al
287 ; CHECK-NEXT: vzeroupper
288 ; CHECK-NEXT: ret{{[l|q]}}
289 %v = fcmp ugt <4 x double> %x, %y
290 %r = extractelement <4 x i1> %v, i32 0
294 ; If we do the fcmp transform late, make sure we have the right types.
295 ; https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=13700
; Regression test (oss-fuzz 13700): fcmp used both by a sext and an extractelement; lane 0 still folds to vucomiss + setb.
297 define void @extsetcc(<4 x float> %x) {
298 ; X64-LABEL: extsetcc:
300 ; X64-NEXT: vxorps %xmm1, %xmm1, %xmm1
301 ; X64-NEXT: vucomiss %xmm1, %xmm0
302 ; X64-NEXT: setb (%rax)
305 ; X86-LABEL: extsetcc:
307 ; X86-NEXT: vxorps %xmm1, %xmm1, %xmm1
308 ; X86-NEXT: vucomiss %xmm1, %xmm0
309 ; X86-NEXT: setb (%eax)
311 %cmp = fcmp ult <4 x float> %x, zeroinitializer
312 %sext = sext <4 x i1> %cmp to <4 x i32>
313 %e = extractelement <4 x i1> %cmp, i1 0
314 store i1 %e, i1* undef
318 ; This used to crash by creating a setcc with an i64 condition on a 32-bit target.
; Crash regression: fcmp-fed select + shuffle once produced an i64-condition setcc on a 32-bit target; keeps the vector lowering.
319 define <3 x double> @extvselectsetcc_crash(<2 x double> %x) {
320 ; X64-LABEL: extvselectsetcc_crash:
322 ; X64-NEXT: vcmpeqpd {{.*}}(%rip), %xmm0, %xmm1
323 ; X64-NEXT: vmovsd {{.*#+}} xmm2 = mem[0],zero
324 ; X64-NEXT: vandpd %xmm2, %xmm1, %xmm1
325 ; X64-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
326 ; X64-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,2,3,3]
329 ; X86-LABEL: extvselectsetcc_crash:
331 ; X86-NEXT: vcmpeqpd {{\.LCPI.*}}, %xmm0, %xmm1
332 ; X86-NEXT: vmovsd {{.*#+}} xmm2 = mem[0],zero
333 ; X86-NEXT: vandpd %xmm2, %xmm1, %xmm1
334 ; X86-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
335 ; X86-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,2,3,3]
337 %cmp = fcmp oeq <2 x double> %x, <double 5.0, double 5.0>
338 %s = select <2 x i1> %cmp, <2 x double> <double 1.0, double undef>, <2 x double> <double 0.0, double undef>
339 %r = shufflevector <2 x double> %s, <2 x double> %x, <3 x i32> <i32 0, i32 2, i32 3>
; fcmp one + select + extract lane 0 narrows to a scalar vcmpneq_oqss feeding vblendvps; 4th arg arrives on the stack for i686.
343 define float @select_fcmp_v4f32(<4 x float> %x, <4 x float> %y, <4 x float> %z, <4 x float> %w) nounwind {
344 ; X64-LABEL: select_fcmp_v4f32:
346 ; X64-NEXT: vcmpneq_oqss %xmm1, %xmm0, %xmm0
347 ; X64-NEXT: vblendvps %xmm0, %xmm2, %xmm3, %xmm0
350 ; X86-LABEL: select_fcmp_v4f32:
352 ; X86-NEXT: pushl %ebp
353 ; X86-NEXT: movl %esp, %ebp
354 ; X86-NEXT: andl $-16, %esp
355 ; X86-NEXT: subl $16, %esp
356 ; X86-NEXT: vmovaps 8(%ebp), %xmm3
357 ; X86-NEXT: vcmpneq_oqss %xmm1, %xmm0, %xmm0
358 ; X86-NEXT: vblendvps %xmm0, %xmm2, %xmm3, %xmm0
359 ; X86-NEXT: vmovss %xmm0, {{[0-9]+}}(%esp)
360 ; X86-NEXT: flds {{[0-9]+}}(%esp)
361 ; X86-NEXT: movl %ebp, %esp
362 ; X86-NEXT: popl %ebp
364 %c = fcmp one <4 x float> %x, %y
365 %s = select <4 x i1> %c, <4 x float> %z, <4 x float> %w
366 %r = extractelement <4 x float> %s, i32 0
; fcmp ule + select + extract lane 0 on v4f64: operands swapped to use vcmpnltsd, then vblendvpd selects the scalar.
370 define double @select_fcmp_v4f64(<4 x double> %x, <4 x double> %y, <4 x double> %z, <4 x double> %w) nounwind {
371 ; X64-LABEL: select_fcmp_v4f64:
373 ; X64-NEXT: vcmpnltsd %xmm0, %xmm1, %xmm0
374 ; X64-NEXT: vblendvpd %xmm0, %xmm2, %xmm3, %xmm0
375 ; X64-NEXT: vzeroupper
378 ; X86-LABEL: select_fcmp_v4f64:
380 ; X86-NEXT: pushl %ebp
381 ; X86-NEXT: movl %esp, %ebp
382 ; X86-NEXT: andl $-32, %esp
383 ; X86-NEXT: subl $32, %esp
384 ; X86-NEXT: vcmpnltsd %xmm0, %xmm1, %xmm0
385 ; X86-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
386 ; X86-NEXT: vblendvpd %xmm0, %xmm2, %xmm1, %xmm0
387 ; X86-NEXT: vmovlpd %xmm0, {{[0-9]+}}(%esp)
388 ; X86-NEXT: fldl {{[0-9]+}}(%esp)
389 ; X86-NEXT: movl %ebp, %esp
390 ; X86-NEXT: popl %ebp
391 ; X86-NEXT: vzeroupper
393 %c = fcmp ule <4 x double> %x, %y
394 %s = select <4 x i1> %c, <4 x double> %z, <4 x double> %w
395 %r = extractelement <4 x double> %s, i32 0
; llvm.sqrt on a vector + extract lane 0 scalarizes to vsqrtss.
399 define float @fsqrt_v4f32(<4 x float> %x) nounwind {
400 ; X64-LABEL: fsqrt_v4f32:
402 ; X64-NEXT: vsqrtss %xmm0, %xmm0, %xmm0
405 ; X86-LABEL: fsqrt_v4f32:
407 ; X86-NEXT: pushl %eax
408 ; X86-NEXT: vsqrtss %xmm0, %xmm0, %xmm0
409 ; X86-NEXT: vmovss %xmm0, (%esp)
410 ; X86-NEXT: flds (%esp)
411 ; X86-NEXT: popl %eax
413 %v = call <4 x float> @llvm.sqrt.v4f32(<4 x float> %x)
414 %r = extractelement <4 x float> %v, i32 0
; llvm.sqrt on v4f64 + extract lane 0 scalarizes to vsqrtsd.
418 define double @fsqrt_v4f64(<4 x double> %x) nounwind {
419 ; X64-LABEL: fsqrt_v4f64:
421 ; X64-NEXT: vsqrtsd %xmm0, %xmm0, %xmm0
422 ; X64-NEXT: vzeroupper
425 ; X86-LABEL: fsqrt_v4f64:
427 ; X86-NEXT: pushl %ebp
428 ; X86-NEXT: movl %esp, %ebp
429 ; X86-NEXT: andl $-8, %esp
430 ; X86-NEXT: subl $8, %esp
431 ; X86-NEXT: vsqrtsd %xmm0, %xmm0, %xmm0
432 ; X86-NEXT: vmovsd %xmm0, (%esp)
433 ; X86-NEXT: fldl (%esp)
434 ; X86-NEXT: movl %ebp, %esp
435 ; X86-NEXT: popl %ebp
436 ; X86-NEXT: vzeroupper
438 %v = call <4 x double> @llvm.sqrt.v4f64(<4 x double> %x)
439 %r = extractelement <4 x double> %v, i32 0
; llvm.sin has no instruction: the scalarized lane becomes a sinf libcall.
443 define float @fsin_v4f32(<4 x float> %x) nounwind {
444 ; X64-LABEL: fsin_v4f32:
446 ; X64-NEXT: jmp sinf # TAILCALL
448 ; X86-LABEL: fsin_v4f32:
450 ; X86-NEXT: pushl %eax
451 ; X86-NEXT: vmovss %xmm0, (%esp)
452 ; X86-NEXT: calll sinf
453 ; X86-NEXT: popl %eax
455 %v = call <4 x float> @llvm.sin.v4f32(<4 x float> %x)
456 %r = extractelement <4 x float> %v, i32 0
; v4f64 llvm.sin lane 0 becomes a sin libcall after narrowing ymm0 to xmm0.
460 define double @fsin_v4f64(<4 x double> %x) nounwind {
461 ; X64-LABEL: fsin_v4f64:
463 ; X64-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
464 ; X64-NEXT: vzeroupper
465 ; X64-NEXT: jmp sin # TAILCALL
467 ; X86-LABEL: fsin_v4f64:
469 ; X86-NEXT: subl $8, %esp
470 ; X86-NEXT: vmovlps %xmm0, (%esp)
471 ; X86-NEXT: vzeroupper
472 ; X86-NEXT: calll sin
473 ; X86-NEXT: addl $8, %esp
475 %v = call <4 x double> @llvm.sin.v4f64(<4 x double> %x)
476 %r = extractelement <4 x double> %v, i32 0
; llvm.fma + extract lane 0 scalarizes to a single vfmadd213ss (requires the fma attr from the RUN line).
480 define float @fma_v4f32(<4 x float> %x, <4 x float> %y, <4 x float> %z) nounwind {
481 ; X64-LABEL: fma_v4f32:
483 ; X64-NEXT: vfmadd213ss {{.*#+}} xmm0 = (xmm1 * xmm0) + xmm2
486 ; X86-LABEL: fma_v4f32:
488 ; X86-NEXT: pushl %eax
489 ; X86-NEXT: vfmadd213ss {{.*#+}} xmm0 = (xmm1 * xmm0) + xmm2
490 ; X86-NEXT: vmovss %xmm0, (%esp)
491 ; X86-NEXT: flds (%esp)
492 ; X86-NEXT: popl %eax
494 %v = call <4 x float> @llvm.fma.v4f32(<4 x float> %x, <4 x float> %y, <4 x float> %z)
495 %r = extractelement <4 x float> %v, i32 0
; v4f64 llvm.fma + extract lane 0 scalarizes to vfmadd213sd (operand roles differ per target, same math).
499 define double @fma_v4f64(<4 x double> %x, <4 x double> %y, <4 x double> %z) nounwind {
500 ; X64-LABEL: fma_v4f64:
502 ; X64-NEXT: vfmadd213sd {{.*#+}} xmm0 = (xmm1 * xmm0) + xmm2
503 ; X64-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
504 ; X64-NEXT: vzeroupper
507 ; X86-LABEL: fma_v4f64:
509 ; X86-NEXT: pushl %ebp
510 ; X86-NEXT: movl %esp, %ebp
511 ; X86-NEXT: andl $-8, %esp
512 ; X86-NEXT: subl $8, %esp
513 ; X86-NEXT: vfmadd213sd {{.*#+}} xmm1 = (xmm0 * xmm1) + xmm2
514 ; X86-NEXT: vmovsd %xmm1, (%esp)
515 ; X86-NEXT: fldl (%esp)
516 ; X86-NEXT: movl %ebp, %esp
517 ; X86-NEXT: popl %ebp
518 ; X86-NEXT: vzeroupper
520 %v = call <4 x double> @llvm.fma.v4f64(<4 x double> %x, <4 x double> %y, <4 x double> %z)
521 %r = extractelement <4 x double> %v, i32 0
; llvm.fabs + extract lane 0: a single andps with the sign-clearing mask (mask prints as NaN = all-ones exponent/mantissa bits).
525 define float @fabs_v4f32(<4 x float> %x) nounwind {
526 ; X64-LABEL: fabs_v4f32:
528 ; X64-NEXT: vbroadcastss {{.*#+}} xmm1 = [NaN,NaN,NaN,NaN]
529 ; X64-NEXT: vandps %xmm1, %xmm0, %xmm0
532 ; X86-LABEL: fabs_v4f32:
534 ; X86-NEXT: pushl %eax
535 ; X86-NEXT: vbroadcastss {{.*#+}} xmm1 = [NaN,NaN,NaN,NaN]
536 ; X86-NEXT: vandps %xmm1, %xmm0, %xmm0
537 ; X86-NEXT: vmovss %xmm0, (%esp)
538 ; X86-NEXT: flds (%esp)
539 ; X86-NEXT: popl %eax
541 %v = call <4 x float> @llvm.fabs.v4f32(<4 x float> %x)
542 %r = extractelement <4 x float> %v, i32 0
; v4f64 llvm.fabs + extract lane 0: andps with a constant-pool sign mask, narrowed to xmm.
546 define double @fabs_v4f64(<4 x double> %x) nounwind {
547 ; X64-LABEL: fabs_v4f64:
549 ; X64-NEXT: vandps {{.*}}(%rip), %xmm0, %xmm0
550 ; X64-NEXT: vzeroupper
553 ; X86-LABEL: fabs_v4f64:
555 ; X86-NEXT: pushl %ebp
556 ; X86-NEXT: movl %esp, %ebp
557 ; X86-NEXT: andl $-8, %esp
558 ; X86-NEXT: subl $8, %esp
559 ; X86-NEXT: vandps {{\.LCPI.*}}, %xmm0, %xmm0
560 ; X86-NEXT: vmovlps %xmm0, (%esp)
561 ; X86-NEXT: fldl (%esp)
562 ; X86-NEXT: movl %ebp, %esp
563 ; X86-NEXT: popl %ebp
564 ; X86-NEXT: vzeroupper
566 %v = call <4 x double> @llvm.fabs.v4f64(<4 x double> %x)
567 %r = extractelement <4 x double> %v, i32 0
; llvm.maxnum lane 0: vmaxss plus a cmpunord/blendv fixup so a NaN in %x yields %y (maxnum NaN semantics).
571 define float @fmaxnum_v4f32(<4 x float> %x, <4 x float> %y) nounwind {
572 ; X64-LABEL: fmaxnum_v4f32:
574 ; X64-NEXT: vmaxss %xmm0, %xmm1, %xmm2
575 ; X64-NEXT: vcmpunordss %xmm0, %xmm0, %xmm0
576 ; X64-NEXT: vblendvps %xmm0, %xmm1, %xmm2, %xmm0
579 ; X86-LABEL: fmaxnum_v4f32:
581 ; X86-NEXT: pushl %eax
582 ; X86-NEXT: vmaxss %xmm0, %xmm1, %xmm2
583 ; X86-NEXT: vcmpunordss %xmm0, %xmm0, %xmm0
584 ; X86-NEXT: vblendvps %xmm0, %xmm1, %xmm2, %xmm0
585 ; X86-NEXT: vmovss %xmm0, (%esp)
586 ; X86-NEXT: flds (%esp)
587 ; X86-NEXT: popl %eax
589 %v = call <4 x float> @llvm.maxnum.v4f32(<4 x float> %x, <4 x float> %y)
590 %r = extractelement <4 x float> %v, i32 0
; v4f64 llvm.maxnum lane 0: vmaxsd plus the NaN-handling cmpunord/blendv sequence.
594 define double @fmaxnum_v4f64(<4 x double> %x, <4 x double> %y) nounwind {
595 ; X64-LABEL: fmaxnum_v4f64:
597 ; X64-NEXT: vmaxsd %xmm0, %xmm1, %xmm2
598 ; X64-NEXT: vcmpunordsd %xmm0, %xmm0, %xmm0
599 ; X64-NEXT: vblendvpd %xmm0, %xmm1, %xmm2, %xmm0
600 ; X64-NEXT: vzeroupper
603 ; X86-LABEL: fmaxnum_v4f64:
605 ; X86-NEXT: pushl %ebp
606 ; X86-NEXT: movl %esp, %ebp
607 ; X86-NEXT: andl $-8, %esp
608 ; X86-NEXT: subl $8, %esp
609 ; X86-NEXT: vmaxsd %xmm0, %xmm1, %xmm2
610 ; X86-NEXT: vcmpunordsd %xmm0, %xmm0, %xmm0
611 ; X86-NEXT: vblendvpd %xmm0, %xmm1, %xmm2, %xmm0
612 ; X86-NEXT: vmovlpd %xmm0, (%esp)
613 ; X86-NEXT: fldl (%esp)
614 ; X86-NEXT: movl %ebp, %esp
615 ; X86-NEXT: popl %ebp
616 ; X86-NEXT: vzeroupper
618 %v = call <4 x double> @llvm.maxnum.v4f64(<4 x double> %x, <4 x double> %y)
619 %r = extractelement <4 x double> %v, i32 0
; llvm.minnum lane 0: vminss with the same NaN fixup pattern as the maxnum test above.
623 define float @fminnum_v4f32(<4 x float> %x, <4 x float> %y) nounwind {
624 ; X64-LABEL: fminnum_v4f32:
626 ; X64-NEXT: vminss %xmm0, %xmm1, %xmm2
627 ; X64-NEXT: vcmpunordss %xmm0, %xmm0, %xmm0
628 ; X64-NEXT: vblendvps %xmm0, %xmm1, %xmm2, %xmm0
631 ; X86-LABEL: fminnum_v4f32:
633 ; X86-NEXT: pushl %eax
634 ; X86-NEXT: vminss %xmm0, %xmm1, %xmm2
635 ; X86-NEXT: vcmpunordss %xmm0, %xmm0, %xmm0
636 ; X86-NEXT: vblendvps %xmm0, %xmm1, %xmm2, %xmm0
637 ; X86-NEXT: vmovss %xmm0, (%esp)
638 ; X86-NEXT: flds (%esp)
639 ; X86-NEXT: popl %eax
641 %v = call <4 x float> @llvm.minnum.v4f32(<4 x float> %x, <4 x float> %y)
642 %r = extractelement <4 x float> %v, i32 0
; v4f64 llvm.minnum lane 0: vminsd plus NaN fixup, scalarized from the 256-bit input.
646 define double @fminnum_v4f64(<4 x double> %x, <4 x double> %y) nounwind {
647 ; X64-LABEL: fminnum_v4f64:
649 ; X64-NEXT: vminsd %xmm0, %xmm1, %xmm2
650 ; X64-NEXT: vcmpunordsd %xmm0, %xmm0, %xmm0
651 ; X64-NEXT: vblendvpd %xmm0, %xmm1, %xmm2, %xmm0
652 ; X64-NEXT: vzeroupper
655 ; X86-LABEL: fminnum_v4f64:
657 ; X86-NEXT: pushl %ebp
658 ; X86-NEXT: movl %esp, %ebp
659 ; X86-NEXT: andl $-8, %esp
660 ; X86-NEXT: subl $8, %esp
661 ; X86-NEXT: vminsd %xmm0, %xmm1, %xmm2
662 ; X86-NEXT: vcmpunordsd %xmm0, %xmm0, %xmm0
663 ; X86-NEXT: vblendvpd %xmm0, %xmm1, %xmm2, %xmm0
664 ; X86-NEXT: vmovlpd %xmm0, (%esp)
665 ; X86-NEXT: fldl (%esp)
666 ; X86-NEXT: movl %ebp, %esp
667 ; X86-NEXT: popl %ebp
668 ; X86-NEXT: vzeroupper
670 %v = call <4 x double> @llvm.minnum.v4f64(<4 x double> %x, <4 x double> %y)
671 %r = extractelement <4 x double> %v, i32 0
675 ;define float @fmaximum_v4f32(<4 x float> %x, <4 x float> %y) nounwind {
676 ; %v = call <4 x float> @llvm.maximum.v4f32(<4 x float> %x, <4 x float> %y)
677 ; %r = extractelement <4 x float> %v, i32 0
681 ;define double @fmaximum_v4f64(<4 x double> %x, <4 x double> %y) nounwind {
682 ; %v = call <4 x double> @llvm.maximum.v4f64(<4 x double> %x, <4 x double> %y)
683 ; %r = extractelement <4 x double> %v, i32 0
687 ;define float @fminimum_v4f32(<4 x float> %x, <4 x float> %y) nounwind {
688 ; %v = call <4 x float> @llvm.minimum.v4f32(<4 x float> %x, <4 x float> %y)
689 ; %r = extractelement <4 x float> %v, i32 0
693 ;define double @fminimum_v4f64(<4 x double> %x, <4 x double> %y) nounwind {
694 ; %v = call <4 x double> @llvm.minimum.v4f64(<4 x double> %x, <4 x double> %y)
695 ; %r = extractelement <4 x double> %v, i32 0
; fcmp ogt + select (max pattern) + extract lane 0 folds directly to vmaxss, no NaN fixup needed for this predicate/operand order.
699 define float @maxps_v4f32(<4 x float> %x, <4 x float> %y) nounwind {
700 ; X64-LABEL: maxps_v4f32:
702 ; X64-NEXT: vmaxss %xmm1, %xmm0, %xmm0
705 ; X86-LABEL: maxps_v4f32:
707 ; X86-NEXT: pushl %eax
708 ; X86-NEXT: vmaxss %xmm1, %xmm0, %xmm0
709 ; X86-NEXT: vmovss %xmm0, (%esp)
710 ; X86-NEXT: flds (%esp)
711 ; X86-NEXT: popl %eax
713 %cmp = fcmp ogt <4 x float> %x, %y
714 %v = select <4 x i1> %cmp, <4 x float> %x, <4 x float> %y
715 %r = extractelement <4 x float> %v, i32 0
; v4f64 max pattern (ogt + select) + extract lane 0 folds to vmaxsd.
719 define double @maxpd_v4f64(<4 x double> %x, <4 x double> %y) nounwind {
720 ; X64-LABEL: maxpd_v4f64:
722 ; X64-NEXT: vmaxsd %xmm1, %xmm0, %xmm0
723 ; X64-NEXT: vzeroupper
726 ; X86-LABEL: maxpd_v4f64:
728 ; X86-NEXT: pushl %ebp
729 ; X86-NEXT: movl %esp, %ebp
730 ; X86-NEXT: andl $-8, %esp
731 ; X86-NEXT: subl $8, %esp
732 ; X86-NEXT: vmaxsd %xmm1, %xmm0, %xmm0
733 ; X86-NEXT: vmovsd %xmm0, (%esp)
734 ; X86-NEXT: fldl (%esp)
735 ; X86-NEXT: movl %ebp, %esp
736 ; X86-NEXT: popl %ebp
737 ; X86-NEXT: vzeroupper
739 %cmp = fcmp ogt <4 x double> %x, %y
740 %v = select <4 x i1> %cmp, <4 x double> %x, <4 x double> %y
741 %r = extractelement <4 x double> %v, i32 0
; fcmp olt + select (min pattern) + extract lane 0 folds to vminss.
745 define float @minps_v4f32(<4 x float> %x, <4 x float> %y) nounwind {
746 ; X64-LABEL: minps_v4f32:
748 ; X64-NEXT: vminss %xmm1, %xmm0, %xmm0
751 ; X86-LABEL: minps_v4f32:
753 ; X86-NEXT: pushl %eax
754 ; X86-NEXT: vminss %xmm1, %xmm0, %xmm0
755 ; X86-NEXT: vmovss %xmm0, (%esp)
756 ; X86-NEXT: flds (%esp)
757 ; X86-NEXT: popl %eax
759 %cmp = fcmp olt <4 x float> %x, %y
760 %v = select <4 x i1> %cmp, <4 x float> %x, <4 x float> %y
761 %r = extractelement <4 x float> %v, i32 0
; v4f64 min pattern (olt + select) + extract lane 0 folds to vminsd.
765 define double @minpd_v4f64(<4 x double> %x, <4 x double> %y) nounwind {
766 ; X64-LABEL: minpd_v4f64:
768 ; X64-NEXT: vminsd %xmm1, %xmm0, %xmm0
769 ; X64-NEXT: vzeroupper
772 ; X86-LABEL: minpd_v4f64:
774 ; X86-NEXT: pushl %ebp
775 ; X86-NEXT: movl %esp, %ebp
776 ; X86-NEXT: andl $-8, %esp
777 ; X86-NEXT: subl $8, %esp
778 ; X86-NEXT: vminsd %xmm1, %xmm0, %xmm0
779 ; X86-NEXT: vmovsd %xmm0, (%esp)
780 ; X86-NEXT: fldl (%esp)
781 ; X86-NEXT: movl %ebp, %esp
782 ; X86-NEXT: popl %ebp
783 ; X86-NEXT: vzeroupper
785 %cmp = fcmp olt <4 x double> %x, %y
786 %v = select <4 x i1> %cmp, <4 x double> %x, <4 x double> %y
787 %r = extractelement <4 x double> %v, i32 0
; llvm.copysign lane 0: sign bit masked from %y (and with -0.0), magnitude from %x (and with abs mask), then or.
791 define float @copysign_v4f32(<4 x float> %x, <4 x float> %y) nounwind {
792 ; X64-LABEL: copysign_v4f32:
794 ; X64-NEXT: vbroadcastss {{.*#+}} xmm2 = [-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0]
795 ; X64-NEXT: vandps %xmm2, %xmm1, %xmm1
796 ; X64-NEXT: vbroadcastss {{.*#+}} xmm2 = [NaN,NaN,NaN,NaN]
797 ; X64-NEXT: vandps %xmm2, %xmm0, %xmm0
798 ; X64-NEXT: vorps %xmm1, %xmm0, %xmm0
801 ; X86-LABEL: copysign_v4f32:
803 ; X86-NEXT: pushl %eax
804 ; X86-NEXT: vbroadcastss {{.*#+}} xmm2 = [-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0]
805 ; X86-NEXT: vandps %xmm2, %xmm1, %xmm1
806 ; X86-NEXT: vbroadcastss {{.*#+}} xmm2 = [NaN,NaN,NaN,NaN]
807 ; X86-NEXT: vandps %xmm2, %xmm0, %xmm0
808 ; X86-NEXT: vorps %xmm1, %xmm0, %xmm0
809 ; X86-NEXT: vmovss %xmm0, (%esp)
810 ; X86-NEXT: flds (%esp)
811 ; X86-NEXT: popl %eax
813 %v = call <4 x float> @llvm.copysign.v4f32(<4 x float> %x, <4 x float> %y)
814 %r = extractelement <4 x float> %v, i32 0
; v4f64 llvm.copysign lane 0: same and/and/or bit-mask pattern using constant-pool masks, narrowed to xmm.
818 define double @copysign_v4f64(<4 x double> %x, <4 x double> %y) nounwind {
819 ; X64-LABEL: copysign_v4f64:
821 ; X64-NEXT: vandps {{.*}}(%rip), %xmm1, %xmm1
822 ; X64-NEXT: vandps {{.*}}(%rip), %xmm0, %xmm0
823 ; X64-NEXT: vorps %xmm1, %xmm0, %xmm0
824 ; X64-NEXT: vzeroupper
827 ; X86-LABEL: copysign_v4f64:
829 ; X86-NEXT: pushl %ebp
830 ; X86-NEXT: movl %esp, %ebp
831 ; X86-NEXT: andl $-8, %esp
832 ; X86-NEXT: subl $8, %esp
833 ; X86-NEXT: vandps {{\.LCPI.*}}, %xmm1, %xmm1
834 ; X86-NEXT: vandps {{\.LCPI.*}}, %xmm0, %xmm0
835 ; X86-NEXT: vorps %xmm1, %xmm0, %xmm0
836 ; X86-NEXT: vmovlps %xmm0, (%esp)
837 ; X86-NEXT: fldl (%esp)
838 ; X86-NEXT: movl %ebp, %esp
839 ; X86-NEXT: popl %ebp
840 ; X86-NEXT: vzeroupper
842 %v = call <4 x double> @llvm.copysign.v4f64(<4 x double> %x, <4 x double> %y)
843 %r = extractelement <4 x double> %v, i32 0
; llvm.floor lane 0 scalarizes to vroundss with immediate 9 (round toward -inf, suppress exceptions).
847 define float @floor_v4f32(<4 x float> %x) nounwind {
848 ; X64-LABEL: floor_v4f32:
850 ; X64-NEXT: vroundss $9, %xmm0, %xmm0, %xmm0
853 ; X86-LABEL: floor_v4f32:
855 ; X86-NEXT: pushl %eax
856 ; X86-NEXT: vroundss $9, %xmm0, %xmm0, %xmm0
857 ; X86-NEXT: vmovss %xmm0, (%esp)
858 ; X86-NEXT: flds (%esp)
859 ; X86-NEXT: popl %eax
861 %v = call <4 x float> @llvm.floor.v4f32(<4 x float> %x)
862 %r = extractelement <4 x float> %v, i32 0
; v4f64 llvm.floor lane 0 scalarizes to vroundsd $9.
866 define double @floor_v4f64(<4 x double> %x) nounwind {
867 ; X64-LABEL: floor_v4f64:
869 ; X64-NEXT: vroundsd $9, %xmm0, %xmm0, %xmm0
870 ; X64-NEXT: vzeroupper
873 ; X86-LABEL: floor_v4f64:
875 ; X86-NEXT: pushl %ebp
876 ; X86-NEXT: movl %esp, %ebp
877 ; X86-NEXT: andl $-8, %esp
878 ; X86-NEXT: subl $8, %esp
879 ; X86-NEXT: vroundsd $9, %xmm0, %xmm0, %xmm0
880 ; X86-NEXT: vmovsd %xmm0, (%esp)
881 ; X86-NEXT: fldl (%esp)
882 ; X86-NEXT: movl %ebp, %esp
883 ; X86-NEXT: popl %ebp
884 ; X86-NEXT: vzeroupper
886 %v = call <4 x double> @llvm.floor.v4f64(<4 x double> %x)
887 %r = extractelement <4 x double> %v, i32 0
; llvm.ceil lane 0 scalarizes to vroundss with immediate 10 (round toward +inf).
891 define float @ceil_v4f32(<4 x float> %x) nounwind {
892 ; X64-LABEL: ceil_v4f32:
894 ; X64-NEXT: vroundss $10, %xmm0, %xmm0, %xmm0
897 ; X86-LABEL: ceil_v4f32:
899 ; X86-NEXT: pushl %eax
900 ; X86-NEXT: vroundss $10, %xmm0, %xmm0, %xmm0
901 ; X86-NEXT: vmovss %xmm0, (%esp)
902 ; X86-NEXT: flds (%esp)
903 ; X86-NEXT: popl %eax
905 %v = call <4 x float> @llvm.ceil.v4f32(<4 x float> %x)
906 %r = extractelement <4 x float> %v, i32 0
; v4f64 llvm.ceil lane 0 scalarizes to vroundsd $10.
910 define double @ceil_v4f64(<4 x double> %x) nounwind {
911 ; X64-LABEL: ceil_v4f64:
913 ; X64-NEXT: vroundsd $10, %xmm0, %xmm0, %xmm0
914 ; X64-NEXT: vzeroupper
917 ; X86-LABEL: ceil_v4f64:
919 ; X86-NEXT: pushl %ebp
920 ; X86-NEXT: movl %esp, %ebp
921 ; X86-NEXT: andl $-8, %esp
922 ; X86-NEXT: subl $8, %esp
923 ; X86-NEXT: vroundsd $10, %xmm0, %xmm0, %xmm0
924 ; X86-NEXT: vmovsd %xmm0, (%esp)
925 ; X86-NEXT: fldl (%esp)
926 ; X86-NEXT: movl %ebp, %esp
927 ; X86-NEXT: popl %ebp
928 ; X86-NEXT: vzeroupper
930 %v = call <4 x double> @llvm.ceil.v4f64(<4 x double> %x)
931 %r = extractelement <4 x double> %v, i32 0
; llvm.trunc lane 0 scalarizes to vroundss with immediate 11 (round toward zero).
935 define float @trunc_v4f32(<4 x float> %x) nounwind {
936 ; X64-LABEL: trunc_v4f32:
938 ; X64-NEXT: vroundss $11, %xmm0, %xmm0, %xmm0
941 ; X86-LABEL: trunc_v4f32:
943 ; X86-NEXT: pushl %eax
944 ; X86-NEXT: vroundss $11, %xmm0, %xmm0, %xmm0
945 ; X86-NEXT: vmovss %xmm0, (%esp)
946 ; X86-NEXT: flds (%esp)
947 ; X86-NEXT: popl %eax
949 %v = call <4 x float> @llvm.trunc.v4f32(<4 x float> %x)
950 %r = extractelement <4 x float> %v, i32 0
; v4f64 llvm.trunc lane 0 scalarizes to vroundsd $11.
954 define double @trunc_v4f64(<4 x double> %x) nounwind {
955 ; X64-LABEL: trunc_v4f64:
957 ; X64-NEXT: vroundsd $11, %xmm0, %xmm0, %xmm0
958 ; X64-NEXT: vzeroupper
961 ; X86-LABEL: trunc_v4f64:
963 ; X86-NEXT: pushl %ebp
964 ; X86-NEXT: movl %esp, %ebp
965 ; X86-NEXT: andl $-8, %esp
966 ; X86-NEXT: subl $8, %esp
967 ; X86-NEXT: vroundsd $11, %xmm0, %xmm0, %xmm0
968 ; X86-NEXT: vmovsd %xmm0, (%esp)
969 ; X86-NEXT: fldl (%esp)
970 ; X86-NEXT: movl %ebp, %esp
971 ; X86-NEXT: popl %ebp
972 ; X86-NEXT: vzeroupper
974 %v = call <4 x double> @llvm.trunc.v4f64(<4 x double> %x)
975 %r = extractelement <4 x double> %v, i32 0
; llvm.rint lane 0 scalarizes to vroundss with immediate 4 (current rounding mode, exceptions allowed).
979 define float @rint_v4f32(<4 x float> %x) nounwind {
980 ; X64-LABEL: rint_v4f32:
982 ; X64-NEXT: vroundss $4, %xmm0, %xmm0, %xmm0
985 ; X86-LABEL: rint_v4f32:
987 ; X86-NEXT: pushl %eax
988 ; X86-NEXT: vroundss $4, %xmm0, %xmm0, %xmm0
989 ; X86-NEXT: vmovss %xmm0, (%esp)
990 ; X86-NEXT: flds (%esp)
991 ; X86-NEXT: popl %eax
993 %v = call <4 x float> @llvm.rint.v4f32(<4 x float> %x)
994 %r = extractelement <4 x float> %v, i32 0
; v4f64 llvm.rint lane 0 scalarizes to vroundsd $4.
998 define double @rint_v4f64(<4 x double> %x) nounwind {
999 ; X64-LABEL: rint_v4f64:
1001 ; X64-NEXT: vroundsd $4, %xmm0, %xmm0, %xmm0
1002 ; X64-NEXT: vzeroupper
1005 ; X86-LABEL: rint_v4f64:
1007 ; X86-NEXT: pushl %ebp
1008 ; X86-NEXT: movl %esp, %ebp
1009 ; X86-NEXT: andl $-8, %esp
1010 ; X86-NEXT: subl $8, %esp
1011 ; X86-NEXT: vroundsd $4, %xmm0, %xmm0, %xmm0
1012 ; X86-NEXT: vmovsd %xmm0, (%esp)
1013 ; X86-NEXT: fldl (%esp)
1014 ; X86-NEXT: movl %ebp, %esp
1015 ; X86-NEXT: popl %ebp
1016 ; X86-NEXT: vzeroupper
1018 %v = call <4 x double> @llvm.rint.v4f64(<4 x double> %x)
1019 %r = extractelement <4 x double> %v, i32 0
; llvm.nearbyint lane 0 scalarizes to vroundss with immediate 12 (current mode, suppress exceptions).
1023 define float @nearbyint_v4f32(<4 x float> %x) nounwind {
1024 ; X64-LABEL: nearbyint_v4f32:
1026 ; X64-NEXT: vroundss $12, %xmm0, %xmm0, %xmm0
1029 ; X86-LABEL: nearbyint_v4f32:
1031 ; X86-NEXT: pushl %eax
1032 ; X86-NEXT: vroundss $12, %xmm0, %xmm0, %xmm0
1033 ; X86-NEXT: vmovss %xmm0, (%esp)
1034 ; X86-NEXT: flds (%esp)
1035 ; X86-NEXT: popl %eax
1037 %v = call <4 x float> @llvm.nearbyint.v4f32(<4 x float> %x)
1038 %r = extractelement <4 x float> %v, i32 0
; v4f64 llvm.nearbyint lane 0 scalarizes to vroundsd $12.
1042 define double @nearbyint_v4f64(<4 x double> %x) nounwind {
1043 ; X64-LABEL: nearbyint_v4f64:
1045 ; X64-NEXT: vroundsd $12, %xmm0, %xmm0, %xmm0
1046 ; X64-NEXT: vzeroupper
1049 ; X86-LABEL: nearbyint_v4f64:
1051 ; X86-NEXT: pushl %ebp
1052 ; X86-NEXT: movl %esp, %ebp
1053 ; X86-NEXT: andl $-8, %esp
1054 ; X86-NEXT: subl $8, %esp
1055 ; X86-NEXT: vroundsd $12, %xmm0, %xmm0, %xmm0
1056 ; X86-NEXT: vmovsd %xmm0, (%esp)
1057 ; X86-NEXT: fldl (%esp)
1058 ; X86-NEXT: movl %ebp, %esp
1059 ; X86-NEXT: popl %ebp
1060 ; X86-NEXT: vzeroupper
1062 %v = call <4 x double> @llvm.nearbyint.v4f64(<4 x double> %x)
1063 %r = extractelement <4 x double> %v, i32 0
; llvm.round (ties away from zero) has no single instruction here: the lane becomes a roundf libcall.
1067 define float @round_v4f32(<4 x float> %x) nounwind {
1068 ; X64-LABEL: round_v4f32:
1070 ; X64-NEXT: jmp roundf # TAILCALL
1072 ; X86-LABEL: round_v4f32:
1074 ; X86-NEXT: pushl %eax
1075 ; X86-NEXT: vmovss %xmm0, (%esp)
1076 ; X86-NEXT: calll roundf
1077 ; X86-NEXT: popl %eax
1079 %v = call <4 x float> @llvm.round.v4f32(<4 x float> %x)
1080 %r = extractelement <4 x float> %v, i32 0
; v4f64 llvm.round lane 0 becomes a round libcall after narrowing ymm0 to xmm0.
1084 define double @round_v4f64(<4 x double> %x) nounwind {
1085 ; X64-LABEL: round_v4f64:
1087 ; X64-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
1088 ; X64-NEXT: vzeroupper
1089 ; X64-NEXT: jmp round # TAILCALL
1091 ; X86-LABEL: round_v4f64:
1093 ; X86-NEXT: subl $8, %esp
1094 ; X86-NEXT: vmovlps %xmm0, (%esp)
1095 ; X86-NEXT: vzeroupper
1096 ; X86-NEXT: calll round
1097 ; X86-NEXT: addl $8, %esp
1099 %v = call <4 x double> @llvm.round.v4f64(<4 x double> %x)
1100 %r = extractelement <4 x double> %v, i32 0
; Target intrinsic x86.sse.rcp.ps + extract lane 0 narrows to the scalar vrcpss form.
1104 define float @rcp_v4f32(<4 x float> %x) nounwind {
1105 ; X64-LABEL: rcp_v4f32:
1107 ; X64-NEXT: vrcpss %xmm0, %xmm0, %xmm0
1110 ; X86-LABEL: rcp_v4f32:
1112 ; X86-NEXT: pushl %eax
1113 ; X86-NEXT: vrcpss %xmm0, %xmm0, %xmm0
1114 ; X86-NEXT: vmovss %xmm0, (%esp)
1115 ; X86-NEXT: flds (%esp)
1116 ; X86-NEXT: popl %eax
1118 %v = call <4 x float> @llvm.x86.sse.rcp.ps(<4 x float> %x)
1119 %r = extractelement <4 x float> %v, i32 0
; The 256-bit rcp intrinsic has no scalar form, so the full vrcpps %ymm executes and lane 0 is extracted afterwards.
1123 define float @rcp_v8f32(<8 x float> %x) nounwind {
1124 ; X64-LABEL: rcp_v8f32:
1126 ; X64-NEXT: vrcpps %ymm0, %ymm0
1127 ; X64-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
1128 ; X64-NEXT: vzeroupper
1131 ; X86-LABEL: rcp_v8f32:
1133 ; X86-NEXT: pushl %eax
1134 ; X86-NEXT: vrcpps %ymm0, %ymm0
1135 ; X86-NEXT: vmovss %xmm0, (%esp)
1136 ; X86-NEXT: flds (%esp)
1137 ; X86-NEXT: popl %eax
1138 ; X86-NEXT: vzeroupper
1140 %v = call <8 x float> @llvm.x86.avx.rcp.ps.256(<8 x float> %x)
1141 %r = extractelement <8 x float> %v, i32 0
; Target intrinsic x86.sse.rsqrt.ps + extract lane 0 narrows to the scalar vrsqrtss form.
1145 define float @rsqrt_v4f32(<4 x float> %x) nounwind {
1146 ; X64-LABEL: rsqrt_v4f32:
1148 ; X64-NEXT: vrsqrtss %xmm0, %xmm0, %xmm0
1151 ; X86-LABEL: rsqrt_v4f32:
1153 ; X86-NEXT: pushl %eax
1154 ; X86-NEXT: vrsqrtss %xmm0, %xmm0, %xmm0
1155 ; X86-NEXT: vmovss %xmm0, (%esp)
1156 ; X86-NEXT: flds (%esp)
1157 ; X86-NEXT: popl %eax
1159 %v = call <4 x float> @llvm.x86.sse.rsqrt.ps(<4 x float> %x)
1160 %r = extractelement <4 x float> %v, i32 0
; The 256-bit rsqrt intrinsic has no scalar form, so the full vrsqrtps %ymm executes before extracting lane 0.
1164 define float @rsqrt_v8f32(<8 x float> %x) nounwind {
1165 ; X64-LABEL: rsqrt_v8f32:
1167 ; X64-NEXT: vrsqrtps %ymm0, %ymm0
1168 ; X64-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
1169 ; X64-NEXT: vzeroupper
1172 ; X86-LABEL: rsqrt_v8f32:
1174 ; X86-NEXT: pushl %eax
1175 ; X86-NEXT: vrsqrtps %ymm0, %ymm0
1176 ; X86-NEXT: vmovss %xmm0, (%esp)
1177 ; X86-NEXT: flds (%esp)
1178 ; X86-NEXT: popl %eax
1179 ; X86-NEXT: vzeroupper
1181 %v = call <8 x float> @llvm.x86.avx.rsqrt.ps.256(<8 x float> %x)
1182 %r = extractelement <8 x float> %v, i32 0
1186 declare <4 x float> @llvm.sqrt.v4f32(<4 x float>)
1187 declare <4 x double> @llvm.sqrt.v4f64(<4 x double>)
1188 declare <4 x float> @llvm.sin.v4f32(<4 x float>)
1189 declare <4 x double> @llvm.sin.v4f64(<4 x double>)
1190 declare <4 x float> @llvm.fma.v4f32(<4 x float>, <4 x float>, <4 x float>)
1191 declare <4 x double> @llvm.fma.v4f64(<4 x double>, <4 x double>, <4 x double>)
1192 declare <4 x float> @llvm.fabs.v4f32(<4 x float>)
1193 declare <4 x double> @llvm.fabs.v4f64(<4 x double>)
1194 declare <4 x float> @llvm.maxnum.v4f32(<4 x float>, <4 x float>)
1195 declare <4 x double> @llvm.maxnum.v4f64(<4 x double>, <4 x double>)
1196 declare <4 x float> @llvm.minnum.v4f32(<4 x float>, <4 x float>)
1197 declare <4 x double> @llvm.minnum.v4f64(<4 x double>, <4 x double>)
1198 declare <4 x float> @llvm.maximum.v4f32(<4 x float>, <4 x float>)
1199 declare <4 x double> @llvm.maximum.v4f64(<4 x double>, <4 x double>)
1200 declare <4 x float> @llvm.minimum.v4f32(<4 x float>, <4 x float>)
1201 declare <4 x double> @llvm.minimum.v4f64(<4 x double>, <4 x double>)
1202 declare <4 x float> @llvm.copysign.v4f32(<4 x float>, <4 x float>)
1203 declare <4 x double> @llvm.copysign.v4f64(<4 x double>, <4 x double>)
1204 declare <4 x float> @llvm.floor.v4f32(<4 x float>)
1205 declare <4 x double> @llvm.floor.v4f64(<4 x double>)
1206 declare <4 x float> @llvm.ceil.v4f32(<4 x float>)
1207 declare <4 x double> @llvm.ceil.v4f64(<4 x double>)
1208 declare <4 x float> @llvm.trunc.v4f32(<4 x float>)
1209 declare <4 x double> @llvm.trunc.v4f64(<4 x double>)
1210 declare <4 x float> @llvm.rint.v4f32(<4 x float>)
1211 declare <4 x double> @llvm.rint.v4f64(<4 x double>)
1212 declare <4 x float> @llvm.nearbyint.v4f32(<4 x float>)
1213 declare <4 x double> @llvm.nearbyint.v4f64(<4 x double>)
1214 declare <4 x float> @llvm.round.v4f32(<4 x float>)
1215 declare <4 x double> @llvm.round.v4f64(<4 x double>)
1217 declare <4 x float> @llvm.x86.sse.rcp.ps(<4 x float>)
1218 declare <8 x float> @llvm.x86.avx.rcp.ps.256(<8 x float>)
1219 declare <4 x float> @llvm.x86.sse.rsqrt.ps(<4 x float>)
1220 declare <8 x float> @llvm.x86.avx.rsqrt.ps.256(<8 x float>)