1 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2 ; RUN: llc < %s -disable-peephole -mtriple=i686-apple-darwin9 -mattr=+sse,+sse2,+sse4.1 | FileCheck %s --check-prefixes=X86
3 ; RUN: llc < %s -disable-peephole -mtriple=x86_64-apple-darwin9 -mattr=+sse,+sse2,+sse4.1 | FileCheck %s --check-prefixes=X64
4 ; RUN: llc < %s -disable-peephole -mtriple=i686-apple-darwin9 -mattr=+avx | FileCheck %s --check-prefixes=X86_AVX,X86_AVX1
5 ; RUN: llc < %s -disable-peephole -mtriple=x86_64-apple-darwin9 -mattr=+avx | FileCheck %s --check-prefixes=X64_AVX,X64_AVX1
6 ; RUN: llc < %s -disable-peephole -mtriple=i686-apple-darwin9 -mattr=+avx512f | FileCheck %s --check-prefixes=X86_AVX,X86_AVX512
7 ; RUN: llc < %s -disable-peephole -mtriple=x86_64-apple-darwin9 -mattr=+avx512f | FileCheck %s --check-prefixes=X64_AVX,X64_AVX512
; test1: builds a <4 x float> from one float, then runs a chain of scalar SSE
; intrinsics (sub.ss, mul.ss, min.ss, max.ss), converts with cvttss2si and
; truncates the i32 result to i16. The CHECK lines pin the expected codegen
; per RUN configuration (SSE4.1, AVX1, AVX-512; 32- and 64-bit).
9 define i16 @test1(float %f) nounwind {
12 ; X86-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
13 ; X86-NEXT: addss {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
14 ; X86-NEXT: mulss {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
15 ; X86-NEXT: xorps %xmm1, %xmm1
16 ; X86-NEXT: blendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
17 ; X86-NEXT: minss {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
18 ; X86-NEXT: maxss %xmm1, %xmm0
19 ; X86-NEXT: cvttss2si %xmm0, %eax
20 ; X86-NEXT: ## kill: def $ax killed $ax killed $eax
25 ; X64-NEXT: addss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
26 ; X64-NEXT: mulss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
27 ; X64-NEXT: xorps %xmm1, %xmm1
28 ; X64-NEXT: blendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
29 ; X64-NEXT: minss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
30 ; X64-NEXT: maxss %xmm1, %xmm0
31 ; X64-NEXT: cvttss2si %xmm0, %eax
32 ; X64-NEXT: ## kill: def $ax killed $ax killed $eax
35 ; X86_AVX1-LABEL: test1:
37 ; X86_AVX1-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
38 ; X86_AVX1-NEXT: vaddss {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
39 ; X86_AVX1-NEXT: vmulss {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
40 ; X86_AVX1-NEXT: vxorps %xmm1, %xmm1, %xmm1
41 ; X86_AVX1-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
42 ; X86_AVX1-NEXT: vminss {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
43 ; X86_AVX1-NEXT: vmaxss %xmm1, %xmm0, %xmm0
44 ; X86_AVX1-NEXT: vcvttss2si %xmm0, %eax
45 ; X86_AVX1-NEXT: ## kill: def $ax killed $ax killed $eax
48 ; X64_AVX1-LABEL: test1:
50 ; X64_AVX1-NEXT: vaddss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
51 ; X64_AVX1-NEXT: vmulss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
52 ; X64_AVX1-NEXT: vxorps %xmm1, %xmm1, %xmm1
53 ; X64_AVX1-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
54 ; X64_AVX1-NEXT: vminss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
55 ; X64_AVX1-NEXT: vmaxss %xmm1, %xmm0, %xmm0
56 ; X64_AVX1-NEXT: vcvttss2si %xmm0, %eax
57 ; X64_AVX1-NEXT: ## kill: def $ax killed $ax killed $eax
60 ; X86_AVX512-LABEL: test1:
61 ; X86_AVX512: ## %bb.0:
62 ; X86_AVX512-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
63 ; X86_AVX512-NEXT: vaddss {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
64 ; X86_AVX512-NEXT: vmulss {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
65 ; X86_AVX512-NEXT: vxorps %xmm1, %xmm1, %xmm1
66 ; X86_AVX512-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
67 ; X86_AVX512-NEXT: vminss {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
68 ; X86_AVX512-NEXT: vxorps %xmm1, %xmm1, %xmm1
69 ; X86_AVX512-NEXT: vmaxss %xmm1, %xmm0, %xmm0
70 ; X86_AVX512-NEXT: vcvttss2si %xmm0, %eax
71 ; X86_AVX512-NEXT: ## kill: def $ax killed $ax killed $eax
72 ; X86_AVX512-NEXT: retl
74 ; X64_AVX512-LABEL: test1:
75 ; X64_AVX512: ## %bb.0:
76 ; X64_AVX512-NEXT: vaddss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
77 ; X64_AVX512-NEXT: vmulss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
78 ; X64_AVX512-NEXT: vxorps %xmm1, %xmm1, %xmm1
79 ; X64_AVX512-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
80 ; X64_AVX512-NEXT: vminss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
81 ; X64_AVX512-NEXT: vxorps %xmm1, %xmm1, %xmm1
82 ; X64_AVX512-NEXT: vmaxss %xmm1, %xmm0, %xmm0
83 ; X64_AVX512-NEXT: vcvttss2si %xmm0, %eax
84 ; X64_AVX512-NEXT: ## kill: def $ax killed $ax killed $eax
85 ; X64_AVX512-NEXT: retq
; Build the operand vector: lane 0 = %f, lanes 1-3 explicitly zeroed.
86 %tmp = insertelement <4 x float> undef, float %f, i32 0 ; <<4 x float>> [#uses=1]
87 %tmp10 = insertelement <4 x float> %tmp, float 0.000000e+00, i32 1 ; <<4 x float>> [#uses=1]
88 %tmp11 = insertelement <4 x float> %tmp10, float 0.000000e+00, i32 2 ; <<4 x float>> [#uses=1]
89 %tmp12 = insertelement <4 x float> %tmp11, float 0.000000e+00, i32 3 ; <<4 x float>> [#uses=1]
; Scalar-intrinsic chain: (f - 1.0) * 0.5, clamped to [0, 65535], then
; truncating float->int conversion on lane 0.
90 %tmp28 = tail call <4 x float> @llvm.x86.sse.sub.ss( <4 x float> %tmp12, <4 x float> < float 1.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00 > ) ; <<4 x float>> [#uses=1]
91 %tmp37 = tail call <4 x float> @llvm.x86.sse.mul.ss( <4 x float> %tmp28, <4 x float> < float 5.000000e-01, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00 > ) ; <<4 x float>> [#uses=1]
92 %tmp48 = tail call <4 x float> @llvm.x86.sse.min.ss( <4 x float> %tmp37, <4 x float> < float 6.553500e+04, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00 > ) ; <<4 x float>> [#uses=1]
93 %tmp59 = tail call <4 x float> @llvm.x86.sse.max.ss( <4 x float> %tmp48, <4 x float> zeroinitializer ) ; <<4 x float>> [#uses=1]
94 %tmp.upgrd.1 = tail call i32 @llvm.x86.sse.cvttss2si( <4 x float> %tmp59 ) ; <i32> [#uses=1]
95 %tmp69 = trunc i32 %tmp.upgrd.1 to i16 ; <i16> [#uses=1]
; NOTE(review): the terminating 'ret i16 %tmp69' and closing '}' are not
; visible in this chunk -- confirm against the original test file.
; test2: same clamp-and-convert computation as test1, but the subtract and
; multiply are plain scalar IR (fsub/fmul) instead of sse.sub.ss/sse.mul.ss;
; only min.ss, max.ss and cvttss2si go through intrinsics, so no blendps
; lane-zeroing is expected in the CHECK lines below.
99 define i16 @test2(float %f) nounwind {
102 ; X86-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
103 ; X86-NEXT: addss {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
104 ; X86-NEXT: mulss {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
105 ; X86-NEXT: minss {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
106 ; X86-NEXT: xorps %xmm1, %xmm1
107 ; X86-NEXT: maxss %xmm1, %xmm0
108 ; X86-NEXT: cvttss2si %xmm0, %eax
109 ; X86-NEXT: ## kill: def $ax killed $ax killed $eax
114 ; X64-NEXT: addss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
115 ; X64-NEXT: mulss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
116 ; X64-NEXT: minss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
117 ; X64-NEXT: xorps %xmm1, %xmm1
118 ; X64-NEXT: maxss %xmm1, %xmm0
119 ; X64-NEXT: cvttss2si %xmm0, %eax
120 ; X64-NEXT: ## kill: def $ax killed $ax killed $eax
123 ; X86_AVX-LABEL: test2:
125 ; X86_AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
126 ; X86_AVX-NEXT: vaddss {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
127 ; X86_AVX-NEXT: vmulss {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
128 ; X86_AVX-NEXT: vminss {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
129 ; X86_AVX-NEXT: vxorps %xmm1, %xmm1, %xmm1
130 ; X86_AVX-NEXT: vmaxss %xmm1, %xmm0, %xmm0
131 ; X86_AVX-NEXT: vcvttss2si %xmm0, %eax
132 ; X86_AVX-NEXT: ## kill: def $ax killed $ax killed $eax
135 ; X64_AVX-LABEL: test2:
137 ; X64_AVX-NEXT: vaddss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
138 ; X64_AVX-NEXT: vmulss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
139 ; X64_AVX-NEXT: vminss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
140 ; X64_AVX-NEXT: vxorps %xmm1, %xmm1, %xmm1
141 ; X64_AVX-NEXT: vmaxss %xmm1, %xmm0, %xmm0
142 ; X64_AVX-NEXT: vcvttss2si %xmm0, %eax
143 ; X64_AVX-NEXT: ## kill: def $ax killed $ax killed $eax
; Scalar IR arithmetic, then the result is placed in lane 0 (other lanes
; undef) before the min/max intrinsic clamp.
145 %tmp28 = fsub float %f, 1.000000e+00 ; <float> [#uses=1]
146 %tmp37 = fmul float %tmp28, 5.000000e-01 ; <float> [#uses=1]
147 %tmp375 = insertelement <4 x float> undef, float %tmp37, i32 0 ; <<4 x float>> [#uses=1]
148 %tmp48 = tail call <4 x float> @llvm.x86.sse.min.ss( <4 x float> %tmp375, <4 x float> < float 6.553500e+04, float undef, float undef, float undef > ) ; <<4 x float>> [#uses=1]
149 %tmp59 = tail call <4 x float> @llvm.x86.sse.max.ss( <4 x float> %tmp48, <4 x float> < float 0.000000e+00, float undef, float undef, float undef > ) ; <<4 x float>> [#uses=1]
150 %tmp = tail call i32 @llvm.x86.sse.cvttss2si( <4 x float> %tmp59 ) ; <i32> [#uses=1]
151 %tmp69 = trunc i32 %tmp to i16 ; <i16> [#uses=1]
; NOTE(review): the terminating 'ret i16 %tmp69' and closing '}' are not
; visible in this chunk -- confirm against the original test file.
; Declarations for the scalar SSE/SSE4.1 intrinsics exercised by the tests in
; this file, plus the opaque external @f() used by test4 to force a spill.
155 declare <4 x float> @llvm.x86.sse.sub.ss(<4 x float>, <4 x float>)
157 declare <4 x float> @llvm.x86.sse.mul.ss(<4 x float>, <4 x float>)
159 declare <4 x float> @llvm.x86.sse.min.ss(<4 x float>, <4 x float>)
161 declare <4 x float> @llvm.x86.sse.max.ss(<4 x float>, <4 x float>)
163 declare i32 @llvm.x86.sse.cvttss2si(<4 x float>)
165 declare <4 x float> @llvm.x86.sse41.round.ss(<4 x float>, <4 x float>, i32)
167 declare <4 x float> @f()
; test3: checks that the scalar load feeding round.ss's second operand is
; folded directly into a memory-operand roundss/vroundss (no separate movss).
169 define <4 x float> @test3(<4 x float> %A, ptr%b, i32 %C) nounwind {
172 ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
173 ; X86-NEXT: roundss $4, (%eax), %xmm0
178 ; X64-NEXT: roundss $4, (%rdi), %xmm0
181 ; X86_AVX-LABEL: test3:
183 ; X86_AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
184 ; X86_AVX-NEXT: vroundss $4, (%eax), %xmm0, %xmm0
187 ; X64_AVX-LABEL: test3:
189 ; X64_AVX-NEXT: vroundss $4, (%rdi), %xmm0, %xmm0
191 %a = load float , ptr%b
192 %B = insertelement <4 x float> undef, float %a, i32 0
193 %X = call <4 x float> @llvm.x86.sse41.round.ss(<4 x float> %A, <4 x float> %B, i32 4)
; NOTE(review): the terminating 'ret <4 x float> %X' and closing '}' are not
; visible in this chunk -- confirm against the original test file.
; test4: like test3, but the loaded value must survive a call to @f(), so it
; is spilled; checks the reload is folded into roundss/vroundss straight from
; the spill slot (the "16-byte Folded Reload" in the CHECK lines).
197 define <4 x float> @test4(<4 x float> %A, ptr%b, i32 %C) nounwind {
200 ; X86-NEXT: subl $28, %esp
201 ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
202 ; X86-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
203 ; X86-NEXT: movaps %xmm0, (%esp) ## 16-byte Spill
205 ; X86-NEXT: roundss $4, (%esp), %xmm0 ## 16-byte Folded Reload
206 ; X86-NEXT: addl $28, %esp
211 ; X64-NEXT: subq $24, %rsp
212 ; X64-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
213 ; X64-NEXT: movaps %xmm0, (%rsp) ## 16-byte Spill
215 ; X64-NEXT: roundss $4, (%rsp), %xmm0 ## 16-byte Folded Reload
216 ; X64-NEXT: addq $24, %rsp
219 ; X86_AVX-LABEL: test4:
221 ; X86_AVX-NEXT: subl $28, %esp
222 ; X86_AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
223 ; X86_AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
224 ; X86_AVX-NEXT: vmovaps %xmm0, (%esp) ## 16-byte Spill
225 ; X86_AVX-NEXT: calll _f
226 ; X86_AVX-NEXT: vroundss $4, (%esp), %xmm0, %xmm0 ## 16-byte Folded Reload
227 ; X86_AVX-NEXT: addl $28, %esp
230 ; X64_AVX-LABEL: test4:
232 ; X64_AVX-NEXT: subq $24, %rsp
233 ; X64_AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
234 ; X64_AVX-NEXT: vmovaps %xmm0, (%rsp) ## 16-byte Spill
235 ; X64_AVX-NEXT: callq _f
236 ; X64_AVX-NEXT: vroundss $4, (%rsp), %xmm0, %xmm0 ## 16-byte Folded Reload
237 ; X64_AVX-NEXT: addq $24, %rsp
239 %a = load float , ptr%b
240 %B = insertelement <4 x float> undef, float %a, i32 0
; The call clobbers all vector registers, forcing %B to be spilled across it.
241 %q = call <4 x float> @f()
242 %X = call <4 x float> @llvm.x86.sse41.round.ss(<4 x float> %q, <4 x float> %B, i32 4)
; NOTE(review): the terminating 'ret <4 x float> %X' and closing '}' are not
; visible in this chunk -- confirm against the original test file.
; test5: cvtsi2sd with all-constant inputs should be constant-folded to a
; single movaps/vmovaps of [1.28E+2,1.23321E+2] (element 0 replaced by the
; converted i32 128, element 1 passed through from the source vector).
247 define <2 x double> @test5() nounwind uwtable readnone noinline {
249 ; X86: ## %bb.0: ## %entry
250 ; X86-NEXT: movaps {{.*#+}} xmm0 = [1.28E+2,1.23321E+2]
254 ; X64: ## %bb.0: ## %entry
255 ; X64-NEXT: movaps {{.*#+}} xmm0 = [1.28E+2,1.23321E+2]
258 ; X86_AVX-LABEL: test5:
259 ; X86_AVX: ## %bb.0: ## %entry
260 ; X86_AVX-NEXT: vmovaps {{.*#+}} xmm0 = [1.28E+2,1.23321E+2]
263 ; X64_AVX-LABEL: test5:
264 ; X64_AVX: ## %bb.0: ## %entry
265 ; X64_AVX-NEXT: vmovaps {{.*#+}} xmm0 = [1.28E+2,1.23321E+2]
268 %0 = tail call <2 x double> @llvm.x86.sse2.cvtsi2sd(<2 x double> <double 4.569870e+02, double 1.233210e+02>, i32 128) nounwind readnone
; NOTE(review): the terminating 'ret <2 x double> %0' and closing '}' are not
; visible in this chunk -- confirm against the original test file.
272 declare <2 x double> @llvm.x86.sse2.cvtsi2sd(<2 x double>, i32) nounwind readnone
; minss_fold: a float loaded with align 1 and inserted into lane 0 (other
; lanes zeroed) as min.ss's second operand should still be folded into a
; memory-operand minss/vminss -- scalar loads need no alignment.
274 define <4 x float> @minss_fold(ptr %x, <4 x float> %y) {
275 ; X86-LABEL: minss_fold:
276 ; X86: ## %bb.0: ## %entry
277 ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
278 ; X86-NEXT: minss (%eax), %xmm0
281 ; X64-LABEL: minss_fold:
282 ; X64: ## %bb.0: ## %entry
283 ; X64-NEXT: minss (%rdi), %xmm0
286 ; X86_AVX-LABEL: minss_fold:
287 ; X86_AVX: ## %bb.0: ## %entry
288 ; X86_AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
289 ; X86_AVX-NEXT: vminss (%eax), %xmm0, %xmm0
292 ; X64_AVX-LABEL: minss_fold:
293 ; X64_AVX: ## %bb.0: ## %entry
294 ; X64_AVX-NEXT: vminss (%rdi), %xmm0, %xmm0
297 %0 = load float, ptr %x, align 1
298 %vecinit.i = insertelement <4 x float> undef, float %0, i32 0
299 %vecinit2.i = insertelement <4 x float> %vecinit.i, float 0.000000e+00, i32 1
300 %vecinit3.i = insertelement <4 x float> %vecinit2.i, float 0.000000e+00, i32 2
301 %vecinit4.i = insertelement <4 x float> %vecinit3.i, float 0.000000e+00, i32 3
302 %1 = tail call <4 x float> @llvm.x86.sse.min.ss(<4 x float> %y, <4 x float> %vecinit4.i)
; NOTE(review): the terminating 'ret <4 x float> %1' and closing '}' are not
; visible in this chunk -- confirm against the original test file.
; maxss_fold: same load-folding check as minss_fold, for max.ss -> maxss/vmaxss.
306 define <4 x float> @maxss_fold(ptr %x, <4 x float> %y) {
307 ; X86-LABEL: maxss_fold:
308 ; X86: ## %bb.0: ## %entry
309 ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
310 ; X86-NEXT: maxss (%eax), %xmm0
313 ; X64-LABEL: maxss_fold:
314 ; X64: ## %bb.0: ## %entry
315 ; X64-NEXT: maxss (%rdi), %xmm0
318 ; X86_AVX-LABEL: maxss_fold:
319 ; X86_AVX: ## %bb.0: ## %entry
320 ; X86_AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
321 ; X86_AVX-NEXT: vmaxss (%eax), %xmm0, %xmm0
324 ; X64_AVX-LABEL: maxss_fold:
325 ; X64_AVX: ## %bb.0: ## %entry
326 ; X64_AVX-NEXT: vmaxss (%rdi), %xmm0, %xmm0
329 %0 = load float, ptr %x, align 1
330 %vecinit.i = insertelement <4 x float> undef, float %0, i32 0
331 %vecinit2.i = insertelement <4 x float> %vecinit.i, float 0.000000e+00, i32 1
332 %vecinit3.i = insertelement <4 x float> %vecinit2.i, float 0.000000e+00, i32 2
333 %vecinit4.i = insertelement <4 x float> %vecinit3.i, float 0.000000e+00, i32 3
334 %1 = tail call <4 x float> @llvm.x86.sse.max.ss(<4 x float> %y, <4 x float> %vecinit4.i)
; NOTE(review): the terminating 'ret <4 x float> %1' and closing '}' are not
; visible in this chunk -- confirm against the original test file.
; cmpss_fold: same load-folding check for cmp.ss with predicate 0 (equal),
; expecting a memory-operand cmpeqss/vcmpeqss.
338 define <4 x float> @cmpss_fold(ptr %x, <4 x float> %y) {
339 ; X86-LABEL: cmpss_fold:
340 ; X86: ## %bb.0: ## %entry
341 ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
342 ; X86-NEXT: cmpeqss (%eax), %xmm0
345 ; X64-LABEL: cmpss_fold:
346 ; X64: ## %bb.0: ## %entry
347 ; X64-NEXT: cmpeqss (%rdi), %xmm0
350 ; X86_AVX-LABEL: cmpss_fold:
351 ; X86_AVX: ## %bb.0: ## %entry
352 ; X86_AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
353 ; X86_AVX-NEXT: vcmpeqss (%eax), %xmm0, %xmm0
356 ; X64_AVX-LABEL: cmpss_fold:
357 ; X64_AVX: ## %bb.0: ## %entry
358 ; X64_AVX-NEXT: vcmpeqss (%rdi), %xmm0, %xmm0
361 %0 = load float, ptr %x, align 1
362 %vecinit.i = insertelement <4 x float> undef, float %0, i32 0
363 %vecinit2.i = insertelement <4 x float> %vecinit.i, float 0.000000e+00, i32 1
364 %vecinit3.i = insertelement <4 x float> %vecinit2.i, float 0.000000e+00, i32 2
365 %vecinit4.i = insertelement <4 x float> %vecinit3.i, float 0.000000e+00, i32 3
366 %1 = tail call <4 x float> @llvm.x86.sse.cmp.ss(<4 x float> %y, <4 x float> %vecinit4.i, i8 0)
; NOTE(review): the terminating 'ret <4 x float> %1' and closing '}' are not
; visible in this chunk -- confirm against the original test file.
369 declare <4 x float> @llvm.x86.sse.cmp.ss(<4 x float>, <4 x float>, i8) nounwind readnone
; double_fold: the loaded scalar feeds BOTH min.ss and max.ss, so the load
; cannot be folded into either instruction; the CHECK lines expect one
; movss/vmovss into a register followed by register-form minss and maxss.
372 define <4 x float> @double_fold(ptr %x, <4 x float> %y) {
373 ; X86-LABEL: double_fold:
374 ; X86: ## %bb.0: ## %entry
375 ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
376 ; X86-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
377 ; X86-NEXT: movaps %xmm0, %xmm2
378 ; X86-NEXT: minss %xmm1, %xmm2
379 ; X86-NEXT: maxss %xmm1, %xmm0
380 ; X86-NEXT: addps %xmm2, %xmm0
383 ; X64-LABEL: double_fold:
384 ; X64: ## %bb.0: ## %entry
385 ; X64-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
386 ; X64-NEXT: movaps %xmm0, %xmm2
387 ; X64-NEXT: minss %xmm1, %xmm2
388 ; X64-NEXT: maxss %xmm1, %xmm0
389 ; X64-NEXT: addps %xmm2, %xmm0
392 ; X86_AVX-LABEL: double_fold:
393 ; X86_AVX: ## %bb.0: ## %entry
394 ; X86_AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
395 ; X86_AVX-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
396 ; X86_AVX-NEXT: vminss %xmm1, %xmm0, %xmm2
397 ; X86_AVX-NEXT: vmaxss %xmm1, %xmm0, %xmm0
398 ; X86_AVX-NEXT: vaddps %xmm0, %xmm2, %xmm0
401 ; X64_AVX-LABEL: double_fold:
402 ; X64_AVX: ## %bb.0: ## %entry
403 ; X64_AVX-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
404 ; X64_AVX-NEXT: vminss %xmm1, %xmm0, %xmm2
405 ; X64_AVX-NEXT: vmaxss %xmm1, %xmm0, %xmm0
406 ; X64_AVX-NEXT: vaddps %xmm0, %xmm2, %xmm0
409 %0 = load float, ptr %x, align 1
410 %vecinit.i = insertelement <4 x float> undef, float %0, i32 0
411 %1 = tail call <4 x float> @llvm.x86.sse.min.ss(<4 x float> %y, <4 x float> %vecinit.i)
412 %2 = tail call <4 x float> @llvm.x86.sse.max.ss(<4 x float> %y, <4 x float> %vecinit.i)
413 %3 = fadd <4 x float> %1, %2
; NOTE(review): this definition continues past the end of the visible chunk
; (no 'ret'/closing '}' shown) -- confirm against the original test file.