; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=x86_64-unknown-unknown -mattr=sse2 < %s | FileCheck %s --check-prefix=CHECK --check-prefix=SSE
; RUN: llc -mtriple=x86_64-unknown-unknown -mattr=avx < %s | FileCheck %s --check-prefix=CHECK --check-prefix=AVX

declare float @fminf(float, float)
declare double @fmin(double, double)
declare x86_fp80 @fminl(x86_fp80, x86_fp80)
declare float @llvm.minnum.f32(float, float)
declare double @llvm.minnum.f64(double, double)
declare x86_fp80 @llvm.minnum.f80(x86_fp80, x86_fp80)

declare <2 x float> @llvm.minnum.v2f32(<2 x float>, <2 x float>)
declare <4 x float> @llvm.minnum.v4f32(<4 x float>, <4 x float>)
declare <2 x double> @llvm.minnum.v2f64(<2 x double>, <2 x double>)
declare <4 x double> @llvm.minnum.v4f64(<4 x double>, <4 x double>)
declare <8 x double> @llvm.minnum.v8f64(<8 x double>, <8 x double>)
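
; The x86 (v)min* instructions return their second source operand (listed
; first in AT&T syntax) whenever either input is NaN, while llvm.minnum must
; return the non-NaN input, so without nnan the lowering wraps min* in a
; cmpunord + blend sequence that substitutes %y when %x is NaN.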

; FIXME: As the vector tests show, the SSE run shouldn't need this many moves.

define float @test_fminf(float %x, float %y) {
; SSE-LABEL: test_fminf:
; SSE:       # %bb.0:
; SSE-NEXT:    movaps %xmm0, %xmm2
; SSE-NEXT:    cmpunordss %xmm0, %xmm2
; SSE-NEXT:    movaps %xmm2, %xmm3
; SSE-NEXT:    andps %xmm1, %xmm3
; SSE-NEXT:    minss %xmm0, %xmm1
; SSE-NEXT:    andnps %xmm1, %xmm2
; SSE-NEXT:    orps %xmm3, %xmm2
; SSE-NEXT:    movaps %xmm2, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: test_fminf:
; AVX:       # %bb.0:
; AVX-NEXT:    vminss %xmm0, %xmm1, %xmm2
; AVX-NEXT:    vcmpunordss %xmm0, %xmm0, %xmm0
; AVX-NEXT:    vblendvps %xmm0, %xmm1, %xmm2, %xmm0
; AVX-NEXT:    retq
  %z = call float @fminf(float %x, float %y) readnone
  ret float %z
}

; FIXME: As the vector tests show, the SSE run shouldn't need this many moves.

define double @test_fmin(double %x, double %y) {
; SSE-LABEL: test_fmin:
; SSE:       # %bb.0:
; SSE-NEXT:    movapd %xmm0, %xmm2
; SSE-NEXT:    cmpunordsd %xmm0, %xmm2
; SSE-NEXT:    movapd %xmm2, %xmm3
; SSE-NEXT:    andpd %xmm1, %xmm3
; SSE-NEXT:    minsd %xmm0, %xmm1
; SSE-NEXT:    andnpd %xmm1, %xmm2
; SSE-NEXT:    orpd %xmm3, %xmm2
; SSE-NEXT:    movapd %xmm2, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: test_fmin:
; AVX:       # %bb.0:
; AVX-NEXT:    vminsd %xmm0, %xmm1, %xmm2
; AVX-NEXT:    vcmpunordsd %xmm0, %xmm0, %xmm0
; AVX-NEXT:    vblendvpd %xmm0, %xmm1, %xmm2, %xmm0
; AVX-NEXT:    retq
  %z = call double @fmin(double %x, double %y) readnone
  ret double %z
}
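
; There is no SSE/AVX min instruction for x86_fp80, so both the fminl libcall
; and the llvm.minnum.f80 intrinsic lower to a call to fminl.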

define x86_fp80 @test_fminl(x86_fp80 %x, x86_fp80 %y) {
; CHECK-LABEL: test_fminl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    subq $40, %rsp
; CHECK-NEXT:    .cfi_def_cfa_offset 48
; CHECK-NEXT:    fldt {{[0-9]+}}(%rsp)
; CHECK-NEXT:    fldt {{[0-9]+}}(%rsp)
; CHECK-NEXT:    fstpt {{[0-9]+}}(%rsp)
; CHECK-NEXT:    fstpt (%rsp)
; CHECK-NEXT:    callq fminl
; CHECK-NEXT:    addq $40, %rsp
; CHECK-NEXT:    .cfi_def_cfa_offset 8
; CHECK-NEXT:    retq
  %z = call x86_fp80 @fminl(x86_fp80 %x, x86_fp80 %y) readnone
  ret x86_fp80 %z
}

define float @test_intrinsic_fminf(float %x, float %y) {
; SSE-LABEL: test_intrinsic_fminf:
; SSE:       # %bb.0:
; SSE-NEXT:    movaps %xmm0, %xmm2
; SSE-NEXT:    cmpunordss %xmm0, %xmm2
; SSE-NEXT:    movaps %xmm2, %xmm3
; SSE-NEXT:    andps %xmm1, %xmm3
; SSE-NEXT:    minss %xmm0, %xmm1
; SSE-NEXT:    andnps %xmm1, %xmm2
; SSE-NEXT:    orps %xmm3, %xmm2
; SSE-NEXT:    movaps %xmm2, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: test_intrinsic_fminf:
; AVX:       # %bb.0:
; AVX-NEXT:    vminss %xmm0, %xmm1, %xmm2
; AVX-NEXT:    vcmpunordss %xmm0, %xmm0, %xmm0
; AVX-NEXT:    vblendvps %xmm0, %xmm1, %xmm2, %xmm0
; AVX-NEXT:    retq
  %z = call float @llvm.minnum.f32(float %x, float %y) readnone
  ret float %z
}

define double @test_intrinsic_fmin(double %x, double %y) {
; SSE-LABEL: test_intrinsic_fmin:
; SSE:       # %bb.0:
; SSE-NEXT:    movapd %xmm0, %xmm2
; SSE-NEXT:    cmpunordsd %xmm0, %xmm2
; SSE-NEXT:    movapd %xmm2, %xmm3
; SSE-NEXT:    andpd %xmm1, %xmm3
; SSE-NEXT:    minsd %xmm0, %xmm1
; SSE-NEXT:    andnpd %xmm1, %xmm2
; SSE-NEXT:    orpd %xmm3, %xmm2
; SSE-NEXT:    movapd %xmm2, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: test_intrinsic_fmin:
; AVX:       # %bb.0:
; AVX-NEXT:    vminsd %xmm0, %xmm1, %xmm2
; AVX-NEXT:    vcmpunordsd %xmm0, %xmm0, %xmm0
; AVX-NEXT:    vblendvpd %xmm0, %xmm1, %xmm2, %xmm0
; AVX-NEXT:    retq
  %z = call double @llvm.minnum.f64(double %x, double %y) readnone
  ret double %z
}

define x86_fp80 @test_intrinsic_fminl(x86_fp80 %x, x86_fp80 %y) {
; CHECK-LABEL: test_intrinsic_fminl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    subq $40, %rsp
; CHECK-NEXT:    .cfi_def_cfa_offset 48
; CHECK-NEXT:    fldt {{[0-9]+}}(%rsp)
; CHECK-NEXT:    fldt {{[0-9]+}}(%rsp)
; CHECK-NEXT:    fstpt {{[0-9]+}}(%rsp)
; CHECK-NEXT:    fstpt (%rsp)
; CHECK-NEXT:    callq fminl
; CHECK-NEXT:    addq $40, %rsp
; CHECK-NEXT:    .cfi_def_cfa_offset 8
; CHECK-NEXT:    retq
  %z = call x86_fp80 @llvm.minnum.f80(x86_fp80 %x, x86_fp80 %y) readnone
  ret x86_fp80 %z
}

define <2 x float> @test_intrinsic_fmin_v2f32(<2 x float> %x, <2 x float> %y) {
; SSE-LABEL: test_intrinsic_fmin_v2f32:
; SSE:       # %bb.0:
; SSE-NEXT:    movaps %xmm1, %xmm2
; SSE-NEXT:    minps %xmm0, %xmm2
; SSE-NEXT:    cmpunordps %xmm0, %xmm0
; SSE-NEXT:    andps %xmm0, %xmm1
; SSE-NEXT:    andnps %xmm2, %xmm0
; SSE-NEXT:    orps %xmm1, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: test_intrinsic_fmin_v2f32:
; AVX:       # %bb.0:
; AVX-NEXT:    vminps %xmm0, %xmm1, %xmm2
; AVX-NEXT:    vcmpunordps %xmm0, %xmm0, %xmm0
; AVX-NEXT:    vblendvps %xmm0, %xmm1, %xmm2, %xmm0
; AVX-NEXT:    retq
  %z = call <2 x float> @llvm.minnum.v2f32(<2 x float> %x, <2 x float> %y) readnone
  ret <2 x float> %z
}

define <4 x float> @test_intrinsic_fmin_v4f32(<4 x float> %x, <4 x float> %y) {
; SSE-LABEL: test_intrinsic_fmin_v4f32:
; SSE:       # %bb.0:
; SSE-NEXT:    movaps %xmm1, %xmm2
; SSE-NEXT:    minps %xmm0, %xmm2
; SSE-NEXT:    cmpunordps %xmm0, %xmm0
; SSE-NEXT:    andps %xmm0, %xmm1
; SSE-NEXT:    andnps %xmm2, %xmm0
; SSE-NEXT:    orps %xmm1, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: test_intrinsic_fmin_v4f32:
; AVX:       # %bb.0:
; AVX-NEXT:    vminps %xmm0, %xmm1, %xmm2
; AVX-NEXT:    vcmpunordps %xmm0, %xmm0, %xmm0
; AVX-NEXT:    vblendvps %xmm0, %xmm1, %xmm2, %xmm0
; AVX-NEXT:    retq
  %z = call <4 x float> @llvm.minnum.v4f32(<4 x float> %x, <4 x float> %y) readnone
  ret <4 x float> %z
}

define <2 x double> @test_intrinsic_fmin_v2f64(<2 x double> %x, <2 x double> %y) {
; SSE-LABEL: test_intrinsic_fmin_v2f64:
; SSE:       # %bb.0:
; SSE-NEXT:    movapd %xmm1, %xmm2
; SSE-NEXT:    minpd %xmm0, %xmm2
; SSE-NEXT:    cmpunordpd %xmm0, %xmm0
; SSE-NEXT:    andpd %xmm0, %xmm1
; SSE-NEXT:    andnpd %xmm2, %xmm0
; SSE-NEXT:    orpd %xmm1, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: test_intrinsic_fmin_v2f64:
; AVX:       # %bb.0:
; AVX-NEXT:    vminpd %xmm0, %xmm1, %xmm2
; AVX-NEXT:    vcmpunordpd %xmm0, %xmm0, %xmm0
; AVX-NEXT:    vblendvpd %xmm0, %xmm1, %xmm2, %xmm0
; AVX-NEXT:    retq
  %z = call <2 x double> @llvm.minnum.v2f64(<2 x double> %x, <2 x double> %y) readnone
  ret <2 x double> %z
}
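
; SSE has no 256-bit registers, so the wider vector types below are legalized
; into 128-bit halves with the blend pattern repeated per half, while AVX
; handles each <4 x double> in a single ymm sequence.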

define <4 x double> @test_intrinsic_fmin_v4f64(<4 x double> %x, <4 x double> %y) {
; SSE-LABEL: test_intrinsic_fmin_v4f64:
; SSE:       # %bb.0:
; SSE-NEXT:    movapd %xmm2, %xmm4
; SSE-NEXT:    minpd %xmm0, %xmm4
; SSE-NEXT:    cmpunordpd %xmm0, %xmm0
; SSE-NEXT:    andpd %xmm0, %xmm2
; SSE-NEXT:    andnpd %xmm4, %xmm0
; SSE-NEXT:    orpd %xmm2, %xmm0
; SSE-NEXT:    movapd %xmm3, %xmm2
; SSE-NEXT:    minpd %xmm1, %xmm2
; SSE-NEXT:    cmpunordpd %xmm1, %xmm1
; SSE-NEXT:    andpd %xmm1, %xmm3
; SSE-NEXT:    andnpd %xmm2, %xmm1
; SSE-NEXT:    orpd %xmm3, %xmm1
; SSE-NEXT:    retq
;
; AVX-LABEL: test_intrinsic_fmin_v4f64:
; AVX:       # %bb.0:
; AVX-NEXT:    vminpd %ymm0, %ymm1, %ymm2
; AVX-NEXT:    vcmpunordpd %ymm0, %ymm0, %ymm0
; AVX-NEXT:    vblendvpd %ymm0, %ymm1, %ymm2, %ymm0
; AVX-NEXT:    retq
  %z = call <4 x double> @llvm.minnum.v4f64(<4 x double> %x, <4 x double> %y) readnone
  ret <4 x double> %z
}

define <8 x double> @test_intrinsic_fmin_v8f64(<8 x double> %x, <8 x double> %y) {
; SSE-LABEL: test_intrinsic_fmin_v8f64:
; SSE:       # %bb.0:
; SSE-NEXT:    movapd %xmm4, %xmm8
; SSE-NEXT:    minpd %xmm0, %xmm8
; SSE-NEXT:    cmpunordpd %xmm0, %xmm0
; SSE-NEXT:    andpd %xmm0, %xmm4
; SSE-NEXT:    andnpd %xmm8, %xmm0
; SSE-NEXT:    orpd %xmm4, %xmm0
; SSE-NEXT:    movapd %xmm5, %xmm4
; SSE-NEXT:    minpd %xmm1, %xmm4
; SSE-NEXT:    cmpunordpd %xmm1, %xmm1
; SSE-NEXT:    andpd %xmm1, %xmm5
; SSE-NEXT:    andnpd %xmm4, %xmm1
; SSE-NEXT:    orpd %xmm5, %xmm1
; SSE-NEXT:    movapd %xmm6, %xmm4
; SSE-NEXT:    minpd %xmm2, %xmm4
; SSE-NEXT:    cmpunordpd %xmm2, %xmm2
; SSE-NEXT:    andpd %xmm2, %xmm6
; SSE-NEXT:    andnpd %xmm4, %xmm2
; SSE-NEXT:    orpd %xmm6, %xmm2
; SSE-NEXT:    movapd %xmm7, %xmm4
; SSE-NEXT:    minpd %xmm3, %xmm4
; SSE-NEXT:    cmpunordpd %xmm3, %xmm3
; SSE-NEXT:    andpd %xmm3, %xmm7
; SSE-NEXT:    andnpd %xmm4, %xmm3
; SSE-NEXT:    orpd %xmm7, %xmm3
; SSE-NEXT:    retq
;
; AVX-LABEL: test_intrinsic_fmin_v8f64:
; AVX:       # %bb.0:
; AVX-NEXT:    vminpd %ymm0, %ymm2, %ymm4
; AVX-NEXT:    vcmpunordpd %ymm0, %ymm0, %ymm0
; AVX-NEXT:    vblendvpd %ymm0, %ymm2, %ymm4, %ymm0
; AVX-NEXT:    vminpd %ymm1, %ymm3, %ymm2
; AVX-NEXT:    vcmpunordpd %ymm1, %ymm1, %ymm1
; AVX-NEXT:    vblendvpd %ymm1, %ymm3, %ymm2, %ymm1
; AVX-NEXT:    retq
  %z = call <8 x double> @llvm.minnum.v8f64(<8 x double> %x, <8 x double> %y) readnone
  ret <8 x double> %z
}

; The IR-level FMF propagate to the node. With nnan, there's no need to blend.

define float @minnum_intrinsic_nnan_fmf_f32(float %a, float %b) {
; SSE-LABEL: minnum_intrinsic_nnan_fmf_f32:
; SSE:       # %bb.0:
; SSE-NEXT:    minss %xmm1, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: minnum_intrinsic_nnan_fmf_f32:
; AVX:       # %bb.0:
; AVX-NEXT:    vminss %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq
  %r = tail call nnan float @llvm.minnum.f32(float %a, float %b)
  ret float %r
}

; Make sure vectors work too.

define <2 x double> @minnum_intrinsic_nnan_fmf_v2f64(<2 x double> %a, <2 x double> %b) {
; SSE-LABEL: minnum_intrinsic_nnan_fmf_v2f64:
; SSE:       # %bb.0:
; SSE-NEXT:    minpd %xmm1, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: minnum_intrinsic_nnan_fmf_v2f64:
; AVX:       # %bb.0:
; AVX-NEXT:    vminpd %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq
  %r = tail call nnan <2 x double> @llvm.minnum.v2f64(<2 x double> %a, <2 x double> %b)
  ret <2 x double> %r
}

; Current (but legacy someday) behavior: the function-level "no-nans-fp-math"
; attribute should also enable the fold.

define double @minnum_intrinsic_nnan_attr_f64(double %a, double %b) #0 {
; SSE-LABEL: minnum_intrinsic_nnan_attr_f64:
; SSE:       # %bb.0:
; SSE-NEXT:    minsd %xmm1, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: minnum_intrinsic_nnan_attr_f64:
; AVX:       # %bb.0:
; AVX-NEXT:    vminsd %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq
  %r = tail call double @llvm.minnum.f64(double %a, double %b)
  ret double %r
}

; Make sure vectors work too.

define <4 x float> @minnum_intrinsic_nnan_attr_v4f32(<4 x float> %a, <4 x float> %b) #0 {
; SSE-LABEL: minnum_intrinsic_nnan_attr_v4f32:
; SSE:       # %bb.0:
; SSE-NEXT:    minps %xmm1, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: minnum_intrinsic_nnan_attr_v4f32:
; AVX:       # %bb.0:
; AVX-NEXT:    vminps %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq
  %r = tail call <4 x float> @llvm.minnum.v4f32(<4 x float> %a, <4 x float> %b)
  ret <4 x float> %r
}

attributes #0 = { "no-nans-fp-math"="true" }