; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSE2
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSE41
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX1
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX2
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512bw | FileCheck %s --check-prefix=ALL --check-prefix=AVX512 --check-prefix=AVX512BW
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512bw,+avx512vl | FileCheck %s --check-prefix=ALL --check-prefix=AVX512 --check-prefix=AVX512VL
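
;
; vXf32
;
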
define float @test_v2f32(<2 x float> %a0) {
; SSE2-LABEL: test_v2f32:
; SSE2:       # %bb.0:
; SSE2-NEXT:    movaps %xmm0, %xmm1
; SSE2-NEXT:    shufps {{.*#+}} xmm1 = xmm1[1,1],xmm0[2,3]
; SSE2-NEXT:    maxss %xmm1, %xmm0
; SSE2-NEXT:    retq
;
; SSE41-LABEL: test_v2f32:
; SSE41:       # %bb.0:
; SSE41-NEXT:    movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; SSE41-NEXT:    maxss %xmm1, %xmm0
; SSE41-NEXT:    retq
;
; AVX-LABEL: test_v2f32:
; AVX:       # %bb.0:
; AVX-NEXT:    vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX-NEXT:    vmaxss %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq
;
; AVX512-LABEL: test_v2f32:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX512-NEXT:    vmaxss %xmm1, %xmm0, %xmm0
; AVX512-NEXT:    retq
  %1 = call nnan float @llvm.experimental.vector.reduce.fmax.v2f32(<2 x float> %a0)
  ret float %1
}

define float @test_v4f32(<4 x float> %a0) {
; SSE2-LABEL: test_v4f32:
; SSE2:       # %bb.0:
; SSE2-NEXT:    movaps %xmm0, %xmm1
; SSE2-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
; SSE2-NEXT:    maxps %xmm1, %xmm0
; SSE2-NEXT:    movaps %xmm0, %xmm1
; SSE2-NEXT:    shufps {{.*#+}} xmm1 = xmm1[1,1],xmm0[2,3]
; SSE2-NEXT:    maxss %xmm1, %xmm0
; SSE2-NEXT:    retq
;
; SSE41-LABEL: test_v4f32:
; SSE41:       # %bb.0:
; SSE41-NEXT:    movaps %xmm0, %xmm1
; SSE41-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
; SSE41-NEXT:    maxps %xmm1, %xmm0
; SSE41-NEXT:    movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; SSE41-NEXT:    maxss %xmm1, %xmm0
; SSE41-NEXT:    retq
;
; AVX-LABEL: test_v4f32:
; AVX:       # %bb.0:
; AVX-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX-NEXT:    vmaxps %xmm1, %xmm0, %xmm0
; AVX-NEXT:    vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX-NEXT:    vmaxss %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq
;
; AVX512-LABEL: test_v4f32:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX512-NEXT:    vmaxps %xmm1, %xmm0, %xmm0
; AVX512-NEXT:    vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX512-NEXT:    vmaxss %xmm1, %xmm0, %xmm0
; AVX512-NEXT:    retq
  %1 = call nnan float @llvm.experimental.vector.reduce.fmax.v4f32(<4 x float> %a0)
  ret float %1
}

define float @test_v8f32(<8 x float> %a0) {
; SSE2-LABEL: test_v8f32:
; SSE2:       # %bb.0:
; SSE2-NEXT:    maxps %xmm1, %xmm0
; SSE2-NEXT:    movaps %xmm0, %xmm1
; SSE2-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
; SSE2-NEXT:    maxps %xmm1, %xmm0
; SSE2-NEXT:    movaps %xmm0, %xmm1
; SSE2-NEXT:    shufps {{.*#+}} xmm1 = xmm1[1,1],xmm0[2,3]
; SSE2-NEXT:    maxss %xmm1, %xmm0
; SSE2-NEXT:    retq
;
; SSE41-LABEL: test_v8f32:
; SSE41:       # %bb.0:
; SSE41-NEXT:    maxps %xmm1, %xmm0
; SSE41-NEXT:    movaps %xmm0, %xmm1
; SSE41-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
; SSE41-NEXT:    maxps %xmm1, %xmm0
; SSE41-NEXT:    movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; SSE41-NEXT:    maxss %xmm1, %xmm0
; SSE41-NEXT:    retq
;
; AVX-LABEL: test_v8f32:
; AVX:       # %bb.0:
; AVX-NEXT:    vextractf128 $1, %ymm0, %xmm1
; AVX-NEXT:    vmaxps %xmm1, %xmm0, %xmm0
; AVX-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX-NEXT:    vmaxps %xmm1, %xmm0, %xmm0
; AVX-NEXT:    vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX-NEXT:    vmaxss %xmm1, %xmm0, %xmm0
; AVX-NEXT:    vzeroupper
; AVX-NEXT:    retq
;
; AVX512-LABEL: test_v8f32:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vextractf128 $1, %ymm0, %xmm1
; AVX512-NEXT:    vmaxps %xmm1, %xmm0, %xmm0
; AVX512-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX512-NEXT:    vmaxps %xmm1, %xmm0, %xmm0
; AVX512-NEXT:    vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX512-NEXT:    vmaxss %xmm1, %xmm0, %xmm0
; AVX512-NEXT:    vzeroupper
; AVX512-NEXT:    retq
  %1 = call nnan float @llvm.experimental.vector.reduce.fmax.v8f32(<8 x float> %a0)
  ret float %1
}

define float @test_v16f32(<16 x float> %a0) {
; SSE2-LABEL: test_v16f32:
; SSE2:       # %bb.0:
; SSE2-NEXT:    maxps %xmm3, %xmm1
; SSE2-NEXT:    maxps %xmm2, %xmm0
; SSE2-NEXT:    maxps %xmm1, %xmm0
; SSE2-NEXT:    movaps %xmm0, %xmm1
; SSE2-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
; SSE2-NEXT:    maxps %xmm1, %xmm0
; SSE2-NEXT:    movaps %xmm0, %xmm1
; SSE2-NEXT:    shufps {{.*#+}} xmm1 = xmm1[1,1],xmm0[2,3]
; SSE2-NEXT:    maxss %xmm1, %xmm0
; SSE2-NEXT:    retq
;
; SSE41-LABEL: test_v16f32:
; SSE41:       # %bb.0:
; SSE41-NEXT:    maxps %xmm3, %xmm1
; SSE41-NEXT:    maxps %xmm2, %xmm0
; SSE41-NEXT:    maxps %xmm1, %xmm0
; SSE41-NEXT:    movaps %xmm0, %xmm1
; SSE41-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
; SSE41-NEXT:    maxps %xmm1, %xmm0
; SSE41-NEXT:    movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; SSE41-NEXT:    maxss %xmm1, %xmm0
; SSE41-NEXT:    retq
;
; AVX-LABEL: test_v16f32:
; AVX:       # %bb.0:
; AVX-NEXT:    vmaxps %ymm1, %ymm0, %ymm0
; AVX-NEXT:    vextractf128 $1, %ymm0, %xmm1
; AVX-NEXT:    vmaxps %xmm1, %xmm0, %xmm0
; AVX-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX-NEXT:    vmaxps %xmm1, %xmm0, %xmm0
; AVX-NEXT:    vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX-NEXT:    vmaxss %xmm1, %xmm0, %xmm0
; AVX-NEXT:    vzeroupper
; AVX-NEXT:    retq
;
; AVX512-LABEL: test_v16f32:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vextractf64x4 $1, %zmm0, %ymm1
; AVX512-NEXT:    vmaxps %zmm1, %zmm0, %zmm0
; AVX512-NEXT:    vextractf128 $1, %ymm0, %xmm1
; AVX512-NEXT:    vmaxps %xmm1, %xmm0, %xmm0
; AVX512-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX512-NEXT:    vmaxps %xmm1, %xmm0, %xmm0
; AVX512-NEXT:    vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX512-NEXT:    vmaxss %xmm1, %xmm0, %xmm0
; AVX512-NEXT:    vzeroupper
; AVX512-NEXT:    retq
  %1 = call nnan float @llvm.experimental.vector.reduce.fmax.v16f32(<16 x float> %a0)
  ret float %1
}

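;
; vXf64
;
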
define double @test_v2f64(<2 x double> %a0) {
; SSE-LABEL: test_v2f64:
; SSE:       # %bb.0:
; SSE-NEXT:    movapd %xmm0, %xmm1
; SSE-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
; SSE-NEXT:    maxsd %xmm1, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: test_v2f64:
; AVX:       # %bb.0:
; AVX-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX-NEXT:    vmaxsd %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq
;
; AVX512-LABEL: test_v2f64:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX512-NEXT:    vmaxsd %xmm1, %xmm0, %xmm0
; AVX512-NEXT:    retq
  %1 = call nnan double @llvm.experimental.vector.reduce.fmax.v2f64(<2 x double> %a0)
  ret double %1
}

define double @test_v4f64(<4 x double> %a0) {
; SSE-LABEL: test_v4f64:
; SSE:       # %bb.0:
; SSE-NEXT:    maxpd %xmm1, %xmm0
; SSE-NEXT:    movapd %xmm0, %xmm1
; SSE-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
; SSE-NEXT:    maxsd %xmm1, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: test_v4f64:
; AVX:       # %bb.0:
; AVX-NEXT:    vextractf128 $1, %ymm0, %xmm1
; AVX-NEXT:    vmaxpd %xmm1, %xmm0, %xmm0
; AVX-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX-NEXT:    vmaxsd %xmm1, %xmm0, %xmm0
; AVX-NEXT:    vzeroupper
; AVX-NEXT:    retq
;
; AVX512-LABEL: test_v4f64:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vextractf128 $1, %ymm0, %xmm1
; AVX512-NEXT:    vmaxpd %xmm1, %xmm0, %xmm0
; AVX512-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX512-NEXT:    vmaxsd %xmm1, %xmm0, %xmm0
; AVX512-NEXT:    vzeroupper
; AVX512-NEXT:    retq
  %1 = call nnan double @llvm.experimental.vector.reduce.fmax.v4f64(<4 x double> %a0)
  ret double %1
}

define double @test_v8f64(<8 x double> %a0) {
; SSE-LABEL: test_v8f64:
; SSE:       # %bb.0:
; SSE-NEXT:    maxpd %xmm3, %xmm1
; SSE-NEXT:    maxpd %xmm2, %xmm0
; SSE-NEXT:    maxpd %xmm1, %xmm0
; SSE-NEXT:    movapd %xmm0, %xmm1
; SSE-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
; SSE-NEXT:    maxsd %xmm1, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: test_v8f64:
; AVX:       # %bb.0:
; AVX-NEXT:    vmaxpd %ymm1, %ymm0, %ymm0
; AVX-NEXT:    vextractf128 $1, %ymm0, %xmm1
; AVX-NEXT:    vmaxpd %xmm1, %xmm0, %xmm0
; AVX-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX-NEXT:    vmaxsd %xmm1, %xmm0, %xmm0
; AVX-NEXT:    vzeroupper
; AVX-NEXT:    retq
;
; AVX512-LABEL: test_v8f64:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vextractf64x4 $1, %zmm0, %ymm1
; AVX512-NEXT:    vmaxpd %zmm1, %zmm0, %zmm0
; AVX512-NEXT:    vextractf128 $1, %ymm0, %xmm1
; AVX512-NEXT:    vmaxpd %xmm1, %xmm0, %xmm0
; AVX512-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX512-NEXT:    vmaxsd %xmm1, %xmm0, %xmm0
; AVX512-NEXT:    vzeroupper
; AVX512-NEXT:    retq
  %1 = call nnan double @llvm.experimental.vector.reduce.fmax.v8f64(<8 x double> %a0)
  ret double %1
}

define double @test_v16f64(<16 x double> %a0) {
; SSE-LABEL: test_v16f64:
; SSE:       # %bb.0:
; SSE-NEXT:    maxpd %xmm6, %xmm2
; SSE-NEXT:    maxpd %xmm4, %xmm0
; SSE-NEXT:    maxpd %xmm2, %xmm0
; SSE-NEXT:    maxpd %xmm7, %xmm3
; SSE-NEXT:    maxpd %xmm5, %xmm1
; SSE-NEXT:    maxpd %xmm3, %xmm1
; SSE-NEXT:    maxpd %xmm1, %xmm0
; SSE-NEXT:    movapd %xmm0, %xmm1
; SSE-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
; SSE-NEXT:    maxsd %xmm1, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: test_v16f64:
; AVX:       # %bb.0:
; AVX-NEXT:    vmaxpd %ymm3, %ymm1, %ymm1
; AVX-NEXT:    vmaxpd %ymm2, %ymm0, %ymm0
; AVX-NEXT:    vmaxpd %ymm1, %ymm0, %ymm0
; AVX-NEXT:    vextractf128 $1, %ymm0, %xmm1
; AVX-NEXT:    vmaxpd %xmm1, %xmm0, %xmm0
; AVX-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX-NEXT:    vmaxsd %xmm1, %xmm0, %xmm0
; AVX-NEXT:    vzeroupper
; AVX-NEXT:    retq
;
; AVX512-LABEL: test_v16f64:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vmaxpd %zmm1, %zmm0, %zmm0
; AVX512-NEXT:    vextractf64x4 $1, %zmm0, %ymm1
; AVX512-NEXT:    vmaxpd %zmm1, %zmm0, %zmm0
; AVX512-NEXT:    vextractf128 $1, %ymm0, %xmm1
; AVX512-NEXT:    vmaxpd %xmm1, %xmm0, %xmm0
; AVX512-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX512-NEXT:    vmaxsd %xmm1, %xmm0, %xmm0
; AVX512-NEXT:    vzeroupper
; AVX512-NEXT:    retq
  %1 = call nnan double @llvm.experimental.vector.reduce.fmax.v16f64(<16 x double> %a0)
  ret double %1
}

declare float @llvm.experimental.vector.reduce.fmax.v2f32(<2 x float>)
declare float @llvm.experimental.vector.reduce.fmax.v4f32(<4 x float>)
declare float @llvm.experimental.vector.reduce.fmax.v8f32(<8 x float>)
declare float @llvm.experimental.vector.reduce.fmax.v16f32(<16 x float>)

declare double @llvm.experimental.vector.reduce.fmax.v2f64(<2 x double>)
declare double @llvm.experimental.vector.reduce.fmax.v4f64(<4 x double>)
declare double @llvm.experimental.vector.reduce.fmax.v8f64(<8 x double>)
declare double @llvm.experimental.vector.reduce.fmax.v16f64(<16 x double>)