; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=+sse2 | FileCheck %s --check-prefix=SSE2
; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=+sse4.1 | FileCheck %s --check-prefixes=SSE41
; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=+avx | FileCheck %s --check-prefixes=AVX,AVX1
; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=+avx512f,+avx512vl | FileCheck %s --check-prefixes=AVX,AVX512,AVX512F
; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=+avx512fp16,+avx512vl | FileCheck %s --check-prefixes=AVX,AVX512,AVX512FP16
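
; Note: every round*/vround*/vrndscale* check below uses immediate $8
; (0b1000): bits 1:0 = 00 select round-to-nearest-even, bit 2 = 0 takes the
; mode from the immediate rather than MXCSR, and bit 3 (_MM_FROUND_NO_EXC)
; suppresses the precision exception, matching llvm.roundeven's
; ties-to-even, no-inexact semantics.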
define half @roundeven_f16(half %h) {
; SSE2-LABEL: roundeven_f16:
; SSE2: ## %bb.0: ## %entry
; SSE2-NEXT: pushq %rax
; SSE2-NEXT: .cfi_def_cfa_offset 16
; SSE2-NEXT: pextrw $0, %xmm0, %eax
; SSE2-NEXT: movzwl %ax, %edi
; SSE2-NEXT: callq ___extendhfsf2
; SSE2-NEXT: callq _roundevenf
; SSE2-NEXT: callq ___truncsfhf2
; SSE2-NEXT: ## kill: def $ax killed $ax def $eax
; SSE2-NEXT: pinsrw $0, %eax, %xmm0
; SSE2-NEXT: popq %rax
; SSE2-NEXT: retq
;
; SSE41-LABEL: roundeven_f16:
; SSE41: ## %bb.0: ## %entry
; SSE41-NEXT: pushq %rax
; SSE41-NEXT: .cfi_def_cfa_offset 16
; SSE41-NEXT: pextrw $0, %xmm0, %eax
; SSE41-NEXT: movzwl %ax, %edi
; SSE41-NEXT: callq ___extendhfsf2
; SSE41-NEXT: roundss $8, %xmm0, %xmm0
; SSE41-NEXT: callq ___truncsfhf2
; SSE41-NEXT: ## kill: def $ax killed $ax def $eax
; SSE41-NEXT: pinsrw $0, %eax, %xmm0
; SSE41-NEXT: popq %rax
; SSE41-NEXT: retq
;
; AVX1-LABEL: roundeven_f16:
; AVX1: ## %bb.0: ## %entry
; AVX1-NEXT: pushq %rax
; AVX1-NEXT: .cfi_def_cfa_offset 16
; AVX1-NEXT: vpextrw $0, %xmm0, %eax
; AVX1-NEXT: movzwl %ax, %edi
; AVX1-NEXT: callq ___extendhfsf2
; AVX1-NEXT: vroundss $8, %xmm0, %xmm0, %xmm0
; AVX1-NEXT: callq ___truncsfhf2
; AVX1-NEXT: ## kill: def $ax killed $ax def $eax
; AVX1-NEXT: vpinsrw $0, %eax, %xmm0, %xmm0
; AVX1-NEXT: popq %rax
; AVX1-NEXT: retq
;
; AVX512F-LABEL: roundeven_f16:
; AVX512F: ## %bb.0: ## %entry
; AVX512F-NEXT: vpextrw $0, %xmm0, %eax
; AVX512F-NEXT: movzwl %ax, %eax
; AVX512F-NEXT: vmovd %eax, %xmm0
; AVX512F-NEXT: vcvtph2ps %xmm0, %xmm0
; AVX512F-NEXT: vroundss $8, %xmm0, %xmm0, %xmm0
; AVX512F-NEXT: vcvtps2ph $4, %xmm0, %xmm0
; AVX512F-NEXT: vmovd %xmm0, %eax
; AVX512F-NEXT: vpinsrw $0, %eax, %xmm0, %xmm0
; AVX512F-NEXT: retq
;
; AVX512FP16-LABEL: roundeven_f16:
; AVX512FP16: ## %bb.0: ## %entry
; AVX512FP16-NEXT: vrndscalesh $8, %xmm0, %xmm0, %xmm0
; AVX512FP16-NEXT: retq
entry:
  %a = call half @llvm.roundeven.f16(half %h)
  ret half %a
}
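
; With SSE4.1 or AVX, the scalar f32/f64 cases below lower to a single
; roundss/roundsd (vroundss/vroundsd). Plain SSE2 has no round instruction
; and instead emits a tail call to the libm roundevenf/roundeven routines.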
define float @roundeven_f32(float %x) {
; SSE2-LABEL: roundeven_f32:
; SSE2: ## %bb.0:
; SSE2-NEXT: jmp _roundevenf ## TAILCALL
;
; SSE41-LABEL: roundeven_f32:
; SSE41: ## %bb.0:
; SSE41-NEXT: roundss $8, %xmm0, %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: roundeven_f32:
; AVX: ## %bb.0:
; AVX-NEXT: vroundss $8, %xmm0, %xmm0, %xmm0
; AVX-NEXT: retq
  %a = call float @llvm.roundeven.f32(float %x)
  ret float %a
}

define double @roundeven_f64(double %x) {
; SSE2-LABEL: roundeven_f64:
; SSE2: ## %bb.0:
; SSE2-NEXT: jmp _roundeven ## TAILCALL
;
; SSE41-LABEL: roundeven_f64:
; SSE41: ## %bb.0:
; SSE41-NEXT: roundsd $8, %xmm0, %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: roundeven_f64:
; AVX: ## %bb.0:
; AVX-NEXT: vroundsd $8, %xmm0, %xmm0, %xmm0
; AVX-NEXT: retq
  %a = call double @llvm.roundeven.f64(double %x)
  ret double %a
}

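; For the vector cases, SSE2 must scalarize: each lane is extracted with a
; shuffle (shufps/movhlps), rounded via a call to roundevenf/roundeven, and
; the results repacked with unpcklps/movlhps, spilling and reloading live
; values around every call. SSE4.1 rounds a full 128-bit vector with one
; roundps/roundpd, and AVX a 256-bit one with vroundps/vroundpd.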
define <4 x float> @roundeven_v4f32(<4 x float> %x) {
; SSE2-LABEL: roundeven_v4f32:
; SSE2: ## %bb.0:
; SSE2-NEXT: subq $56, %rsp
; SSE2-NEXT: .cfi_def_cfa_offset 64
; SSE2-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,3,3,3]
; SSE2-NEXT: callq _roundevenf
; SSE2-NEXT: movaps %xmm0, (%rsp) ## 16-byte Spill
; SSE2-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Reload
; SSE2-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
; SSE2-NEXT: callq _roundevenf
; SSE2-NEXT: unpcklps (%rsp), %xmm0 ## 16-byte Folded Reload
; SSE2-NEXT: ## xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
; SSE2-NEXT: movaps %xmm0, (%rsp) ## 16-byte Spill
; SSE2-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Reload
; SSE2-NEXT: callq _roundevenf
; SSE2-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
; SSE2-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Reload
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1,1,1]
; SSE2-NEXT: callq _roundevenf
; SSE2-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 ## 16-byte Reload
; SSE2-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; SSE2-NEXT: unpcklpd (%rsp), %xmm1 ## 16-byte Folded Reload
; SSE2-NEXT: ## xmm1 = xmm1[0],mem[0]
; SSE2-NEXT: movaps %xmm1, %xmm0
; SSE2-NEXT: addq $56, %rsp
; SSE2-NEXT: retq
;
; SSE41-LABEL: roundeven_v4f32:
; SSE41: ## %bb.0:
; SSE41-NEXT: roundps $8, %xmm0, %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: roundeven_v4f32:
; AVX: ## %bb.0:
; AVX-NEXT: vroundps $8, %xmm0, %xmm0
; AVX-NEXT: retq
  %a = call <4 x float> @llvm.roundeven.v4f32(<4 x float> %x)
  ret <4 x float> %a
}

define <2 x double> @roundeven_v2f64(<2 x double> %x) {
; SSE2-LABEL: roundeven_v2f64:
; SSE2: ## %bb.0:
; SSE2-NEXT: subq $40, %rsp
; SSE2-NEXT: .cfi_def_cfa_offset 48
; SSE2-NEXT: movaps %xmm0, (%rsp) ## 16-byte Spill
; SSE2-NEXT: callq _roundeven
; SSE2-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
; SSE2-NEXT: movaps (%rsp), %xmm0 ## 16-byte Reload
; SSE2-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
; SSE2-NEXT: callq _roundeven
; SSE2-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 ## 16-byte Reload
; SSE2-NEXT: movlhps {{.*#+}} xmm1 = xmm1[0],xmm0[0]
; SSE2-NEXT: movaps %xmm1, %xmm0
; SSE2-NEXT: addq $40, %rsp
; SSE2-NEXT: retq
;
; SSE41-LABEL: roundeven_v2f64:
; SSE41: ## %bb.0:
; SSE41-NEXT: roundpd $8, %xmm0, %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: roundeven_v2f64:
; AVX: ## %bb.0:
; AVX-NEXT: vroundpd $8, %xmm0, %xmm0
; AVX-NEXT: retq
  %a = call <2 x double> @llvm.roundeven.v2f64(<2 x double> %x)
  ret <2 x double> %a
}

define <8 x float> @roundeven_v8f32(<8 x float> %x) {
; SSE2-LABEL: roundeven_v8f32:
; SSE2: ## %bb.0:
; SSE2-NEXT: subq $72, %rsp
; SSE2-NEXT: .cfi_def_cfa_offset 80
; SSE2-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
; SSE2-NEXT: movaps %xmm0, (%rsp) ## 16-byte Spill
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,3,3,3]
; SSE2-NEXT: callq _roundevenf
; SSE2-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
; SSE2-NEXT: movaps (%rsp), %xmm0 ## 16-byte Reload
; SSE2-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
; SSE2-NEXT: callq _roundevenf
; SSE2-NEXT: unpcklps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Folded Reload
; SSE2-NEXT: ## xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
; SSE2-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
; SSE2-NEXT: movaps (%rsp), %xmm0 ## 16-byte Reload
; SSE2-NEXT: callq _roundevenf
; SSE2-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
; SSE2-NEXT: movaps (%rsp), %xmm0 ## 16-byte Reload
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1,1,1]
; SSE2-NEXT: callq _roundevenf
; SSE2-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 ## 16-byte Reload
; SSE2-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; SSE2-NEXT: unpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 ## 16-byte Folded Reload
; SSE2-NEXT: ## xmm1 = xmm1[0],mem[0]
; SSE2-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
; SSE2-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Reload
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,3,3,3]
; SSE2-NEXT: callq _roundevenf
; SSE2-NEXT: movaps %xmm0, (%rsp) ## 16-byte Spill
; SSE2-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Reload
; SSE2-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
; SSE2-NEXT: callq _roundevenf
; SSE2-NEXT: unpcklps (%rsp), %xmm0 ## 16-byte Folded Reload
; SSE2-NEXT: ## xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
; SSE2-NEXT: movaps %xmm0, (%rsp) ## 16-byte Spill
; SSE2-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Reload
; SSE2-NEXT: callq _roundevenf
; SSE2-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
; SSE2-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Reload
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1,1,1]
; SSE2-NEXT: callq _roundevenf
; SSE2-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 ## 16-byte Reload
; SSE2-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; SSE2-NEXT: unpcklpd (%rsp), %xmm1 ## 16-byte Folded Reload
; SSE2-NEXT: ## xmm1 = xmm1[0],mem[0]
; SSE2-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Reload
; SSE2-NEXT: addq $72, %rsp
; SSE2-NEXT: retq
;
; SSE41-LABEL: roundeven_v8f32:
; SSE41: ## %bb.0:
; SSE41-NEXT: roundps $8, %xmm0, %xmm0
; SSE41-NEXT: roundps $8, %xmm1, %xmm1
; SSE41-NEXT: retq
;
; AVX-LABEL: roundeven_v8f32:
; AVX: ## %bb.0:
; AVX-NEXT: vroundps $8, %ymm0, %ymm0
; AVX-NEXT: retq
  %a = call <8 x float> @llvm.roundeven.v8f32(<8 x float> %x)
  ret <8 x float> %a
}

define <4 x double> @roundeven_v4f64(<4 x double> %x) {
; SSE2-LABEL: roundeven_v4f64:
; SSE2: ## %bb.0:
; SSE2-NEXT: subq $56, %rsp
; SSE2-NEXT: .cfi_def_cfa_offset 64
; SSE2-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
; SSE2-NEXT: movaps %xmm0, (%rsp) ## 16-byte Spill
; SSE2-NEXT: callq _roundeven
; SSE2-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
; SSE2-NEXT: movaps (%rsp), %xmm0 ## 16-byte Reload
; SSE2-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
; SSE2-NEXT: callq _roundeven
; SSE2-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 ## 16-byte Reload
; SSE2-NEXT: movlhps {{.*#+}} xmm1 = xmm1[0],xmm0[0]
; SSE2-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
; SSE2-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Reload
; SSE2-NEXT: callq _roundeven
; SSE2-NEXT: movaps %xmm0, (%rsp) ## 16-byte Spill
; SSE2-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Reload
; SSE2-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
; SSE2-NEXT: callq _roundeven
; SSE2-NEXT: movaps (%rsp), %xmm1 ## 16-byte Reload
; SSE2-NEXT: movlhps {{.*#+}} xmm1 = xmm1[0],xmm0[0]
; SSE2-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Reload
; SSE2-NEXT: addq $56, %rsp
; SSE2-NEXT: retq
;
; SSE41-LABEL: roundeven_v4f64:
; SSE41: ## %bb.0:
; SSE41-NEXT: roundpd $8, %xmm0, %xmm0
; SSE41-NEXT: roundpd $8, %xmm1, %xmm1
; SSE41-NEXT: retq
;
; AVX-LABEL: roundeven_v4f64:
; AVX: ## %bb.0:
; AVX-NEXT: vroundpd $8, %ymm0, %ymm0
; AVX-NEXT: retq
  %a = call <4 x double> @llvm.roundeven.v4f64(<4 x double> %x)
  ret <4 x double> %a
}

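; There is no 512-bit vround*, so for the 512-bit cases AVX1 legalizes to
; two 256-bit vround* operations while AVX512 uses a single
; vrndscaleps/vrndscalepd on a zmm register. vrndscale's low four immediate
; bits follow the same encoding as vround; the upper four select a scale of
; 2^-M multiples, 0 here, i.e. plain rounding to integers.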
define <16 x float> @roundeven_v16f32(<16 x float> %x) {
; SSE2-LABEL: roundeven_v16f32:
; SSE2: ## %bb.0:
; SSE2-NEXT: subq $104, %rsp
; SSE2-NEXT: .cfi_def_cfa_offset 112
; SSE2-NEXT: movaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
; SSE2-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
; SSE2-NEXT: movaps %xmm1, (%rsp) ## 16-byte Spill
; SSE2-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,3,3,3]
; SSE2-NEXT: callq _roundevenf
; SSE2-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
; SSE2-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Reload
; SSE2-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
; SSE2-NEXT: callq _roundevenf
; SSE2-NEXT: unpcklps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Folded Reload
; SSE2-NEXT: ## xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
; SSE2-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
; SSE2-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Reload
; SSE2-NEXT: callq _roundevenf
; SSE2-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
; SSE2-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Reload
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1,1,1]
; SSE2-NEXT: callq _roundevenf
; SSE2-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 ## 16-byte Reload
; SSE2-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; SSE2-NEXT: unpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 ## 16-byte Folded Reload
; SSE2-NEXT: ## xmm1 = xmm1[0],mem[0]
; SSE2-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
; SSE2-NEXT: movaps (%rsp), %xmm0 ## 16-byte Reload
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,3,3,3]
; SSE2-NEXT: callq _roundevenf
; SSE2-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
; SSE2-NEXT: movaps (%rsp), %xmm0 ## 16-byte Reload
; SSE2-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
; SSE2-NEXT: callq _roundevenf
; SSE2-NEXT: unpcklps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Folded Reload
; SSE2-NEXT: ## xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
; SSE2-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
; SSE2-NEXT: movaps (%rsp), %xmm0 ## 16-byte Reload
; SSE2-NEXT: callq _roundevenf
; SSE2-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
; SSE2-NEXT: movaps (%rsp), %xmm0 ## 16-byte Reload
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1,1,1]
; SSE2-NEXT: callq _roundevenf
; SSE2-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 ## 16-byte Reload
; SSE2-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; SSE2-NEXT: unpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 ## 16-byte Folded Reload
; SSE2-NEXT: ## xmm1 = xmm1[0],mem[0]
; SSE2-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
; SSE2-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Reload
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,3,3,3]
; SSE2-NEXT: callq _roundevenf
; SSE2-NEXT: movaps %xmm0, (%rsp) ## 16-byte Spill
; SSE2-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Reload
; SSE2-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
; SSE2-NEXT: callq _roundevenf
; SSE2-NEXT: unpcklps (%rsp), %xmm0 ## 16-byte Folded Reload
; SSE2-NEXT: ## xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
; SSE2-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
; SSE2-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Reload
; SSE2-NEXT: callq _roundevenf
; SSE2-NEXT: movaps %xmm0, (%rsp) ## 16-byte Spill
; SSE2-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Reload
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1,1,1]
; SSE2-NEXT: callq _roundevenf
; SSE2-NEXT: movaps (%rsp), %xmm1 ## 16-byte Reload
; SSE2-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; SSE2-NEXT: unpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 ## 16-byte Folded Reload
; SSE2-NEXT: ## xmm1 = xmm1[0],mem[0]
; SSE2-NEXT: movaps %xmm1, (%rsp) ## 16-byte Spill
; SSE2-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Reload
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,3,3,3]
; SSE2-NEXT: callq _roundevenf
; SSE2-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
; SSE2-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Reload
; SSE2-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
; SSE2-NEXT: callq _roundevenf
; SSE2-NEXT: unpcklps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Folded Reload
; SSE2-NEXT: ## xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
; SSE2-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
; SSE2-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Reload
; SSE2-NEXT: callq _roundevenf
; SSE2-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
; SSE2-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Reload
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1,1,1]
; SSE2-NEXT: callq _roundevenf
; SSE2-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 ## 16-byte Reload
; SSE2-NEXT: unpcklps {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1]
; SSE2-NEXT: unpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 ## 16-byte Folded Reload
; SSE2-NEXT: ## xmm3 = xmm3[0],mem[0]
; SSE2-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Reload
; SSE2-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 ## 16-byte Reload
; SSE2-NEXT: movaps (%rsp), %xmm2 ## 16-byte Reload
; SSE2-NEXT: addq $104, %rsp
; SSE2-NEXT: retq
;
; SSE41-LABEL: roundeven_v16f32:
; SSE41: ## %bb.0:
; SSE41-NEXT: roundps $8, %xmm0, %xmm0
; SSE41-NEXT: roundps $8, %xmm1, %xmm1
; SSE41-NEXT: roundps $8, %xmm2, %xmm2
; SSE41-NEXT: roundps $8, %xmm3, %xmm3
; SSE41-NEXT: retq
;
; AVX1-LABEL: roundeven_v16f32:
; AVX1: ## %bb.0:
; AVX1-NEXT: vroundps $8, %ymm0, %ymm0
; AVX1-NEXT: vroundps $8, %ymm1, %ymm1
; AVX1-NEXT: retq
;
; AVX512-LABEL: roundeven_v16f32:
; AVX512: ## %bb.0:
; AVX512-NEXT: vrndscaleps $8, %zmm0, %zmm0
; AVX512-NEXT: retq
  %a = call <16 x float> @llvm.roundeven.v16f32(<16 x float> %x)
  ret <16 x float> %a
}

define <8 x double> @roundeven_v8f64(<8 x double> %x) {
; SSE2-LABEL: roundeven_v8f64:
; SSE2: ## %bb.0:
; SSE2-NEXT: subq $88, %rsp
; SSE2-NEXT: .cfi_def_cfa_offset 96
; SSE2-NEXT: movaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
; SSE2-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
; SSE2-NEXT: movaps %xmm1, (%rsp) ## 16-byte Spill
; SSE2-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
; SSE2-NEXT: callq _roundeven
; SSE2-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
; SSE2-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Reload
; SSE2-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
; SSE2-NEXT: callq _roundeven
; SSE2-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 ## 16-byte Reload
; SSE2-NEXT: movlhps {{.*#+}} xmm1 = xmm1[0],xmm0[0]
; SSE2-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
; SSE2-NEXT: movaps (%rsp), %xmm0 ## 16-byte Reload
; SSE2-NEXT: callq _roundeven
; SSE2-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
; SSE2-NEXT: movaps (%rsp), %xmm0 ## 16-byte Reload
; SSE2-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
; SSE2-NEXT: callq _roundeven
; SSE2-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 ## 16-byte Reload
; SSE2-NEXT: movlhps {{.*#+}} xmm1 = xmm1[0],xmm0[0]
; SSE2-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
; SSE2-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Reload
; SSE2-NEXT: callq _roundeven
; SSE2-NEXT: movaps %xmm0, (%rsp) ## 16-byte Spill
; SSE2-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Reload
; SSE2-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
; SSE2-NEXT: callq _roundeven
; SSE2-NEXT: movaps (%rsp), %xmm1 ## 16-byte Reload
; SSE2-NEXT: movlhps {{.*#+}} xmm1 = xmm1[0],xmm0[0]
; SSE2-NEXT: movaps %xmm1, (%rsp) ## 16-byte Spill
; SSE2-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Reload
; SSE2-NEXT: callq _roundeven
; SSE2-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
; SSE2-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Reload
; SSE2-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
; SSE2-NEXT: callq _roundeven
; SSE2-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 ## 16-byte Reload
; SSE2-NEXT: movlhps {{.*#+}} xmm3 = xmm3[0],xmm0[0]
; SSE2-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Reload
; SSE2-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 ## 16-byte Reload
; SSE2-NEXT: movaps (%rsp), %xmm2 ## 16-byte Reload
; SSE2-NEXT: addq $88, %rsp
; SSE2-NEXT: retq
;
; SSE41-LABEL: roundeven_v8f64:
; SSE41: ## %bb.0:
; SSE41-NEXT: roundpd $8, %xmm0, %xmm0
; SSE41-NEXT: roundpd $8, %xmm1, %xmm1
; SSE41-NEXT: roundpd $8, %xmm2, %xmm2
; SSE41-NEXT: roundpd $8, %xmm3, %xmm3
; SSE41-NEXT: retq
;
; AVX1-LABEL: roundeven_v8f64:
; AVX1: ## %bb.0:
; AVX1-NEXT: vroundpd $8, %ymm0, %ymm0
; AVX1-NEXT: vroundpd $8, %ymm1, %ymm1
; AVX1-NEXT: retq
;
; AVX512-LABEL: roundeven_v8f64:
; AVX512: ## %bb.0:
; AVX512-NEXT: vrndscalepd $8, %zmm0, %zmm0
; AVX512-NEXT: retq
  %a = call <8 x double> @llvm.roundeven.v8f64(<8 x double> %x)
  ret <8 x double> %a
}

declare half @llvm.roundeven.f16(half)
declare float @llvm.roundeven.f32(float)
declare double @llvm.roundeven.f64(double)
declare <4 x float> @llvm.roundeven.v4f32(<4 x float>)
declare <2 x double> @llvm.roundeven.v2f64(<2 x double>)
declare <8 x float> @llvm.roundeven.v8f32(<8 x float>)
declare <4 x double> @llvm.roundeven.v4f64(<4 x double>)
declare <16 x float> @llvm.roundeven.v16f32(<16 x float>)
declare <8 x double> @llvm.roundeven.v8f64(<8 x double>)