; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=+sse2 | FileCheck %s --check-prefix=SSE2
; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=+sse4.1 | FileCheck %s --check-prefixes=SSE41
; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=+avx | FileCheck %s --check-prefixes=AVX,AVX1
; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=+avx512f,+avx512vl | FileCheck %s --check-prefixes=AVX,AVX512,AVX512F
; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=+avx512fp16,+avx512vl | FileCheck %s --check-prefixes=AVX,AVX512,AVX512FP16
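
; Lowering of the llvm.roundeven.* intrinsics on x86-64. The rounding
; immediate $8 on round{ss,sd,ps,pd} and vrndscale* selects
; round-to-nearest-even with precision exceptions suppressed
; (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC).

; f16: without AVX512FP16 the half value is first converted to float
; (libcalls on SSE2/SSE4.1/AVX, vcvtph2ps on AVX512F), rounded, and
; converted back; AVX512FP16 rounds directly with vrndscalesh $8.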
define half @roundeven_f16(half %h) {
; SSE2-LABEL: roundeven_f16:
; SSE2:       ## %bb.0: ## %entry
; SSE2-NEXT:    pushq %rax
; SSE2-NEXT:    .cfi_def_cfa_offset 16
; SSE2-NEXT:    pextrw $0, %xmm0, %eax
; SSE2-NEXT:    movzwl %ax, %edi
; SSE2-NEXT:    callq ___extendhfsf2
; SSE2-NEXT:    callq _roundevenf
; SSE2-NEXT:    callq ___truncsfhf2
; SSE2-NEXT:    ## kill: def $ax killed $ax def $eax
; SSE2-NEXT:    pinsrw $0, %eax, %xmm0
; SSE2-NEXT:    popq %rax
; SSE2-NEXT:    retq
;
; SSE41-LABEL: roundeven_f16:
; SSE41:       ## %bb.0: ## %entry
; SSE41-NEXT:    pushq %rax
; SSE41-NEXT:    .cfi_def_cfa_offset 16
; SSE41-NEXT:    pextrw $0, %xmm0, %eax
; SSE41-NEXT:    movzwl %ax, %edi
; SSE41-NEXT:    callq ___extendhfsf2
; SSE41-NEXT:    roundss $8, %xmm0, %xmm0
; SSE41-NEXT:    callq ___truncsfhf2
; SSE41-NEXT:    ## kill: def $ax killed $ax def $eax
; SSE41-NEXT:    pinsrw $0, %eax, %xmm0
; SSE41-NEXT:    popq %rax
; SSE41-NEXT:    retq
;
; AVX1-LABEL: roundeven_f16:
; AVX1:       ## %bb.0: ## %entry
; AVX1-NEXT:    pushq %rax
; AVX1-NEXT:    .cfi_def_cfa_offset 16
; AVX1-NEXT:    vpextrw $0, %xmm0, %eax
; AVX1-NEXT:    movzwl %ax, %edi
; AVX1-NEXT:    callq ___extendhfsf2
; AVX1-NEXT:    vroundss $8, %xmm0, %xmm0, %xmm0
; AVX1-NEXT:    callq ___truncsfhf2
; AVX1-NEXT:    ## kill: def $ax killed $ax def $eax
; AVX1-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm0
; AVX1-NEXT:    popq %rax
; AVX1-NEXT:    retq
;
; AVX512F-LABEL: roundeven_f16:
; AVX512F:       ## %bb.0: ## %entry
; AVX512F-NEXT:    vpextrw $0, %xmm0, %eax
; AVX512F-NEXT:    vmovd %eax, %xmm0
; AVX512F-NEXT:    vcvtph2ps %xmm0, %xmm0
; AVX512F-NEXT:    vroundss $8, %xmm0, %xmm0, %xmm0
; AVX512F-NEXT:    vcvtps2ph $4, %xmm0, %xmm0
; AVX512F-NEXT:    vmovd %xmm0, %eax
; AVX512F-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm0
; AVX512F-NEXT:    retq
;
; AVX512FP16-LABEL: roundeven_f16:
; AVX512FP16:       ## %bb.0: ## %entry
; AVX512FP16-NEXT:    vrndscalesh $8, %xmm0, %xmm0, %xmm0
; AVX512FP16-NEXT:    retq
entry:
  %a = call half @llvm.roundeven.f16(half %h)
  ret half %a
}
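
; Scalar f32: SSE2 has no round instruction, so this is a tail call to the
; roundevenf libcall; SSE4.1 and AVX lower to roundss/vroundss $8.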
define float @roundeven_f32(float %x) {
; SSE2-LABEL: roundeven_f32:
; SSE2:       ## %bb.0:
; SSE2-NEXT:    jmp _roundevenf ## TAILCALL
;
; SSE41-LABEL: roundeven_f32:
; SSE41:       ## %bb.0:
; SSE41-NEXT:    roundss $8, %xmm0, %xmm0
; SSE41-NEXT:    retq
;
; AVX-LABEL: roundeven_f32:
; AVX:       ## %bb.0:
; AVX-NEXT:    vroundss $8, %xmm0, %xmm0, %xmm0
; AVX-NEXT:    retq
  %a = call float @llvm.roundeven.f32(float %x)
  ret float %a
}
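
; Scalar f64: tail call to the roundeven libcall on SSE2; roundsd/vroundsd
; $8 on SSE4.1 and AVX.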
define double @roundeven_f64(double %x) {
; SSE2-LABEL: roundeven_f64:
; SSE2:       ## %bb.0:
; SSE2-NEXT:    jmp _roundeven ## TAILCALL
;
; SSE41-LABEL: roundeven_f64:
; SSE41:       ## %bb.0:
; SSE41-NEXT:    roundsd $8, %xmm0, %xmm0
; SSE41-NEXT:    retq
;
; AVX-LABEL: roundeven_f64:
; AVX:       ## %bb.0:
; AVX-NEXT:    vroundsd $8, %xmm0, %xmm0, %xmm0
; AVX-NEXT:    retq
  %a = call double @llvm.roundeven.f64(double %x)
  ret double %a
}
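
; v4f32: SSE2 scalarizes to four roundevenf libcalls, spilling the vector
; and rebuilding it with unpcklps/unpcklpd; SSE4.1 and AVX use a single
; roundps/vroundps $8.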
define <4 x float> @roundeven_v4f32(<4 x float> %x) {
; SSE2-LABEL: roundeven_v4f32:
; SSE2:       ## %bb.0:
; SSE2-NEXT:    subq $56, %rsp
; SSE2-NEXT:    .cfi_def_cfa_offset 64
; SSE2-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
; SSE2-NEXT:    shufps {{.*#+}} xmm0 = xmm0[3,3,3,3]
; SSE2-NEXT:    callq _roundevenf
; SSE2-NEXT:    movaps %xmm0, (%rsp) ## 16-byte Spill
; SSE2-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Reload
; SSE2-NEXT:    movhlps {{.*#+}} xmm0 = xmm0[1,1]
; SSE2-NEXT:    callq _roundevenf
; SSE2-NEXT:    unpcklps (%rsp), %xmm0 ## 16-byte Folded Reload
; SSE2-NEXT:    ## xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
; SSE2-NEXT:    movaps %xmm0, (%rsp) ## 16-byte Spill
; SSE2-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Reload
; SSE2-NEXT:    callq _roundevenf
; SSE2-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
; SSE2-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Reload
; SSE2-NEXT:    shufps {{.*#+}} xmm0 = xmm0[1,1,1,1]
; SSE2-NEXT:    callq _roundevenf
; SSE2-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 ## 16-byte Reload
; SSE2-NEXT:    unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; SSE2-NEXT:    unpcklpd (%rsp), %xmm1 ## 16-byte Folded Reload
; SSE2-NEXT:    ## xmm1 = xmm1[0],mem[0]
; SSE2-NEXT:    movaps %xmm1, %xmm0
; SSE2-NEXT:    addq $56, %rsp
; SSE2-NEXT:    retq
;
; SSE41-LABEL: roundeven_v4f32:
; SSE41:       ## %bb.0:
; SSE41-NEXT:    roundps $8, %xmm0, %xmm0
; SSE41-NEXT:    retq
;
; AVX-LABEL: roundeven_v4f32:
; AVX:       ## %bb.0:
; AVX-NEXT:    vroundps $8, %xmm0, %xmm0
; AVX-NEXT:    retq
  %a = call <4 x float> @llvm.roundeven.v4f32(<4 x float> %x)
  ret <4 x float> %a
}
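
; v2f64: SSE2 scalarizes to two roundeven libcalls and recombines with
; movlhps; SSE4.1 and AVX use a single roundpd/vroundpd $8.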
define <2 x double> @roundeven_v2f64(<2 x double> %x) {
; SSE2-LABEL: roundeven_v2f64:
; SSE2:       ## %bb.0:
; SSE2-NEXT:    subq $40, %rsp
; SSE2-NEXT:    .cfi_def_cfa_offset 48
; SSE2-NEXT:    movaps %xmm0, (%rsp) ## 16-byte Spill
; SSE2-NEXT:    callq _roundeven
; SSE2-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
; SSE2-NEXT:    movaps (%rsp), %xmm0 ## 16-byte Reload
; SSE2-NEXT:    movhlps {{.*#+}} xmm0 = xmm0[1,1]
; SSE2-NEXT:    callq _roundeven
; SSE2-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 ## 16-byte Reload
; SSE2-NEXT:    movlhps {{.*#+}} xmm1 = xmm1[0],xmm0[0]
; SSE2-NEXT:    movaps %xmm1, %xmm0
; SSE2-NEXT:    addq $40, %rsp
; SSE2-NEXT:    retq
;
; SSE41-LABEL: roundeven_v2f64:
; SSE41:       ## %bb.0:
; SSE41-NEXT:    roundpd $8, %xmm0, %xmm0
; SSE41-NEXT:    retq
;
; AVX-LABEL: roundeven_v2f64:
; AVX:       ## %bb.0:
; AVX-NEXT:    vroundpd $8, %xmm0, %xmm0
; AVX-NEXT:    retq
  %a = call <2 x double> @llvm.roundeven.v2f64(<2 x double> %x)
  ret <2 x double> %a
}
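
; v8f32: eight roundevenf libcalls on SSE2, two 128-bit roundps on SSE4.1,
; and a single 256-bit vroundps on AVX.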
define <8 x float> @roundeven_v8f32(<8 x float> %x) {
; SSE2-LABEL: roundeven_v8f32:
; SSE2:       ## %bb.0:
; SSE2-NEXT:    subq $72, %rsp
; SSE2-NEXT:    .cfi_def_cfa_offset 80
; SSE2-NEXT:    movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
; SSE2-NEXT:    movaps %xmm0, (%rsp) ## 16-byte Spill
; SSE2-NEXT:    shufps {{.*#+}} xmm0 = xmm0[3,3,3,3]
; SSE2-NEXT:    callq _roundevenf
; SSE2-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
; SSE2-NEXT:    movaps (%rsp), %xmm0 ## 16-byte Reload
; SSE2-NEXT:    movhlps {{.*#+}} xmm0 = xmm0[1,1]
; SSE2-NEXT:    callq _roundevenf
; SSE2-NEXT:    unpcklps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Folded Reload
; SSE2-NEXT:    ## xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
; SSE2-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
; SSE2-NEXT:    movaps (%rsp), %xmm0 ## 16-byte Reload
; SSE2-NEXT:    callq _roundevenf
; SSE2-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
; SSE2-NEXT:    movaps (%rsp), %xmm0 ## 16-byte Reload
; SSE2-NEXT:    shufps {{.*#+}} xmm0 = xmm0[1,1,1,1]
; SSE2-NEXT:    callq _roundevenf
; SSE2-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 ## 16-byte Reload
; SSE2-NEXT:    unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; SSE2-NEXT:    unpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 ## 16-byte Folded Reload
; SSE2-NEXT:    ## xmm1 = xmm1[0],mem[0]
; SSE2-NEXT:    movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
; SSE2-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Reload
; SSE2-NEXT:    shufps {{.*#+}} xmm0 = xmm0[3,3,3,3]
; SSE2-NEXT:    callq _roundevenf
; SSE2-NEXT:    movaps %xmm0, (%rsp) ## 16-byte Spill
; SSE2-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Reload
; SSE2-NEXT:    movhlps {{.*#+}} xmm0 = xmm0[1,1]
; SSE2-NEXT:    callq _roundevenf
; SSE2-NEXT:    unpcklps (%rsp), %xmm0 ## 16-byte Folded Reload
; SSE2-NEXT:    ## xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
; SSE2-NEXT:    movaps %xmm0, (%rsp) ## 16-byte Spill
; SSE2-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Reload
; SSE2-NEXT:    callq _roundevenf
; SSE2-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
; SSE2-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Reload
; SSE2-NEXT:    shufps {{.*#+}} xmm0 = xmm0[1,1,1,1]
; SSE2-NEXT:    callq _roundevenf
; SSE2-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 ## 16-byte Reload
; SSE2-NEXT:    unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; SSE2-NEXT:    unpcklpd (%rsp), %xmm1 ## 16-byte Folded Reload
; SSE2-NEXT:    ## xmm1 = xmm1[0],mem[0]
; SSE2-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Reload
; SSE2-NEXT:    addq $72, %rsp
; SSE2-NEXT:    retq
;
; SSE41-LABEL: roundeven_v8f32:
; SSE41:       ## %bb.0:
; SSE41-NEXT:    roundps $8, %xmm0, %xmm0
; SSE41-NEXT:    roundps $8, %xmm1, %xmm1
; SSE41-NEXT:    retq
;
; AVX-LABEL: roundeven_v8f32:
; AVX:       ## %bb.0:
; AVX-NEXT:    vroundps $8, %ymm0, %ymm0
; AVX-NEXT:    retq
  %a = call <8 x float> @llvm.roundeven.v8f32(<8 x float> %x)
  ret <8 x float> %a
}
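
; v4f64: four roundeven libcalls on SSE2, two roundpd on SSE4.1, and a
; single 256-bit vroundpd on AVX.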
define <4 x double> @roundeven_v4f64(<4 x double> %x) {
; SSE2-LABEL: roundeven_v4f64:
; SSE2:       ## %bb.0:
; SSE2-NEXT:    subq $56, %rsp
; SSE2-NEXT:    .cfi_def_cfa_offset 64
; SSE2-NEXT:    movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
; SSE2-NEXT:    movaps %xmm0, (%rsp) ## 16-byte Spill
; SSE2-NEXT:    callq _roundeven
; SSE2-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
; SSE2-NEXT:    movaps (%rsp), %xmm0 ## 16-byte Reload
; SSE2-NEXT:    movhlps {{.*#+}} xmm0 = xmm0[1,1]
; SSE2-NEXT:    callq _roundeven
; SSE2-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 ## 16-byte Reload
; SSE2-NEXT:    movlhps {{.*#+}} xmm1 = xmm1[0],xmm0[0]
; SSE2-NEXT:    movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
; SSE2-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Reload
; SSE2-NEXT:    callq _roundeven
; SSE2-NEXT:    movaps %xmm0, (%rsp) ## 16-byte Spill
; SSE2-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Reload
; SSE2-NEXT:    movhlps {{.*#+}} xmm0 = xmm0[1,1]
; SSE2-NEXT:    callq _roundeven
; SSE2-NEXT:    movaps (%rsp), %xmm1 ## 16-byte Reload
; SSE2-NEXT:    movlhps {{.*#+}} xmm1 = xmm1[0],xmm0[0]
; SSE2-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Reload
; SSE2-NEXT:    addq $56, %rsp
; SSE2-NEXT:    retq
;
; SSE41-LABEL: roundeven_v4f64:
; SSE41:       ## %bb.0:
; SSE41-NEXT:    roundpd $8, %xmm0, %xmm0
; SSE41-NEXT:    roundpd $8, %xmm1, %xmm1
; SSE41-NEXT:    retq
;
; AVX-LABEL: roundeven_v4f64:
; AVX:       ## %bb.0:
; AVX-NEXT:    vroundpd $8, %ymm0, %ymm0
; AVX-NEXT:    retq
  %a = call <4 x double> @llvm.roundeven.v4f64(<4 x double> %x)
  ret <4 x double> %a
}
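
; v16f32: sixteen roundevenf libcalls on SSE2, four roundps on SSE4.1, two
; 256-bit vroundps on AVX1, and a single 512-bit vrndscaleps $8 on AVX512.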
define <16 x float> @roundeven_v16f32(<16 x float> %x) {
; SSE2-LABEL: roundeven_v16f32:
; SSE2:       ## %bb.0:
; SSE2-NEXT:    subq $104, %rsp
; SSE2-NEXT:    .cfi_def_cfa_offset 112
; SSE2-NEXT:    movaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
; SSE2-NEXT:    movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
; SSE2-NEXT:    movaps %xmm1, (%rsp) ## 16-byte Spill
; SSE2-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
; SSE2-NEXT:    shufps {{.*#+}} xmm0 = xmm0[3,3,3,3]
; SSE2-NEXT:    callq _roundevenf
; SSE2-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
; SSE2-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Reload
; SSE2-NEXT:    movhlps {{.*#+}} xmm0 = xmm0[1,1]
; SSE2-NEXT:    callq _roundevenf
; SSE2-NEXT:    unpcklps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Folded Reload
; SSE2-NEXT:    ## xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
; SSE2-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
; SSE2-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Reload
; SSE2-NEXT:    callq _roundevenf
; SSE2-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
; SSE2-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Reload
; SSE2-NEXT:    shufps {{.*#+}} xmm0 = xmm0[1,1,1,1]
; SSE2-NEXT:    callq _roundevenf
; SSE2-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 ## 16-byte Reload
; SSE2-NEXT:    unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; SSE2-NEXT:    unpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 ## 16-byte Folded Reload
; SSE2-NEXT:    ## xmm1 = xmm1[0],mem[0]
; SSE2-NEXT:    movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
; SSE2-NEXT:    movaps (%rsp), %xmm0 ## 16-byte Reload
; SSE2-NEXT:    shufps {{.*#+}} xmm0 = xmm0[3,3,3,3]
; SSE2-NEXT:    callq _roundevenf
; SSE2-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
; SSE2-NEXT:    movaps (%rsp), %xmm0 ## 16-byte Reload
; SSE2-NEXT:    movhlps {{.*#+}} xmm0 = xmm0[1,1]
; SSE2-NEXT:    callq _roundevenf
; SSE2-NEXT:    unpcklps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Folded Reload
; SSE2-NEXT:    ## xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
; SSE2-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
; SSE2-NEXT:    movaps (%rsp), %xmm0 ## 16-byte Reload
; SSE2-NEXT:    callq _roundevenf
; SSE2-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
; SSE2-NEXT:    movaps (%rsp), %xmm0 ## 16-byte Reload
; SSE2-NEXT:    shufps {{.*#+}} xmm0 = xmm0[1,1,1,1]
; SSE2-NEXT:    callq _roundevenf
; SSE2-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 ## 16-byte Reload
; SSE2-NEXT:    unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; SSE2-NEXT:    unpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 ## 16-byte Folded Reload
; SSE2-NEXT:    ## xmm1 = xmm1[0],mem[0]
; SSE2-NEXT:    movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
; SSE2-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Reload
; SSE2-NEXT:    shufps {{.*#+}} xmm0 = xmm0[3,3,3,3]
; SSE2-NEXT:    callq _roundevenf
; SSE2-NEXT:    movaps %xmm0, (%rsp) ## 16-byte Spill
; SSE2-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Reload
; SSE2-NEXT:    movhlps {{.*#+}} xmm0 = xmm0[1,1]
; SSE2-NEXT:    callq _roundevenf
; SSE2-NEXT:    unpcklps (%rsp), %xmm0 ## 16-byte Folded Reload
; SSE2-NEXT:    ## xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
; SSE2-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
; SSE2-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Reload
; SSE2-NEXT:    callq _roundevenf
; SSE2-NEXT:    movaps %xmm0, (%rsp) ## 16-byte Spill
; SSE2-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Reload
; SSE2-NEXT:    shufps {{.*#+}} xmm0 = xmm0[1,1,1,1]
; SSE2-NEXT:    callq _roundevenf
; SSE2-NEXT:    movaps (%rsp), %xmm1 ## 16-byte Reload
; SSE2-NEXT:    unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; SSE2-NEXT:    unpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 ## 16-byte Folded Reload
; SSE2-NEXT:    ## xmm1 = xmm1[0],mem[0]
; SSE2-NEXT:    movaps %xmm1, (%rsp) ## 16-byte Spill
; SSE2-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Reload
; SSE2-NEXT:    shufps {{.*#+}} xmm0 = xmm0[3,3,3,3]
; SSE2-NEXT:    callq _roundevenf
; SSE2-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
; SSE2-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Reload
; SSE2-NEXT:    movhlps {{.*#+}} xmm0 = xmm0[1,1]
; SSE2-NEXT:    callq _roundevenf
; SSE2-NEXT:    unpcklps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Folded Reload
; SSE2-NEXT:    ## xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
; SSE2-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
; SSE2-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Reload
; SSE2-NEXT:    callq _roundevenf
; SSE2-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
; SSE2-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Reload
; SSE2-NEXT:    shufps {{.*#+}} xmm0 = xmm0[1,1,1,1]
; SSE2-NEXT:    callq _roundevenf
; SSE2-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 ## 16-byte Reload
; SSE2-NEXT:    unpcklps {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1]
; SSE2-NEXT:    unpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 ## 16-byte Folded Reload
; SSE2-NEXT:    ## xmm3 = xmm3[0],mem[0]
; SSE2-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Reload
; SSE2-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 ## 16-byte Reload
; SSE2-NEXT:    movaps (%rsp), %xmm2 ## 16-byte Reload
; SSE2-NEXT:    addq $104, %rsp
; SSE2-NEXT:    retq
;
; SSE41-LABEL: roundeven_v16f32:
; SSE41:       ## %bb.0:
; SSE41-NEXT:    roundps $8, %xmm0, %xmm0
; SSE41-NEXT:    roundps $8, %xmm1, %xmm1
; SSE41-NEXT:    roundps $8, %xmm2, %xmm2
; SSE41-NEXT:    roundps $8, %xmm3, %xmm3
; SSE41-NEXT:    retq
;
; AVX1-LABEL: roundeven_v16f32:
; AVX1:       ## %bb.0:
; AVX1-NEXT:    vroundps $8, %ymm0, %ymm0
; AVX1-NEXT:    vroundps $8, %ymm1, %ymm1
; AVX1-NEXT:    retq
;
; AVX512-LABEL: roundeven_v16f32:
; AVX512:       ## %bb.0:
; AVX512-NEXT:    vrndscaleps $8, %zmm0, %zmm0
; AVX512-NEXT:    retq
  %a = call <16 x float> @llvm.roundeven.v16f32(<16 x float> %x)
  ret <16 x float> %a
}
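
; v8f64: eight roundeven libcalls on SSE2, four roundpd on SSE4.1, two
; 256-bit vroundpd on AVX1, and a single 512-bit vrndscalepd $8 on AVX512.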
define <8 x double> @roundeven_v8f64(<8 x double> %x) {
; SSE2-LABEL: roundeven_v8f64:
; SSE2:       ## %bb.0:
; SSE2-NEXT:    subq $88, %rsp
; SSE2-NEXT:    .cfi_def_cfa_offset 96
; SSE2-NEXT:    movaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
; SSE2-NEXT:    movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
; SSE2-NEXT:    movaps %xmm1, (%rsp) ## 16-byte Spill
; SSE2-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
; SSE2-NEXT:    callq _roundeven
; SSE2-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
; SSE2-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Reload
; SSE2-NEXT:    movhlps {{.*#+}} xmm0 = xmm0[1,1]
; SSE2-NEXT:    callq _roundeven
; SSE2-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 ## 16-byte Reload
; SSE2-NEXT:    movlhps {{.*#+}} xmm1 = xmm1[0],xmm0[0]
; SSE2-NEXT:    movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
; SSE2-NEXT:    movaps (%rsp), %xmm0 ## 16-byte Reload
; SSE2-NEXT:    callq _roundeven
; SSE2-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
; SSE2-NEXT:    movaps (%rsp), %xmm0 ## 16-byte Reload
; SSE2-NEXT:    movhlps {{.*#+}} xmm0 = xmm0[1,1]
; SSE2-NEXT:    callq _roundeven
; SSE2-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 ## 16-byte Reload
; SSE2-NEXT:    movlhps {{.*#+}} xmm1 = xmm1[0],xmm0[0]
; SSE2-NEXT:    movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
; SSE2-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Reload
; SSE2-NEXT:    callq _roundeven
; SSE2-NEXT:    movaps %xmm0, (%rsp) ## 16-byte Spill
; SSE2-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Reload
; SSE2-NEXT:    movhlps {{.*#+}} xmm0 = xmm0[1,1]
; SSE2-NEXT:    callq _roundeven
; SSE2-NEXT:    movaps (%rsp), %xmm1 ## 16-byte Reload
; SSE2-NEXT:    movlhps {{.*#+}} xmm1 = xmm1[0],xmm0[0]
; SSE2-NEXT:    movaps %xmm1, (%rsp) ## 16-byte Spill
; SSE2-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Reload
; SSE2-NEXT:    callq _roundeven
; SSE2-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
; SSE2-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Reload
; SSE2-NEXT:    movhlps {{.*#+}} xmm0 = xmm0[1,1]
; SSE2-NEXT:    callq _roundeven
; SSE2-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 ## 16-byte Reload
; SSE2-NEXT:    movlhps {{.*#+}} xmm3 = xmm3[0],xmm0[0]
; SSE2-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Reload
; SSE2-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 ## 16-byte Reload
; SSE2-NEXT:    movaps (%rsp), %xmm2 ## 16-byte Reload
; SSE2-NEXT:    addq $88, %rsp
; SSE2-NEXT:    retq
;
; SSE41-LABEL: roundeven_v8f64:
; SSE41:       ## %bb.0:
; SSE41-NEXT:    roundpd $8, %xmm0, %xmm0
; SSE41-NEXT:    roundpd $8, %xmm1, %xmm1
; SSE41-NEXT:    roundpd $8, %xmm2, %xmm2
; SSE41-NEXT:    roundpd $8, %xmm3, %xmm3
; SSE41-NEXT:    retq
;
; AVX1-LABEL: roundeven_v8f64:
; AVX1:       ## %bb.0:
; AVX1-NEXT:    vroundpd $8, %ymm0, %ymm0
; AVX1-NEXT:    vroundpd $8, %ymm1, %ymm1
; AVX1-NEXT:    retq
;
; AVX512-LABEL: roundeven_v8f64:
; AVX512:       ## %bb.0:
; AVX512-NEXT:    vrndscalepd $8, %zmm0, %zmm0
; AVX512-NEXT:    retq
  %a = call <8 x double> @llvm.roundeven.v8f64(<8 x double> %x)
  ret <8 x double> %a
}
declare half @llvm.roundeven.f16(half)
declare float @llvm.roundeven.f32(float)
declare double @llvm.roundeven.f64(double)
declare <4 x float> @llvm.roundeven.v4f32(<4 x float>)
declare <2 x double> @llvm.roundeven.v2f64(<2 x double>)
declare <8 x float> @llvm.roundeven.v8f32(<8 x float>)
declare <4 x double> @llvm.roundeven.v4f64(<4 x double>)
declare <16 x float> @llvm.roundeven.v16f32(<16 x float>)
declare <8 x double> @llvm.roundeven.v8f64(<8 x double>)