; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown | FileCheck %s --check-prefix=X64-SSE
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=avx | FileCheck %s --check-prefix=X64-AVX
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=avx512f | FileCheck %s --check-prefix=X64-AVX
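
; These tests check lowering of the llvm.llrint.* intrinsics on x86-64. The
; vector conversions are scalarized: each element is converted to a 64-bit
; integer with cvtss2si/cvtsd2si (vcvtss2si/vcvtsd2si under AVX) and the
; results are repacked into the destination vector registers.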

define <1 x i64> @llrint_v1i64_v1f32(<1 x float> %x) {
; X64-SSE-LABEL: llrint_v1i64_v1f32:
; X64-SSE:       # %bb.0:
; X64-SSE-NEXT:    cvtss2si %xmm0, %rax
; X64-SSE-NEXT:    retq
;
; X64-AVX-LABEL: llrint_v1i64_v1f32:
; X64-AVX:       # %bb.0:
; X64-AVX-NEXT:    vcvtss2si %xmm0, %rax
; X64-AVX-NEXT:    retq
  %a = call <1 x i64> @llvm.llrint.v1i64.v1f32(<1 x float> %x)
  ret <1 x i64> %a
}
declare <1 x i64> @llvm.llrint.v1i64.v1f32(<1 x float>)
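
; For multi-element vectors the upper lanes are extracted with shuffles
; (shufps/movhlps under SSE, vmovshdup under AVX) before each scalar conversion.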

define <2 x i64> @llrint_v2i64_v2f32(<2 x float> %x) {
; X64-SSE-LABEL: llrint_v2i64_v2f32:
; X64-SSE:       # %bb.0:
; X64-SSE-NEXT:    cvtss2si %xmm0, %rax
; X64-SSE-NEXT:    movq %rax, %xmm1
; X64-SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[1,1,1,1]
; X64-SSE-NEXT:    cvtss2si %xmm0, %rax
; X64-SSE-NEXT:    movq %rax, %xmm0
; X64-SSE-NEXT:    punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
; X64-SSE-NEXT:    movdqa %xmm1, %xmm0
; X64-SSE-NEXT:    retq
;
; X64-AVX-LABEL: llrint_v2i64_v2f32:
; X64-AVX:       # %bb.0:
; X64-AVX-NEXT:    vcvtss2si %xmm0, %rax
; X64-AVX-NEXT:    vmovq %rax, %xmm1
; X64-AVX-NEXT:    vmovshdup {{.*#+}} xmm0 = xmm0[1,1,3,3]
; X64-AVX-NEXT:    vcvtss2si %xmm0, %rax
; X64-AVX-NEXT:    vmovq %rax, %xmm0
; X64-AVX-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
; X64-AVX-NEXT:    retq
  %a = call <2 x i64> @llvm.llrint.v2i64.v2f32(<2 x float> %x)
  ret <2 x i64> %a
}
declare <2 x i64> @llvm.llrint.v2i64.v2f32(<2 x float>)

define <4 x i64> @llrint_v4i64_v4f32(<4 x float> %x) {
; X64-SSE-LABEL: llrint_v4i64_v4f32:
; X64-SSE:       # %bb.0:
; X64-SSE-NEXT:    cvtss2si %xmm0, %rax
; X64-SSE-NEXT:    movq %rax, %xmm2
; X64-SSE-NEXT:    movaps %xmm0, %xmm1
; X64-SSE-NEXT:    shufps {{.*#+}} xmm1 = xmm1[1,1],xmm0[1,1]
; X64-SSE-NEXT:    cvtss2si %xmm1, %rax
; X64-SSE-NEXT:    movq %rax, %xmm1
; X64-SSE-NEXT:    punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm1[0]
; X64-SSE-NEXT:    movaps %xmm0, %xmm1
; X64-SSE-NEXT:    shufps {{.*#+}} xmm1 = xmm1[3,3],xmm0[3,3]
; X64-SSE-NEXT:    cvtss2si %xmm1, %rax
; X64-SSE-NEXT:    movq %rax, %xmm3
; X64-SSE-NEXT:    movhlps {{.*#+}} xmm0 = xmm0[1,1]
; X64-SSE-NEXT:    cvtss2si %xmm0, %rax
; X64-SSE-NEXT:    movq %rax, %xmm1
; X64-SSE-NEXT:    punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm3[0]
; X64-SSE-NEXT:    movdqa %xmm2, %xmm0
; X64-SSE-NEXT:    retq
  %a = call <4 x i64> @llvm.llrint.v4i64.v4f32(<4 x float> %x)
  ret <4 x i64> %a
}
declare <4 x i64> @llvm.llrint.v4i64.v4f32(<4 x float>)

define <8 x i64> @llrint_v8i64_v8f32(<8 x float> %x) {
; X64-SSE-LABEL: llrint_v8i64_v8f32:
; X64-SSE:       # %bb.0:
; X64-SSE-NEXT:    movaps %xmm0, %xmm2
; X64-SSE-NEXT:    cvtss2si %xmm0, %rax
; X64-SSE-NEXT:    movq %rax, %xmm0
; X64-SSE-NEXT:    movaps %xmm2, %xmm3
; X64-SSE-NEXT:    shufps {{.*#+}} xmm3 = xmm3[1,1],xmm2[1,1]
; X64-SSE-NEXT:    cvtss2si %xmm3, %rax
; X64-SSE-NEXT:    movq %rax, %xmm3
; X64-SSE-NEXT:    punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm3[0]
; X64-SSE-NEXT:    movaps %xmm2, %xmm3
; X64-SSE-NEXT:    shufps {{.*#+}} xmm3 = xmm3[3,3],xmm2[3,3]
; X64-SSE-NEXT:    cvtss2si %xmm3, %rax
; X64-SSE-NEXT:    movq %rax, %xmm3
; X64-SSE-NEXT:    movhlps {{.*#+}} xmm2 = xmm2[1,1]
; X64-SSE-NEXT:    cvtss2si %xmm2, %rax
; X64-SSE-NEXT:    movq %rax, %xmm4
; X64-SSE-NEXT:    punpcklqdq {{.*#+}} xmm4 = xmm4[0],xmm3[0]
; X64-SSE-NEXT:    cvtss2si %xmm1, %rax
; X64-SSE-NEXT:    movq %rax, %xmm2
; X64-SSE-NEXT:    movaps %xmm1, %xmm3
; X64-SSE-NEXT:    shufps {{.*#+}} xmm3 = xmm3[1,1],xmm1[1,1]
; X64-SSE-NEXT:    cvtss2si %xmm3, %rax
; X64-SSE-NEXT:    movq %rax, %xmm3
; X64-SSE-NEXT:    punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm3[0]
; X64-SSE-NEXT:    movaps %xmm1, %xmm3
; X64-SSE-NEXT:    shufps {{.*#+}} xmm3 = xmm3[3,3],xmm1[3,3]
; X64-SSE-NEXT:    cvtss2si %xmm3, %rax
; X64-SSE-NEXT:    movq %rax, %xmm5
; X64-SSE-NEXT:    movhlps {{.*#+}} xmm1 = xmm1[1,1]
; X64-SSE-NEXT:    cvtss2si %xmm1, %rax
; X64-SSE-NEXT:    movq %rax, %xmm3
; X64-SSE-NEXT:    punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm5[0]
; X64-SSE-NEXT:    movdqa %xmm4, %xmm1
; X64-SSE-NEXT:    retq
  %a = call <8 x i64> @llvm.llrint.v8i64.v8f32(<8 x float> %x)
  ret <8 x i64> %a
}
declare <8 x i64> @llvm.llrint.v8i64.v8f32(<8 x float>)
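
; The <16 x i64> result does not fit in registers, so it is returned
; indirectly: the sret pointer arrives in %rdi, is returned in %rax, and each
; converted 128-bit pair is stored to the result buffer.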

define <16 x i64> @llrint_v16i64_v16f32(<16 x float> %x) {
; X64-SSE-LABEL: llrint_v16i64_v16f32:
; X64-SSE:       # %bb.0:
; X64-SSE-NEXT:    movq %rdi, %rax
; X64-SSE-NEXT:    cvtss2si %xmm0, %rcx
; X64-SSE-NEXT:    movq %rcx, %xmm4
; X64-SSE-NEXT:    movaps %xmm0, %xmm5
; X64-SSE-NEXT:    shufps {{.*#+}} xmm5 = xmm5[1,1],xmm0[1,1]
; X64-SSE-NEXT:    cvtss2si %xmm5, %rcx
; X64-SSE-NEXT:    movq %rcx, %xmm5
; X64-SSE-NEXT:    punpcklqdq {{.*#+}} xmm4 = xmm4[0],xmm5[0]
; X64-SSE-NEXT:    movaps %xmm0, %xmm5
; X64-SSE-NEXT:    shufps {{.*#+}} xmm5 = xmm5[3,3],xmm0[3,3]
; X64-SSE-NEXT:    cvtss2si %xmm5, %rcx
; X64-SSE-NEXT:    movq %rcx, %xmm5
; X64-SSE-NEXT:    movhlps {{.*#+}} xmm0 = xmm0[1,1]
; X64-SSE-NEXT:    cvtss2si %xmm0, %rcx
; X64-SSE-NEXT:    movq %rcx, %xmm0
; X64-SSE-NEXT:    punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm5[0]
; X64-SSE-NEXT:    cvtss2si %xmm1, %rcx
; X64-SSE-NEXT:    movq %rcx, %xmm5
; X64-SSE-NEXT:    movaps %xmm1, %xmm6
; X64-SSE-NEXT:    shufps {{.*#+}} xmm6 = xmm6[1,1],xmm1[1,1]
; X64-SSE-NEXT:    cvtss2si %xmm6, %rcx
; X64-SSE-NEXT:    movq %rcx, %xmm6
; X64-SSE-NEXT:    punpcklqdq {{.*#+}} xmm5 = xmm5[0],xmm6[0]
; X64-SSE-NEXT:    movaps %xmm1, %xmm6
; X64-SSE-NEXT:    shufps {{.*#+}} xmm6 = xmm6[3,3],xmm1[3,3]
; X64-SSE-NEXT:    cvtss2si %xmm6, %rcx
; X64-SSE-NEXT:    movq %rcx, %xmm6
; X64-SSE-NEXT:    movhlps {{.*#+}} xmm1 = xmm1[1,1]
; X64-SSE-NEXT:    cvtss2si %xmm1, %rcx
; X64-SSE-NEXT:    movq %rcx, %xmm1
; X64-SSE-NEXT:    punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm6[0]
; X64-SSE-NEXT:    cvtss2si %xmm2, %rcx
; X64-SSE-NEXT:    movq %rcx, %xmm6
; X64-SSE-NEXT:    movaps %xmm2, %xmm7
; X64-SSE-NEXT:    shufps {{.*#+}} xmm7 = xmm7[1,1],xmm2[1,1]
; X64-SSE-NEXT:    cvtss2si %xmm7, %rcx
; X64-SSE-NEXT:    movq %rcx, %xmm7
; X64-SSE-NEXT:    punpcklqdq {{.*#+}} xmm6 = xmm6[0],xmm7[0]
; X64-SSE-NEXT:    movaps %xmm2, %xmm7
; X64-SSE-NEXT:    shufps {{.*#+}} xmm7 = xmm7[3,3],xmm2[3,3]
; X64-SSE-NEXT:    cvtss2si %xmm7, %rcx
; X64-SSE-NEXT:    movq %rcx, %xmm7
; X64-SSE-NEXT:    movhlps {{.*#+}} xmm2 = xmm2[1,1]
; X64-SSE-NEXT:    cvtss2si %xmm2, %rcx
; X64-SSE-NEXT:    movq %rcx, %xmm2
; X64-SSE-NEXT:    punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm7[0]
; X64-SSE-NEXT:    cvtss2si %xmm3, %rcx
; X64-SSE-NEXT:    movq %rcx, %xmm7
; X64-SSE-NEXT:    movaps %xmm3, %xmm8
; X64-SSE-NEXT:    shufps {{.*#+}} xmm8 = xmm8[1,1],xmm3[1,1]
; X64-SSE-NEXT:    cvtss2si %xmm8, %rcx
; X64-SSE-NEXT:    movq %rcx, %xmm8
; X64-SSE-NEXT:    punpcklqdq {{.*#+}} xmm7 = xmm7[0],xmm8[0]
; X64-SSE-NEXT:    movaps %xmm3, %xmm8
; X64-SSE-NEXT:    shufps {{.*#+}} xmm8 = xmm8[3,3],xmm3[3,3]
; X64-SSE-NEXT:    cvtss2si %xmm8, %rcx
; X64-SSE-NEXT:    movq %rcx, %xmm8
; X64-SSE-NEXT:    movhlps {{.*#+}} xmm3 = xmm3[1,1]
; X64-SSE-NEXT:    cvtss2si %xmm3, %rcx
; X64-SSE-NEXT:    movq %rcx, %xmm3
; X64-SSE-NEXT:    punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm8[0]
; X64-SSE-NEXT:    movdqa %xmm3, 112(%rdi)
; X64-SSE-NEXT:    movdqa %xmm7, 96(%rdi)
; X64-SSE-NEXT:    movdqa %xmm2, 80(%rdi)
; X64-SSE-NEXT:    movdqa %xmm6, 64(%rdi)
; X64-SSE-NEXT:    movdqa %xmm1, 48(%rdi)
; X64-SSE-NEXT:    movdqa %xmm5, 32(%rdi)
; X64-SSE-NEXT:    movdqa %xmm0, 16(%rdi)
; X64-SSE-NEXT:    movdqa %xmm4, (%rdi)
; X64-SSE-NEXT:    retq
  %a = call <16 x i64> @llvm.llrint.v16i64.v16f32(<16 x float> %x)
  ret <16 x i64> %a
}
declare <16 x i64> @llvm.llrint.v16i64.v16f32(<16 x float>)
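
; The double-precision variants follow the same pattern with cvtsd2si, using
; unpckhpd (SSE) or vshufpd (AVX) to extract the high lane of each pair.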

define <1 x i64> @llrint_v1i64_v1f64(<1 x double> %x) {
; X64-SSE-LABEL: llrint_v1i64_v1f64:
; X64-SSE:       # %bb.0:
; X64-SSE-NEXT:    cvtsd2si %xmm0, %rax
; X64-SSE-NEXT:    retq
;
; X64-AVX-LABEL: llrint_v1i64_v1f64:
; X64-AVX:       # %bb.0:
; X64-AVX-NEXT:    vcvtsd2si %xmm0, %rax
; X64-AVX-NEXT:    retq
  %a = call <1 x i64> @llvm.llrint.v1i64.v1f64(<1 x double> %x)
  ret <1 x i64> %a
}
declare <1 x i64> @llvm.llrint.v1i64.v1f64(<1 x double>)

define <2 x i64> @llrint_v2i64_v2f64(<2 x double> %x) {
; X64-SSE-LABEL: llrint_v2i64_v2f64:
; X64-SSE:       # %bb.0:
; X64-SSE-NEXT:    cvtsd2si %xmm0, %rax
; X64-SSE-NEXT:    movq %rax, %xmm1
; X64-SSE-NEXT:    unpckhpd {{.*#+}} xmm0 = xmm0[1,1]
; X64-SSE-NEXT:    cvtsd2si %xmm0, %rax
; X64-SSE-NEXT:    movq %rax, %xmm0
; X64-SSE-NEXT:    punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
; X64-SSE-NEXT:    movdqa %xmm1, %xmm0
; X64-SSE-NEXT:    retq
;
; X64-AVX-LABEL: llrint_v2i64_v2f64:
; X64-AVX:       # %bb.0:
; X64-AVX-NEXT:    vcvtsd2si %xmm0, %rax
; X64-AVX-NEXT:    vmovq %rax, %xmm1
; X64-AVX-NEXT:    vshufpd {{.*#+}} xmm0 = xmm0[1,0]
; X64-AVX-NEXT:    vcvtsd2si %xmm0, %rax
; X64-AVX-NEXT:    vmovq %rax, %xmm0
; X64-AVX-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
; X64-AVX-NEXT:    retq
  %a = call <2 x i64> @llvm.llrint.v2i64.v2f64(<2 x double> %x)
  ret <2 x i64> %a
}
declare <2 x i64> @llvm.llrint.v2i64.v2f64(<2 x double>)

define <4 x i64> @llrint_v4i64_v4f64(<4 x double> %x) {
; X64-SSE-LABEL: llrint_v4i64_v4f64:
; X64-SSE:       # %bb.0:
; X64-SSE-NEXT:    cvtsd2si %xmm0, %rax
; X64-SSE-NEXT:    movq %rax, %xmm2
; X64-SSE-NEXT:    unpckhpd {{.*#+}} xmm0 = xmm0[1,1]
; X64-SSE-NEXT:    cvtsd2si %xmm0, %rax
; X64-SSE-NEXT:    movq %rax, %xmm0
; X64-SSE-NEXT:    punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm0[0]
; X64-SSE-NEXT:    cvtsd2si %xmm1, %rax
; X64-SSE-NEXT:    movq %rax, %xmm3
; X64-SSE-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1,1]
; X64-SSE-NEXT:    cvtsd2si %xmm1, %rax
; X64-SSE-NEXT:    movq %rax, %xmm0
; X64-SSE-NEXT:    punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm0[0]
; X64-SSE-NEXT:    movdqa %xmm2, %xmm0
; X64-SSE-NEXT:    movdqa %xmm3, %xmm1
; X64-SSE-NEXT:    retq
  %a = call <4 x i64> @llvm.llrint.v4i64.v4f64(<4 x double> %x)
  ret <4 x i64> %a
}
declare <4 x i64> @llvm.llrint.v4i64.v4f64(<4 x double>)

define <8 x i64> @llrint_v8i64_v8f64(<8 x double> %x) {
; X64-SSE-LABEL: llrint_v8i64_v8f64:
; X64-SSE:       # %bb.0:
; X64-SSE-NEXT:    cvtsd2si %xmm0, %rax
; X64-SSE-NEXT:    movq %rax, %xmm4
; X64-SSE-NEXT:    unpckhpd {{.*#+}} xmm0 = xmm0[1,1]
; X64-SSE-NEXT:    cvtsd2si %xmm0, %rax
; X64-SSE-NEXT:    movq %rax, %xmm0
; X64-SSE-NEXT:    punpcklqdq {{.*#+}} xmm4 = xmm4[0],xmm0[0]
; X64-SSE-NEXT:    cvtsd2si %xmm1, %rax
; X64-SSE-NEXT:    movq %rax, %xmm5
; X64-SSE-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1,1]
; X64-SSE-NEXT:    cvtsd2si %xmm1, %rax
; X64-SSE-NEXT:    movq %rax, %xmm0
; X64-SSE-NEXT:    punpcklqdq {{.*#+}} xmm5 = xmm5[0],xmm0[0]
; X64-SSE-NEXT:    cvtsd2si %xmm2, %rax
; X64-SSE-NEXT:    movq %rax, %xmm6
; X64-SSE-NEXT:    unpckhpd {{.*#+}} xmm2 = xmm2[1,1]
; X64-SSE-NEXT:    cvtsd2si %xmm2, %rax
; X64-SSE-NEXT:    movq %rax, %xmm0
; X64-SSE-NEXT:    punpcklqdq {{.*#+}} xmm6 = xmm6[0],xmm0[0]
; X64-SSE-NEXT:    cvtsd2si %xmm3, %rax
; X64-SSE-NEXT:    movq %rax, %xmm7
; X64-SSE-NEXT:    unpckhpd {{.*#+}} xmm3 = xmm3[1,1]
; X64-SSE-NEXT:    cvtsd2si %xmm3, %rax
; X64-SSE-NEXT:    movq %rax, %xmm0
; X64-SSE-NEXT:    punpcklqdq {{.*#+}} xmm7 = xmm7[0],xmm0[0]
; X64-SSE-NEXT:    movdqa %xmm4, %xmm0
; X64-SSE-NEXT:    movdqa %xmm5, %xmm1
; X64-SSE-NEXT:    movdqa %xmm6, %xmm2
; X64-SSE-NEXT:    movdqa %xmm7, %xmm3
; X64-SSE-NEXT:    retq
  %a = call <8 x i64> @llvm.llrint.v8i64.v8f64(<8 x double> %x)
  ret <8 x i64> %a
}
declare <8 x i64> @llvm.llrint.v8i64.v8f64(<8 x double>)