; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+sse2 -O3 | FileCheck %s --check-prefixes=SSE,SSE-32
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 -O3 | FileCheck %s --check-prefixes=SSE,SSE-64
; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+sse4.1 -O3 | FileCheck %s --check-prefixes=SSE41,SSE41-32
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 -O3 | FileCheck %s --check-prefixes=SSE41,SSE41-64
; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx -O3 | FileCheck %s --check-prefixes=AVX,AVX1,AVX-32,AVX1-32
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx -O3 | FileCheck %s --check-prefixes=AVX,AVX1,AVX-64,AVX1-64
; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx512f -O3 | FileCheck %s --check-prefixes=AVX,AVX512F,AVX-32
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f -O3 | FileCheck %s --check-prefixes=AVX,AVX512F,AVX-64,AVX512F-64
; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx512vl -O3 | FileCheck %s --check-prefixes=AVX,AVX512VL,AVX-32,AVX512VL-32
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vl -O3 | FileCheck %s --check-prefixes=AVX,AVX512VL,AVX-64,AVX512VL-64
; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx512f,+avx512dq -O3 | FileCheck %s --check-prefixes=AVX,AVX512DQ,AVX512DQ-32
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512dq -O3 | FileCheck %s --check-prefixes=AVX,AVX512DQ,AVX512DQ-64
; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx512dq,+avx512vl -O3 | FileCheck %s --check-prefixes=AVX,AVX512DQVL,AVX512DQVL-32
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512dq,+avx512vl -O3 | FileCheck %s --check-prefixes=AVX,AVX512DQVL,AVX512DQVL-64

declare <2 x float> @llvm.experimental.constrained.sitofp.v2f32.v2i32(<2 x i32>, metadata, metadata)
declare <2 x float> @llvm.experimental.constrained.uitofp.v2f32.v2i32(<2 x i32>, metadata, metadata)
declare <2 x float> @llvm.experimental.constrained.sitofp.v2f32.v2i64(<2 x i64>, metadata, metadata)
declare <2 x float> @llvm.experimental.constrained.uitofp.v2f32.v2i64(<2 x i64>, metadata, metadata)
declare <4 x float> @llvm.experimental.constrained.sitofp.v4f32.v4i1(<4 x i1>, metadata, metadata)
declare <4 x float> @llvm.experimental.constrained.uitofp.v4f32.v4i1(<4 x i1>, metadata, metadata)
declare <4 x float> @llvm.experimental.constrained.sitofp.v4f32.v4i8(<4 x i8>, metadata, metadata)
declare <4 x float> @llvm.experimental.constrained.uitofp.v4f32.v4i8(<4 x i8>, metadata, metadata)
declare <4 x float> @llvm.experimental.constrained.sitofp.v4f32.v4i16(<4 x i16>, metadata, metadata)
declare <4 x float> @llvm.experimental.constrained.uitofp.v4f32.v4i16(<4 x i16>, metadata, metadata)
declare <4 x float> @llvm.experimental.constrained.sitofp.v4f32.v4i32(<4 x i32>, metadata, metadata)
declare <4 x float> @llvm.experimental.constrained.uitofp.v4f32.v4i32(<4 x i32>, metadata, metadata)
declare <2 x double> @llvm.experimental.constrained.sitofp.v2f64.v2i1(<2 x i1>, metadata, metadata)
declare <2 x double> @llvm.experimental.constrained.uitofp.v2f64.v2i1(<2 x i1>, metadata, metadata)
declare <2 x double> @llvm.experimental.constrained.sitofp.v2f64.v2i8(<2 x i8>, metadata, metadata)
declare <2 x double> @llvm.experimental.constrained.uitofp.v2f64.v2i8(<2 x i8>, metadata, metadata)
declare <2 x double> @llvm.experimental.constrained.sitofp.v2f64.v2i16(<2 x i16>, metadata, metadata)
declare <2 x double> @llvm.experimental.constrained.uitofp.v2f64.v2i16(<2 x i16>, metadata, metadata)
declare <2 x double> @llvm.experimental.constrained.sitofp.v2f64.v2i32(<2 x i32>, metadata, metadata)
declare <2 x double> @llvm.experimental.constrained.uitofp.v2f64.v2i32(<2 x i32>, metadata, metadata)
declare <2 x double> @llvm.experimental.constrained.sitofp.v2f64.v2i64(<2 x i64>, metadata, metadata)
declare <2 x double> @llvm.experimental.constrained.uitofp.v2f64.v2i64(<2 x i64>, metadata, metadata)

define <2 x float> @sitofp_v2i32_v2f32(<2 x i32> %x) #0 {
; SSE-LABEL: sitofp_v2i32_v2f32:
; SSE: # %bb.0:
; SSE-NEXT: movq {{.*#+}} xmm0 = xmm0[0],zero
; SSE-NEXT: cvtdq2ps %xmm0, %xmm0
; SSE-NEXT: ret{{[l|q]}}
;
; SSE41-LABEL: sitofp_v2i32_v2f32:
; SSE41: # %bb.0:
; SSE41-NEXT: movq {{.*#+}} xmm0 = xmm0[0],zero
; SSE41-NEXT: cvtdq2ps %xmm0, %xmm0
; SSE41-NEXT: ret{{[l|q]}}
;
; AVX-LABEL: sitofp_v2i32_v2f32:
; AVX: # %bb.0:
; AVX-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero
; AVX-NEXT: vcvtdq2ps %xmm0, %xmm0
; AVX-NEXT: ret{{[l|q]}}
  %result = call <2 x float> @llvm.experimental.constrained.sitofp.v2f32.v2i32(<2 x i32> %x,
              metadata !"round.dynamic",
              metadata !"fpexcept.strict") #0
  ret <2 x float> %result
}

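; uitofp from v2i32 has no direct SSE instruction: the pre-AVX512 lowerings
; below zero-extend each lane into the mantissa of a double biased by 2^52
; (4.503599627370496E+15), subtract the bias, and narrow with cvtpd2ps.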
define <2 x float> @uitofp_v2i32_v2f32(<2 x i32> %x) #0 {
; SSE-LABEL: uitofp_v2i32_v2f32:
; SSE: # %bb.0:
; SSE-NEXT: xorpd %xmm1, %xmm1
; SSE-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE-NEXT: movapd {{.*#+}} xmm1 = [4.503599627370496E+15,4.503599627370496E+15]
; SSE-NEXT: orpd %xmm1, %xmm0
; SSE-NEXT: subpd %xmm1, %xmm0
; SSE-NEXT: cvtpd2ps %xmm0, %xmm0
; SSE-NEXT: ret{{[l|q]}}
;
; SSE41-LABEL: uitofp_v2i32_v2f32:
; SSE41: # %bb.0:
; SSE41-NEXT: xorpd %xmm1, %xmm1
; SSE41-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE41-NEXT: movapd {{.*#+}} xmm1 = [4.503599627370496E+15,4.503599627370496E+15]
; SSE41-NEXT: orpd %xmm1, %xmm0
; SSE41-NEXT: subpd %xmm1, %xmm0
; SSE41-NEXT: cvtpd2ps %xmm0, %xmm0
; SSE41-NEXT: ret{{[l|q]}}
;
; AVX1-LABEL: uitofp_v2i32_v2f32:
; AVX1: # %bb.0:
; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
; AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = [4.503599627370496E+15,4.503599627370496E+15]
; AVX1-NEXT: vpor %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vsubpd %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vcvtpd2ps %xmm0, %xmm0
; AVX1-NEXT: ret{{[l|q]}}
;
; AVX512F-LABEL: uitofp_v2i32_v2f32:
; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero
; AVX512F-NEXT: vcvtudq2ps %zmm0, %zmm0
; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: ret{{[l|q]}}
;
; AVX512VL-LABEL: uitofp_v2i32_v2f32:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero
; AVX512VL-NEXT: vcvtudq2ps %xmm0, %xmm0
; AVX512VL-NEXT: ret{{[l|q]}}
;
; AVX512DQ-LABEL: uitofp_v2i32_v2f32:
; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero
; AVX512DQ-NEXT: vcvtudq2ps %zmm0, %zmm0
; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: ret{{[l|q]}}
;
; AVX512DQVL-LABEL: uitofp_v2i32_v2f32:
; AVX512DQVL: # %bb.0:
; AVX512DQVL-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero
; AVX512DQVL-NEXT: vcvtudq2ps %xmm0, %xmm0
; AVX512DQVL-NEXT: ret{{[l|q]}}
  %result = call <2 x float> @llvm.experimental.constrained.uitofp.v2f32.v2i32(<2 x i32> %x,
              metadata !"round.dynamic",
              metadata !"fpexcept.strict") #0
  ret <2 x float> %result
}

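; i64 -> f32: 32-bit targets spill each element and convert via x87
; fildll/fstps; 64-bit targets extract the elements and use scalar cvtsi2ss.
; Only AVX512DQ(+VL) has a packed vcvtqq2ps.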
define <2 x float> @sitofp_v2i64_v2f32(<2 x i64> %x) #0 {
; SSE-32-LABEL: sitofp_v2i64_v2f32:
; SSE-32: # %bb.0:
; SSE-32-NEXT: pushl %ebp
; SSE-32-NEXT: .cfi_def_cfa_offset 8
; SSE-32-NEXT: .cfi_offset %ebp, -8
; SSE-32-NEXT: movl %esp, %ebp
; SSE-32-NEXT: .cfi_def_cfa_register %ebp
; SSE-32-NEXT: andl $-8, %esp
; SSE-32-NEXT: subl $24, %esp
; SSE-32-NEXT: movq %xmm0, {{[0-9]+}}(%esp)
; SSE-32-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
; SSE-32-NEXT: movq %xmm0, {{[0-9]+}}(%esp)
; SSE-32-NEXT: fildll {{[0-9]+}}(%esp)
; SSE-32-NEXT: fstps (%esp)
; SSE-32-NEXT: fildll {{[0-9]+}}(%esp)
; SSE-32-NEXT: fstps {{[0-9]+}}(%esp)
; SSE-32-NEXT: wait
; SSE-32-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE-32-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; SSE-32-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE-32-NEXT: movl %ebp, %esp
; SSE-32-NEXT: popl %ebp
; SSE-32-NEXT: .cfi_def_cfa %esp, 4
; SSE-32-NEXT: retl
;
; SSE-64-LABEL: sitofp_v2i64_v2f32:
; SSE-64: # %bb.0:
; SSE-64-NEXT: movq %xmm0, %rax
; SSE-64-NEXT: cvtsi2ss %rax, %xmm1
; SSE-64-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
; SSE-64-NEXT: movq %xmm0, %rax
; SSE-64-NEXT: xorps %xmm0, %xmm0
; SSE-64-NEXT: cvtsi2ss %rax, %xmm0
; SSE-64-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; SSE-64-NEXT: movaps %xmm1, %xmm0
; SSE-64-NEXT: retq
;
; SSE41-32-LABEL: sitofp_v2i64_v2f32:
; SSE41-32: # %bb.0:
; SSE41-32-NEXT: pushl %ebp
; SSE41-32-NEXT: .cfi_def_cfa_offset 8
; SSE41-32-NEXT: .cfi_offset %ebp, -8
; SSE41-32-NEXT: movl %esp, %ebp
; SSE41-32-NEXT: .cfi_def_cfa_register %ebp
; SSE41-32-NEXT: andl $-8, %esp
; SSE41-32-NEXT: subl $24, %esp
; SSE41-32-NEXT: movq %xmm0, {{[0-9]+}}(%esp)
; SSE41-32-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
; SSE41-32-NEXT: movq %xmm0, {{[0-9]+}}(%esp)
; SSE41-32-NEXT: fildll {{[0-9]+}}(%esp)
; SSE41-32-NEXT: fstps (%esp)
; SSE41-32-NEXT: fildll {{[0-9]+}}(%esp)
; SSE41-32-NEXT: fstps {{[0-9]+}}(%esp)
; SSE41-32-NEXT: wait
; SSE41-32-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE41-32-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; SSE41-32-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE41-32-NEXT: movl %ebp, %esp
; SSE41-32-NEXT: popl %ebp
; SSE41-32-NEXT: .cfi_def_cfa %esp, 4
; SSE41-32-NEXT: retl
;
; SSE41-64-LABEL: sitofp_v2i64_v2f32:
; SSE41-64: # %bb.0:
; SSE41-64-NEXT: movq %xmm0, %rax
; SSE41-64-NEXT: cvtsi2ss %rax, %xmm1
; SSE41-64-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
; SSE41-64-NEXT: movq %xmm0, %rax
; SSE41-64-NEXT: xorps %xmm0, %xmm0
; SSE41-64-NEXT: cvtsi2ss %rax, %xmm0
; SSE41-64-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; SSE41-64-NEXT: movaps %xmm1, %xmm0
; SSE41-64-NEXT: retq
;
; AVX-32-LABEL: sitofp_v2i64_v2f32:
; AVX-32: # %bb.0:
; AVX-32-NEXT: pushl %ebp
; AVX-32-NEXT: .cfi_def_cfa_offset 8
; AVX-32-NEXT: .cfi_offset %ebp, -8
; AVX-32-NEXT: movl %esp, %ebp
; AVX-32-NEXT: .cfi_def_cfa_register %ebp
; AVX-32-NEXT: andl $-8, %esp
; AVX-32-NEXT: subl $24, %esp
; AVX-32-NEXT: vmovlps %xmm0, {{[0-9]+}}(%esp)
; AVX-32-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[2,3,2,3]
; AVX-32-NEXT: vmovlps %xmm0, {{[0-9]+}}(%esp)
; AVX-32-NEXT: fildll {{[0-9]+}}(%esp)
; AVX-32-NEXT: fstps (%esp)
; AVX-32-NEXT: fildll {{[0-9]+}}(%esp)
; AVX-32-NEXT: fstps {{[0-9]+}}(%esp)
; AVX-32-NEXT: wait
; AVX-32-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX-32-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[2,3]
; AVX-32-NEXT: movl %ebp, %esp
; AVX-32-NEXT: popl %ebp
; AVX-32-NEXT: .cfi_def_cfa %esp, 4
; AVX-32-NEXT: retl
;
; AVX-64-LABEL: sitofp_v2i64_v2f32:
; AVX-64: # %bb.0:
; AVX-64-NEXT: vpextrq $1, %xmm0, %rax
; AVX-64-NEXT: vcvtsi2ss %rax, %xmm1, %xmm1
; AVX-64-NEXT: vmovq %xmm0, %rax
; AVX-64-NEXT: vcvtsi2ss %rax, %xmm2, %xmm0
; AVX-64-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[2,3]
; AVX-64-NEXT: retq
;
; AVX512DQ-32-LABEL: sitofp_v2i64_v2f32:
; AVX512DQ-32: # %bb.0:
; AVX512DQ-32-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512DQ-32-NEXT: vcvtqq2ps %zmm0, %ymm1
; AVX512DQ-32-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[2,3,2,3]
; AVX512DQ-32-NEXT: vcvtqq2ps %zmm0, %ymm0
; AVX512DQ-32-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],zero,zero
; AVX512DQ-32-NEXT: vzeroupper
; AVX512DQ-32-NEXT: retl
;
; AVX512DQ-64-LABEL: sitofp_v2i64_v2f32:
; AVX512DQ-64: # %bb.0:
; AVX512DQ-64-NEXT: vpextrq $1, %xmm0, %rax
; AVX512DQ-64-NEXT: vcvtsi2ss %rax, %xmm1, %xmm1
; AVX512DQ-64-NEXT: vmovq %xmm0, %rax
; AVX512DQ-64-NEXT: vcvtsi2ss %rax, %xmm2, %xmm0
; AVX512DQ-64-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[2,3]
; AVX512DQ-64-NEXT: retq
;
; AVX512DQVL-LABEL: sitofp_v2i64_v2f32:
; AVX512DQVL: # %bb.0:
; AVX512DQVL-NEXT: vcvtqq2ps %xmm0, %xmm0
; AVX512DQVL-NEXT: ret{{[l|q]}}
  %result = call <2 x float> @llvm.experimental.constrained.sitofp.v2f32.v2i64(<2 x i64> %x,
              metadata !"round.dynamic",
              metadata !"fpexcept.strict") #0
  ret <2 x float> %result
}

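; u64 -> f32 without AVX512: when the sign bit is set, halve the value with
; a shift/or that keeps the rounding bit, convert as signed, then double the
; result; the 32-bit x87 path instead biases the fild result through a
; sign-indexed fadds from the constant pool.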
define <2 x float> @uitofp_v2i64_v2f32(<2 x i64> %x) #0 {
; SSE-32-LABEL: uitofp_v2i64_v2f32:
; SSE-32: # %bb.0:
; SSE-32-NEXT: pushl %ebp
; SSE-32-NEXT: .cfi_def_cfa_offset 8
; SSE-32-NEXT: .cfi_offset %ebp, -8
; SSE-32-NEXT: movl %esp, %ebp
; SSE-32-NEXT: .cfi_def_cfa_register %ebp
; SSE-32-NEXT: andl $-8, %esp
; SSE-32-NEXT: subl $24, %esp
; SSE-32-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
; SSE-32-NEXT: movq %xmm1, {{[0-9]+}}(%esp)
; SSE-32-NEXT: movq %xmm0, {{[0-9]+}}(%esp)
; SSE-32-NEXT: pshufd {{.*#+}} xmm1 = xmm0[3,3,3,3]
; SSE-32-NEXT: movd %xmm1, %eax
; SSE-32-NEXT: shrl $31, %eax
; SSE-32-NEXT: fildll {{[0-9]+}}(%esp)
; SSE-32-NEXT: fadds {{\.?LCPI[0-9]+_[0-9]+}}(,%eax,4)
; SSE-32-NEXT: fstps (%esp)
; SSE-32-NEXT: wait
; SSE-32-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
; SSE-32-NEXT: movd %xmm0, %eax
; SSE-32-NEXT: shrl $31, %eax
; SSE-32-NEXT: fildll {{[0-9]+}}(%esp)
; SSE-32-NEXT: fadds {{\.?LCPI[0-9]+_[0-9]+}}(,%eax,4)
; SSE-32-NEXT: fstps {{[0-9]+}}(%esp)
; SSE-32-NEXT: wait
; SSE-32-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; SSE-32-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE-32-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE-32-NEXT: movl %ebp, %esp
; SSE-32-NEXT: popl %ebp
; SSE-32-NEXT: .cfi_def_cfa %esp, 4
; SSE-32-NEXT: retl
;
; SSE-64-LABEL: uitofp_v2i64_v2f32:
; SSE-64: # %bb.0:
; SSE-64-NEXT: movdqa %xmm0, %xmm1
; SSE-64-NEXT: movq %xmm0, %rax
; SSE-64-NEXT: movq %rax, %rcx
; SSE-64-NEXT: shrq %rcx
; SSE-64-NEXT: movl %eax, %edx
; SSE-64-NEXT: andl $1, %edx
; SSE-64-NEXT: orq %rcx, %rdx
; SSE-64-NEXT: testq %rax, %rax
; SSE-64-NEXT: cmovnsq %rax, %rdx
; SSE-64-NEXT: xorps %xmm0, %xmm0
; SSE-64-NEXT: cvtsi2ss %rdx, %xmm0
; SSE-64-NEXT: jns .LBB3_2
; SSE-64-NEXT: # %bb.1:
; SSE-64-NEXT: addss %xmm0, %xmm0
; SSE-64-NEXT: .LBB3_2:
; SSE-64-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,2,3]
; SSE-64-NEXT: movq %xmm1, %rax
; SSE-64-NEXT: movq %rax, %rcx
; SSE-64-NEXT: shrq %rcx
; SSE-64-NEXT: movl %eax, %edx
; SSE-64-NEXT: andl $1, %edx
; SSE-64-NEXT: orq %rcx, %rdx
; SSE-64-NEXT: testq %rax, %rax
; SSE-64-NEXT: cmovnsq %rax, %rdx
; SSE-64-NEXT: xorps %xmm1, %xmm1
; SSE-64-NEXT: cvtsi2ss %rdx, %xmm1
; SSE-64-NEXT: jns .LBB3_4
; SSE-64-NEXT: # %bb.3:
; SSE-64-NEXT: addss %xmm1, %xmm1
; SSE-64-NEXT: .LBB3_4:
; SSE-64-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE-64-NEXT: retq
;
; SSE41-32-LABEL: uitofp_v2i64_v2f32:
; SSE41-32: # %bb.0:
; SSE41-32-NEXT: pushl %ebp
; SSE41-32-NEXT: .cfi_def_cfa_offset 8
; SSE41-32-NEXT: .cfi_offset %ebp, -8
; SSE41-32-NEXT: movl %esp, %ebp
; SSE41-32-NEXT: .cfi_def_cfa_register %ebp
; SSE41-32-NEXT: andl $-8, %esp
; SSE41-32-NEXT: subl $24, %esp
; SSE41-32-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
; SSE41-32-NEXT: movq %xmm1, {{[0-9]+}}(%esp)
; SSE41-32-NEXT: movq %xmm0, {{[0-9]+}}(%esp)
; SSE41-32-NEXT: pshufd {{.*#+}} xmm1 = xmm0[3,3,3,3]
; SSE41-32-NEXT: movd %xmm1, %eax
; SSE41-32-NEXT: shrl $31, %eax
; SSE41-32-NEXT: fildll {{[0-9]+}}(%esp)
; SSE41-32-NEXT: fadds {{\.?LCPI[0-9]+_[0-9]+}}(,%eax,4)
; SSE41-32-NEXT: fstps (%esp)
; SSE41-32-NEXT: wait
; SSE41-32-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
; SSE41-32-NEXT: movd %xmm0, %eax
; SSE41-32-NEXT: shrl $31, %eax
; SSE41-32-NEXT: fildll {{[0-9]+}}(%esp)
; SSE41-32-NEXT: fadds {{\.?LCPI[0-9]+_[0-9]+}}(,%eax,4)
; SSE41-32-NEXT: fstps {{[0-9]+}}(%esp)
; SSE41-32-NEXT: wait
; SSE41-32-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; SSE41-32-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE41-32-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE41-32-NEXT: movl %ebp, %esp
; SSE41-32-NEXT: popl %ebp
; SSE41-32-NEXT: .cfi_def_cfa %esp, 4
; SSE41-32-NEXT: retl
;
; SSE41-64-LABEL: uitofp_v2i64_v2f32:
; SSE41-64: # %bb.0:
; SSE41-64-NEXT: movdqa %xmm0, %xmm1
; SSE41-64-NEXT: movq %xmm0, %rax
; SSE41-64-NEXT: movq %rax, %rcx
; SSE41-64-NEXT: shrq %rcx
; SSE41-64-NEXT: movl %eax, %edx
; SSE41-64-NEXT: andl $1, %edx
; SSE41-64-NEXT: orq %rcx, %rdx
; SSE41-64-NEXT: testq %rax, %rax
; SSE41-64-NEXT: cmovnsq %rax, %rdx
; SSE41-64-NEXT: xorps %xmm0, %xmm0
; SSE41-64-NEXT: cvtsi2ss %rdx, %xmm0
; SSE41-64-NEXT: jns .LBB3_2
; SSE41-64-NEXT: # %bb.1:
; SSE41-64-NEXT: addss %xmm0, %xmm0
; SSE41-64-NEXT: .LBB3_2:
; SSE41-64-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,2,3]
; SSE41-64-NEXT: movq %xmm1, %rax
; SSE41-64-NEXT: movq %rax, %rcx
; SSE41-64-NEXT: shrq %rcx
; SSE41-64-NEXT: movl %eax, %edx
; SSE41-64-NEXT: andl $1, %edx
; SSE41-64-NEXT: orq %rcx, %rdx
; SSE41-64-NEXT: testq %rax, %rax
; SSE41-64-NEXT: cmovnsq %rax, %rdx
; SSE41-64-NEXT: xorps %xmm1, %xmm1
; SSE41-64-NEXT: cvtsi2ss %rdx, %xmm1
; SSE41-64-NEXT: jns .LBB3_4
; SSE41-64-NEXT: # %bb.3:
; SSE41-64-NEXT: addss %xmm1, %xmm1
; SSE41-64-NEXT: .LBB3_4:
; SSE41-64-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE41-64-NEXT: retq
;
; AVX-32-LABEL: uitofp_v2i64_v2f32:
; AVX-32: # %bb.0:
; AVX-32-NEXT: pushl %ebp
; AVX-32-NEXT: .cfi_def_cfa_offset 8
; AVX-32-NEXT: .cfi_offset %ebp, -8
; AVX-32-NEXT: movl %esp, %ebp
; AVX-32-NEXT: .cfi_def_cfa_register %ebp
; AVX-32-NEXT: andl $-8, %esp
; AVX-32-NEXT: subl $24, %esp
; AVX-32-NEXT: vmovlps %xmm0, {{[0-9]+}}(%esp)
; AVX-32-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[2,3,2,3]
; AVX-32-NEXT: vmovlps %xmm1, {{[0-9]+}}(%esp)
; AVX-32-NEXT: vextractps $1, %xmm0, %eax
; AVX-32-NEXT: shrl $31, %eax
; AVX-32-NEXT: fildll {{[0-9]+}}(%esp)
; AVX-32-NEXT: fadds {{\.?LCPI[0-9]+_[0-9]+}}(,%eax,4)
; AVX-32-NEXT: fstps {{[0-9]+}}(%esp)
; AVX-32-NEXT: wait
; AVX-32-NEXT: vextractps $3, %xmm0, %eax
; AVX-32-NEXT: shrl $31, %eax
; AVX-32-NEXT: fildll {{[0-9]+}}(%esp)
; AVX-32-NEXT: fadds {{\.?LCPI[0-9]+_[0-9]+}}(,%eax,4)
; AVX-32-NEXT: fstps (%esp)
; AVX-32-NEXT: wait
; AVX-32-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX-32-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[2,3]
; AVX-32-NEXT: movl %ebp, %esp
; AVX-32-NEXT: popl %ebp
; AVX-32-NEXT: .cfi_def_cfa %esp, 4
; AVX-32-NEXT: retl
;
; AVX1-64-LABEL: uitofp_v2i64_v2f32:
; AVX1-64: # %bb.0:
; AVX1-64-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
; AVX1-64-NEXT: vpsrlq $1, %xmm0, %xmm2
; AVX1-64-NEXT: vpor %xmm1, %xmm2, %xmm1
; AVX1-64-NEXT: vblendvpd %xmm0, %xmm1, %xmm0, %xmm1
; AVX1-64-NEXT: vpextrq $1, %xmm1, %rax
; AVX1-64-NEXT: vcvtsi2ss %rax, %xmm3, %xmm2
; AVX1-64-NEXT: vmovq %xmm1, %rax
; AVX1-64-NEXT: vcvtsi2ss %rax, %xmm3, %xmm1
; AVX1-64-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0],xmm2[0],zero,zero
; AVX1-64-NEXT: vaddps %xmm1, %xmm1, %xmm2
; AVX1-64-NEXT: vpxor %xmm3, %xmm3, %xmm3
; AVX1-64-NEXT: vpcmpgtq %xmm0, %xmm3, %xmm0
; AVX1-64-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,3,2,3]
; AVX1-64-NEXT: vblendvps %xmm0, %xmm2, %xmm1, %xmm0
; AVX1-64-NEXT: retq
;
; AVX512F-64-LABEL: uitofp_v2i64_v2f32:
; AVX512F-64: # %bb.0:
; AVX512F-64-NEXT: vpextrq $1, %xmm0, %rax
; AVX512F-64-NEXT: vcvtusi2ss %rax, %xmm1, %xmm1
; AVX512F-64-NEXT: vmovq %xmm0, %rax
; AVX512F-64-NEXT: vcvtusi2ss %rax, %xmm2, %xmm0
; AVX512F-64-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[2,3]
; AVX512F-64-NEXT: retq
;
; AVX512VL-64-LABEL: uitofp_v2i64_v2f32:
; AVX512VL-64: # %bb.0:
; AVX512VL-64-NEXT: vpextrq $1, %xmm0, %rax
; AVX512VL-64-NEXT: vcvtusi2ss %rax, %xmm1, %xmm1
; AVX512VL-64-NEXT: vmovq %xmm0, %rax
; AVX512VL-64-NEXT: vcvtusi2ss %rax, %xmm2, %xmm0
; AVX512VL-64-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[2,3]
; AVX512VL-64-NEXT: retq
;
; AVX512DQ-32-LABEL: uitofp_v2i64_v2f32:
; AVX512DQ-32: # %bb.0:
; AVX512DQ-32-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512DQ-32-NEXT: vcvtuqq2ps %zmm0, %ymm1
; AVX512DQ-32-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[2,3,2,3]
; AVX512DQ-32-NEXT: vcvtuqq2ps %zmm0, %ymm0
; AVX512DQ-32-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],zero,zero
; AVX512DQ-32-NEXT: vzeroupper
; AVX512DQ-32-NEXT: retl
;
; AVX512DQ-64-LABEL: uitofp_v2i64_v2f32:
; AVX512DQ-64: # %bb.0:
; AVX512DQ-64-NEXT: vpextrq $1, %xmm0, %rax
; AVX512DQ-64-NEXT: vcvtusi2ss %rax, %xmm1, %xmm1
; AVX512DQ-64-NEXT: vmovq %xmm0, %rax
; AVX512DQ-64-NEXT: vcvtusi2ss %rax, %xmm2, %xmm0
; AVX512DQ-64-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[2,3]
; AVX512DQ-64-NEXT: retq
;
; AVX512DQVL-LABEL: uitofp_v2i64_v2f32:
; AVX512DQVL: # %bb.0:
; AVX512DQVL-NEXT: vcvtuqq2ps %xmm0, %xmm0
; AVX512DQVL-NEXT: ret{{[l|q]}}
  %result = call <2 x float> @llvm.experimental.constrained.uitofp.v2f32.v2i64(<2 x i64> %x,
              metadata !"round.dynamic",
              metadata !"fpexcept.strict") #0
  ret <2 x float> %result
}

define <4 x float> @sitofp_v4i1_v4f32(<4 x i1> %x) #0 {
; SSE-LABEL: sitofp_v4i1_v4f32:
; SSE: # %bb.0:
; SSE-NEXT: pslld $31, %xmm0
; SSE-NEXT: psrad $31, %xmm0
; SSE-NEXT: cvtdq2ps %xmm0, %xmm0
; SSE-NEXT: ret{{[l|q]}}
;
; SSE41-LABEL: sitofp_v4i1_v4f32:
; SSE41: # %bb.0:
; SSE41-NEXT: pslld $31, %xmm0
; SSE41-NEXT: psrad $31, %xmm0
; SSE41-NEXT: cvtdq2ps %xmm0, %xmm0
; SSE41-NEXT: ret{{[l|q]}}
;
; AVX-LABEL: sitofp_v4i1_v4f32:
; AVX: # %bb.0:
; AVX-NEXT: vpslld $31, %xmm0, %xmm0
; AVX-NEXT: vpsrad $31, %xmm0, %xmm0
; AVX-NEXT: vcvtdq2ps %xmm0, %xmm0
; AVX-NEXT: ret{{[l|q]}}
  %result = call <4 x float> @llvm.experimental.constrained.sitofp.v4f32.v4i1(<4 x i1> %x,
              metadata !"round.dynamic",
              metadata !"fpexcept.strict") #0
  ret <4 x float> %result
}

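; Unsigned i1 elements are converted by masking each lane down to 0/1
; before an ordinary signed cvtdq2ps.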
define <4 x float> @uitofp_v4i1_v4f32(<4 x i1> %x) #0 {
; SSE-32-LABEL: uitofp_v4i1_v4f32:
; SSE-32: # %bb.0:
; SSE-32-NEXT: andps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
; SSE-32-NEXT: cvtdq2ps %xmm0, %xmm0
; SSE-32-NEXT: retl
;
; SSE-64-LABEL: uitofp_v4i1_v4f32:
; SSE-64: # %bb.0:
; SSE-64-NEXT: andps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE-64-NEXT: cvtdq2ps %xmm0, %xmm0
; SSE-64-NEXT: retq
;
; SSE41-32-LABEL: uitofp_v4i1_v4f32:
; SSE41-32: # %bb.0:
; SSE41-32-NEXT: andps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
; SSE41-32-NEXT: cvtdq2ps %xmm0, %xmm0
; SSE41-32-NEXT: retl
;
; SSE41-64-LABEL: uitofp_v4i1_v4f32:
; SSE41-64: # %bb.0:
; SSE41-64-NEXT: andps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE41-64-NEXT: cvtdq2ps %xmm0, %xmm0
; SSE41-64-NEXT: retq
;
; AVX1-32-LABEL: uitofp_v4i1_v4f32:
; AVX1-32: # %bb.0:
; AVX1-32-NEXT: vandps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; AVX1-32-NEXT: vcvtdq2ps %xmm0, %xmm0
; AVX1-32-NEXT: retl
;
; AVX1-64-LABEL: uitofp_v4i1_v4f32:
; AVX1-64: # %bb.0:
; AVX1-64-NEXT: vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; AVX1-64-NEXT: vcvtdq2ps %xmm0, %xmm0
; AVX1-64-NEXT: retq
;
; AVX512F-LABEL: uitofp_v4i1_v4f32:
; AVX512F: # %bb.0:
; AVX512F-NEXT: vbroadcastss {{.*#+}} xmm1 = [1,1,1,1]
; AVX512F-NEXT: vandps %xmm1, %xmm0, %xmm0
; AVX512F-NEXT: vcvtdq2ps %xmm0, %xmm0
; AVX512F-NEXT: ret{{[l|q]}}
;
; AVX512VL-32-LABEL: uitofp_v4i1_v4f32:
; AVX512VL-32: # %bb.0:
; AVX512VL-32-NEXT: vpandd {{\.?LCPI[0-9]+_[0-9]+}}{1to4}, %xmm0, %xmm0
; AVX512VL-32-NEXT: vcvtdq2ps %xmm0, %xmm0
; AVX512VL-32-NEXT: retl
;
; AVX512VL-64-LABEL: uitofp_v4i1_v4f32:
; AVX512VL-64: # %bb.0:
; AVX512VL-64-NEXT: vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm0, %xmm0
; AVX512VL-64-NEXT: vcvtdq2ps %xmm0, %xmm0
; AVX512VL-64-NEXT: retq
;
; AVX512DQ-LABEL: uitofp_v4i1_v4f32:
; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vbroadcastss {{.*#+}} xmm1 = [1,1,1,1]
; AVX512DQ-NEXT: vandps %xmm1, %xmm0, %xmm0
; AVX512DQ-NEXT: vcvtdq2ps %xmm0, %xmm0
; AVX512DQ-NEXT: ret{{[l|q]}}
;
; AVX512DQVL-32-LABEL: uitofp_v4i1_v4f32:
; AVX512DQVL-32: # %bb.0:
; AVX512DQVL-32-NEXT: vandps {{\.?LCPI[0-9]+_[0-9]+}}{1to4}, %xmm0, %xmm0
; AVX512DQVL-32-NEXT: vcvtdq2ps %xmm0, %xmm0
; AVX512DQVL-32-NEXT: retl
;
; AVX512DQVL-64-LABEL: uitofp_v4i1_v4f32:
; AVX512DQVL-64: # %bb.0:
; AVX512DQVL-64-NEXT: vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm0, %xmm0
; AVX512DQVL-64-NEXT: vcvtdq2ps %xmm0, %xmm0
; AVX512DQVL-64-NEXT: retq
  %result = call <4 x float> @llvm.experimental.constrained.uitofp.v4f32.v4i1(<4 x i1> %x,
              metadata !"round.dynamic",
              metadata !"fpexcept.strict") #0
  ret <4 x float> %result
}

define <4 x float> @sitofp_v4i8_v4f32(<4 x i8> %x) #0 {
; SSE-LABEL: sitofp_v4i8_v4f32:
; SSE: # %bb.0:
; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
; SSE-NEXT: psrad $24, %xmm0
; SSE-NEXT: cvtdq2ps %xmm0, %xmm0
; SSE-NEXT: ret{{[l|q]}}
;
; SSE41-LABEL: sitofp_v4i8_v4f32:
; SSE41: # %bb.0:
; SSE41-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE41-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
; SSE41-NEXT: psrad $24, %xmm0
; SSE41-NEXT: cvtdq2ps %xmm0, %xmm0
; SSE41-NEXT: ret{{[l|q]}}
;
; AVX-LABEL: sitofp_v4i8_v4f32:
; AVX: # %bb.0:
; AVX-NEXT: vpmovsxbd %xmm0, %xmm0
; AVX-NEXT: vcvtdq2ps %xmm0, %xmm0
; AVX-NEXT: ret{{[l|q]}}
  %result = call <4 x float> @llvm.experimental.constrained.sitofp.v4f32.v4i8(<4 x i8> %x,
              metadata !"round.dynamic",
              metadata !"fpexcept.strict") #0
  ret <4 x float> %result
}

define <4 x float> @uitofp_v4i8_v4f32(<4 x i8> %x) #0 {
; SSE-LABEL: uitofp_v4i8_v4f32:
; SSE: # %bb.0:
; SSE-NEXT: pxor %xmm1, %xmm1
; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; SSE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; SSE-NEXT: cvtdq2ps %xmm0, %xmm0
; SSE-NEXT: ret{{[l|q]}}
;
; SSE41-LABEL: uitofp_v4i8_v4f32:
; SSE41: # %bb.0:
; SSE41-NEXT: pxor %xmm1, %xmm1
; SSE41-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; SSE41-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; SSE41-NEXT: cvtdq2ps %xmm0, %xmm0
; SSE41-NEXT: ret{{[l|q]}}
;
; AVX-LABEL: uitofp_v4i8_v4f32:
; AVX: # %bb.0:
; AVX-NEXT: vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
; AVX-NEXT: vcvtdq2ps %xmm0, %xmm0
; AVX-NEXT: ret{{[l|q]}}
  %result = call <4 x float> @llvm.experimental.constrained.uitofp.v4f32.v4i8(<4 x i8> %x,
              metadata !"round.dynamic",
              metadata !"fpexcept.strict") #0
  ret <4 x float> %result
}

define <4 x float> @sitofp_v4i16_v4f32(<4 x i16> %x) #0 {
; SSE-LABEL: sitofp_v4i16_v4f32:
; SSE: # %bb.0:
; SSE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
; SSE-NEXT: psrad $16, %xmm0
; SSE-NEXT: cvtdq2ps %xmm0, %xmm0
; SSE-NEXT: ret{{[l|q]}}
;
; SSE41-LABEL: sitofp_v4i16_v4f32:
; SSE41: # %bb.0:
; SSE41-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
; SSE41-NEXT: psrad $16, %xmm0
; SSE41-NEXT: cvtdq2ps %xmm0, %xmm0
; SSE41-NEXT: ret{{[l|q]}}
;
; AVX-LABEL: sitofp_v4i16_v4f32:
; AVX: # %bb.0:
; AVX-NEXT: vpmovsxwd %xmm0, %xmm0
; AVX-NEXT: vcvtdq2ps %xmm0, %xmm0
; AVX-NEXT: ret{{[l|q]}}
  %result = call <4 x float> @llvm.experimental.constrained.sitofp.v4f32.v4i16(<4 x i16> %x,
              metadata !"round.dynamic",
              metadata !"fpexcept.strict") #0
  ret <4 x float> %result
}

define <4 x float> @uitofp_v4i16_v4f32(<4 x i16> %x) #0 {
; SSE-LABEL: uitofp_v4i16_v4f32:
; SSE: # %bb.0:
; SSE-NEXT: pxor %xmm1, %xmm1
; SSE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; SSE-NEXT: cvtdq2ps %xmm0, %xmm0
; SSE-NEXT: ret{{[l|q]}}
;
; SSE41-LABEL: uitofp_v4i16_v4f32:
; SSE41: # %bb.0:
; SSE41-NEXT: pxor %xmm1, %xmm1
; SSE41-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; SSE41-NEXT: cvtdq2ps %xmm0, %xmm0
; SSE41-NEXT: ret{{[l|q]}}
;
; AVX-LABEL: uitofp_v4i16_v4f32:
; AVX: # %bb.0:
; AVX-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; AVX-NEXT: vcvtdq2ps %xmm0, %xmm0
; AVX-NEXT: ret{{[l|q]}}
  %result = call <4 x float> @llvm.experimental.constrained.uitofp.v4f32.v4i16(<4 x i16> %x,
              metadata !"round.dynamic",
              metadata !"fpexcept.strict") #0
  ret <4 x float> %result
}

define <4 x float> @sitofp_v4i32_v4f32(<4 x i32> %x) #0 {
; SSE-LABEL: sitofp_v4i32_v4f32:
; SSE: # %bb.0:
; SSE-NEXT: cvtdq2ps %xmm0, %xmm0
; SSE-NEXT: ret{{[l|q]}}
;
; SSE41-LABEL: sitofp_v4i32_v4f32:
; SSE41: # %bb.0:
; SSE41-NEXT: cvtdq2ps %xmm0, %xmm0
; SSE41-NEXT: ret{{[l|q]}}
;
; AVX-LABEL: sitofp_v4i32_v4f32:
; AVX: # %bb.0:
; AVX-NEXT: vcvtdq2ps %xmm0, %xmm0
; AVX-NEXT: ret{{[l|q]}}
  %result = call <4 x float> @llvm.experimental.constrained.sitofp.v4f32.v4i32(<4 x i32> %x,
              metadata !"round.dynamic",
              metadata !"fpexcept.strict") #0
  ret <4 x float> %result
}

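; u32 -> f32 without AVX512: split each lane into 16-bit halves, convert
; both halves exactly via magic constants, then add them back together.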
define <4 x float> @uitofp_v4i32_v4f32(<4 x i32> %x) #0 {
; SSE-32-LABEL: uitofp_v4i32_v4f32:
; SSE-32: # %bb.0:
; SSE-32-NEXT: movdqa {{.*#+}} xmm1 = [65535,65535,65535,65535]
; SSE-32-NEXT: pand %xmm0, %xmm1
; SSE-32-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1
; SSE-32-NEXT: psrld $16, %xmm0
; SSE-32-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
; SSE-32-NEXT: subps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
; SSE-32-NEXT: addps %xmm1, %xmm0
; SSE-32-NEXT: retl
;
; SSE-64-LABEL: uitofp_v4i32_v4f32:
; SSE-64: # %bb.0:
; SSE-64-NEXT: movdqa {{.*#+}} xmm1 = [65535,65535,65535,65535]
; SSE-64-NEXT: pand %xmm0, %xmm1
; SSE-64-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
; SSE-64-NEXT: psrld $16, %xmm0
; SSE-64-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE-64-NEXT: subps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE-64-NEXT: addps %xmm1, %xmm0
; SSE-64-NEXT: retq
;
; SSE41-32-LABEL: uitofp_v4i32_v4f32:
; SSE41-32: # %bb.0:
; SSE41-32-NEXT: movdqa {{.*#+}} xmm1 = [65535,65535,65535,65535]
; SSE41-32-NEXT: pand %xmm0, %xmm1
; SSE41-32-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1
; SSE41-32-NEXT: psrld $16, %xmm0
; SSE41-32-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
; SSE41-32-NEXT: subps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
; SSE41-32-NEXT: addps %xmm1, %xmm0
; SSE41-32-NEXT: retl
;
; SSE41-64-LABEL: uitofp_v4i32_v4f32:
; SSE41-64: # %bb.0:
; SSE41-64-NEXT: movdqa {{.*#+}} xmm1 = [65535,65535,65535,65535]
; SSE41-64-NEXT: pand %xmm0, %xmm1
; SSE41-64-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
; SSE41-64-NEXT: psrld $16, %xmm0
; SSE41-64-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE41-64-NEXT: subps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE41-64-NEXT: addps %xmm1, %xmm0
; SSE41-64-NEXT: retq
;
; AVX1-32-LABEL: uitofp_v4i32_v4f32:
; AVX1-32: # %bb.0:
; AVX1-32-NEXT: vpblendw {{.*#+}} xmm1 = xmm0[0],mem[1],xmm0[2],mem[3],xmm0[4],mem[5],xmm0[6],mem[7]
; AVX1-32-NEXT: vpsrld $16, %xmm0, %xmm0
; AVX1-32-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],mem[1],xmm0[2],mem[3],xmm0[4],mem[5],xmm0[6],mem[7]
; AVX1-32-NEXT: vsubps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; AVX1-32-NEXT: vaddps %xmm0, %xmm1, %xmm0
; AVX1-32-NEXT: retl
;
; AVX1-64-LABEL: uitofp_v4i32_v4f32:
; AVX1-64: # %bb.0:
; AVX1-64-NEXT: vpblendw {{.*#+}} xmm1 = xmm0[0],mem[1],xmm0[2],mem[3],xmm0[4],mem[5],xmm0[6],mem[7]
; AVX1-64-NEXT: vpsrld $16, %xmm0, %xmm0
; AVX1-64-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],mem[1],xmm0[2],mem[3],xmm0[4],mem[5],xmm0[6],mem[7]
; AVX1-64-NEXT: vsubps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; AVX1-64-NEXT: vaddps %xmm0, %xmm1, %xmm0
; AVX1-64-NEXT: retq
;
; AVX512F-LABEL: uitofp_v4i32_v4f32:
; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovaps %xmm0, %xmm0
; AVX512F-NEXT: vcvtudq2ps %zmm0, %zmm0
; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: ret{{[l|q]}}
;
; AVX512VL-LABEL: uitofp_v4i32_v4f32:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vcvtudq2ps %xmm0, %xmm0
; AVX512VL-NEXT: ret{{[l|q]}}
;
; AVX512DQ-LABEL: uitofp_v4i32_v4f32:
; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vmovaps %xmm0, %xmm0
; AVX512DQ-NEXT: vcvtudq2ps %zmm0, %zmm0
; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: ret{{[l|q]}}
;
; AVX512DQVL-LABEL: uitofp_v4i32_v4f32:
; AVX512DQVL: # %bb.0:
; AVX512DQVL-NEXT: vcvtudq2ps %xmm0, %xmm0
; AVX512DQVL-NEXT: ret{{[l|q]}}
  %result = call <4 x float> @llvm.experimental.constrained.uitofp.v4f32.v4i32(<4 x i32> %x,
              metadata !"round.dynamic",
              metadata !"fpexcept.strict") #0
  ret <4 x float> %result
}

define <2 x double> @sitofp_v2i1_v2f64(<2 x i1> %x) #0 {
; SSE-LABEL: sitofp_v2i1_v2f64:
; SSE: # %bb.0:
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SSE-NEXT: pslld $31, %xmm0
; SSE-NEXT: psrad $31, %xmm0
; SSE-NEXT: cvtdq2pd %xmm0, %xmm0
; SSE-NEXT: ret{{[l|q]}}
;
; SSE41-LABEL: sitofp_v2i1_v2f64:
; SSE41: # %bb.0:
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SSE41-NEXT: pslld $31, %xmm0
; SSE41-NEXT: psrad $31, %xmm0
; SSE41-NEXT: cvtdq2pd %xmm0, %xmm0
; SSE41-NEXT: ret{{[l|q]}}
;
; AVX-LABEL: sitofp_v2i1_v2f64:
; AVX: # %bb.0:
; AVX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; AVX-NEXT: vpslld $31, %xmm0, %xmm0
; AVX-NEXT: vpsrad $31, %xmm0, %xmm0
; AVX-NEXT: vcvtdq2pd %xmm0, %xmm0
; AVX-NEXT: ret{{[l|q]}}
  %result = call <2 x double> @llvm.experimental.constrained.sitofp.v2f64.v2i1(<2 x i1> %x,
              metadata !"round.dynamic",
              metadata !"fpexcept.strict") #0
  ret <2 x double> %result
}

define <2 x double> @uitofp_v2i1_v2f64(<2 x i1> %x) #0 {
; SSE-32-LABEL: uitofp_v2i1_v2f64:
; SSE-32: # %bb.0:
; SSE-32-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SSE-32-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
; SSE-32-NEXT: cvtdq2pd %xmm0, %xmm0
; SSE-32-NEXT: retl
;
; SSE-64-LABEL: uitofp_v2i1_v2f64:
; SSE-64: # %bb.0:
; SSE-64-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SSE-64-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE-64-NEXT: cvtdq2pd %xmm0, %xmm0
; SSE-64-NEXT: retq
;
; SSE41-32-LABEL: uitofp_v2i1_v2f64:
; SSE41-32: # %bb.0:
; SSE41-32-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SSE41-32-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
; SSE41-32-NEXT: cvtdq2pd %xmm0, %xmm0
; SSE41-32-NEXT: retl
;
; SSE41-64-LABEL: uitofp_v2i1_v2f64:
; SSE41-64: # %bb.0:
; SSE41-64-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SSE41-64-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE41-64-NEXT: cvtdq2pd %xmm0, %xmm0
; SSE41-64-NEXT: retq
;
; AVX1-32-LABEL: uitofp_v2i1_v2f64:
; AVX1-32: # %bb.0:
; AVX1-32-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,2,2,3]
; AVX1-32-NEXT: vandps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; AVX1-32-NEXT: vcvtdq2pd %xmm0, %xmm0
; AVX1-32-NEXT: retl
;
; AVX1-64-LABEL: uitofp_v2i1_v2f64:
; AVX1-64: # %bb.0:
; AVX1-64-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,2,2,3]
; AVX1-64-NEXT: vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; AVX1-64-NEXT: vcvtdq2pd %xmm0, %xmm0
; AVX1-64-NEXT: retq
;
; AVX512F-LABEL: uitofp_v2i1_v2f64:
; AVX512F: # %bb.0:
; AVX512F-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,2,2,3]
; AVX512F-NEXT: vbroadcastss {{.*#+}} xmm1 = [1,1,1,1]
; AVX512F-NEXT: vandps %xmm1, %xmm0, %xmm0
; AVX512F-NEXT: vcvtdq2pd %xmm0, %xmm0
; AVX512F-NEXT: ret{{[l|q]}}
;
; AVX512VL-32-LABEL: uitofp_v2i1_v2f64:
; AVX512VL-32: # %bb.0:
; AVX512VL-32-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; AVX512VL-32-NEXT: vpandd {{\.?LCPI[0-9]+_[0-9]+}}{1to4}, %xmm0, %xmm0
; AVX512VL-32-NEXT: vcvtdq2pd %xmm0, %xmm0
; AVX512VL-32-NEXT: retl
;
; AVX512VL-64-LABEL: uitofp_v2i1_v2f64:
; AVX512VL-64: # %bb.0:
; AVX512VL-64-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; AVX512VL-64-NEXT: vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm0, %xmm0
; AVX512VL-64-NEXT: vcvtdq2pd %xmm0, %xmm0
; AVX512VL-64-NEXT: retq
;
; AVX512DQ-LABEL: uitofp_v2i1_v2f64:
; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,2,2,3]
; AVX512DQ-NEXT: vbroadcastss {{.*#+}} xmm1 = [1,1,1,1]
; AVX512DQ-NEXT: vandps %xmm1, %xmm0, %xmm0
; AVX512DQ-NEXT: vcvtdq2pd %xmm0, %xmm0
; AVX512DQ-NEXT: ret{{[l|q]}}
;
; AVX512DQVL-32-LABEL: uitofp_v2i1_v2f64:
; AVX512DQVL-32: # %bb.0:
; AVX512DQVL-32-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,2,2,3]
; AVX512DQVL-32-NEXT: vandps {{\.?LCPI[0-9]+_[0-9]+}}{1to4}, %xmm0, %xmm0
; AVX512DQVL-32-NEXT: vcvtdq2pd %xmm0, %xmm0
; AVX512DQVL-32-NEXT: retl
;
; AVX512DQVL-64-LABEL: uitofp_v2i1_v2f64:
; AVX512DQVL-64: # %bb.0:
; AVX512DQVL-64-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,2,2,3]
; AVX512DQVL-64-NEXT: vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm0, %xmm0
; AVX512DQVL-64-NEXT: vcvtdq2pd %xmm0, %xmm0
; AVX512DQVL-64-NEXT: retq
  %result = call <2 x double> @llvm.experimental.constrained.uitofp.v2f64.v2i1(<2 x i1> %x,
              metadata !"round.dynamic",
              metadata !"fpexcept.strict") #0
  ret <2 x double> %result
}

define <2 x double> @sitofp_v2i8_v2f64(<2 x i8> %x) #0 {
; SSE-LABEL: sitofp_v2i8_v2f64:
; SSE: # %bb.0:
; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
; SSE-NEXT: psrad $24, %xmm0
; SSE-NEXT: cvtdq2pd %xmm0, %xmm0
; SSE-NEXT: ret{{[l|q]}}
;
; SSE41-LABEL: sitofp_v2i8_v2f64:
; SSE41: # %bb.0:
; SSE41-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE41-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
; SSE41-NEXT: psrad $24, %xmm0
; SSE41-NEXT: cvtdq2pd %xmm0, %xmm0
; SSE41-NEXT: ret{{[l|q]}}
;
; AVX-LABEL: sitofp_v2i8_v2f64:
; AVX: # %bb.0:
; AVX-NEXT: vpmovsxbd %xmm0, %xmm0
; AVX-NEXT: vcvtdq2pd %xmm0, %xmm0
; AVX-NEXT: ret{{[l|q]}}
  %result = call <2 x double> @llvm.experimental.constrained.sitofp.v2f64.v2i8(<2 x i8> %x,
              metadata !"round.dynamic",
              metadata !"fpexcept.strict") #0
  ret <2 x double> %result
}

define <2 x double> @uitofp_v2i8_v2f64(<2 x i8> %x) #0 {
; SSE-LABEL: uitofp_v2i8_v2f64:
; SSE: # %bb.0:
; SSE-NEXT: pxor %xmm1, %xmm1
; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; SSE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; SSE-NEXT: cvtdq2pd %xmm0, %xmm0
; SSE-NEXT: ret{{[l|q]}}
;
; SSE41-LABEL: uitofp_v2i8_v2f64:
; SSE41: # %bb.0:
; SSE41-NEXT: pxor %xmm1, %xmm1
; SSE41-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; SSE41-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; SSE41-NEXT: cvtdq2pd %xmm0, %xmm0
; SSE41-NEXT: ret{{[l|q]}}
;
; AVX-LABEL: uitofp_v2i8_v2f64:
; AVX: # %bb.0:
; AVX-NEXT: vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
; AVX-NEXT: vcvtdq2pd %xmm0, %xmm0
; AVX-NEXT: ret{{[l|q]}}
  %result = call <2 x double> @llvm.experimental.constrained.uitofp.v2f64.v2i8(<2 x i8> %x,
              metadata !"round.dynamic",
              metadata !"fpexcept.strict") #0
  ret <2 x double> %result
}

define <2 x double> @sitofp_v2i16_v2f64(<2 x i16> %x) #0 {
; SSE-LABEL: sitofp_v2i16_v2f64:
; SSE: # %bb.0:
; SSE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
; SSE-NEXT: psrad $16, %xmm0
; SSE-NEXT: cvtdq2pd %xmm0, %xmm0
; SSE-NEXT: ret{{[l|q]}}
;
; SSE41-LABEL: sitofp_v2i16_v2f64:
; SSE41: # %bb.0:
; SSE41-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
; SSE41-NEXT: psrad $16, %xmm0
; SSE41-NEXT: cvtdq2pd %xmm0, %xmm0
; SSE41-NEXT: ret{{[l|q]}}
;
; AVX-LABEL: sitofp_v2i16_v2f64:
; AVX: # %bb.0:
; AVX-NEXT: vpmovsxwd %xmm0, %xmm0
; AVX-NEXT: vcvtdq2pd %xmm0, %xmm0
; AVX-NEXT: ret{{[l|q]}}
  %result = call <2 x double> @llvm.experimental.constrained.sitofp.v2f64.v2i16(<2 x i16> %x,
              metadata !"round.dynamic",
              metadata !"fpexcept.strict") #0
  ret <2 x double> %result
}

define <2 x double> @uitofp_v2i16_v2f64(<2 x i16> %x) #0 {
; SSE-LABEL: uitofp_v2i16_v2f64:
; SSE: # %bb.0:
; SSE-NEXT: pxor %xmm1, %xmm1
; SSE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; SSE-NEXT: cvtdq2pd %xmm0, %xmm0
; SSE-NEXT: ret{{[l|q]}}
;
; SSE41-LABEL: uitofp_v2i16_v2f64:
; SSE41: # %bb.0:
; SSE41-NEXT: pxor %xmm1, %xmm1
; SSE41-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; SSE41-NEXT: cvtdq2pd %xmm0, %xmm0
; SSE41-NEXT: ret{{[l|q]}}
;
; AVX-LABEL: uitofp_v2i16_v2f64:
; AVX: # %bb.0:
; AVX-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; AVX-NEXT: vcvtdq2pd %xmm0, %xmm0
; AVX-NEXT: ret{{[l|q]}}
  %result = call <2 x double> @llvm.experimental.constrained.uitofp.v2f64.v2i16(<2 x i16> %x,
              metadata !"round.dynamic",
              metadata !"fpexcept.strict") #0
  ret <2 x double> %result
}

define <2 x double> @sitofp_v2i32_v2f64(<2 x i32> %x) #0 {
; SSE-LABEL: sitofp_v2i32_v2f64:
; SSE: # %bb.0:
; SSE-NEXT: cvtdq2pd %xmm0, %xmm0
; SSE-NEXT: ret{{[l|q]}}
;
; SSE41-LABEL: sitofp_v2i32_v2f64:
; SSE41: # %bb.0:
; SSE41-NEXT: cvtdq2pd %xmm0, %xmm0
; SSE41-NEXT: ret{{[l|q]}}
;
; AVX-LABEL: sitofp_v2i32_v2f64:
; AVX: # %bb.0:
; AVX-NEXT: vcvtdq2pd %xmm0, %xmm0
; AVX-NEXT: ret{{[l|q]}}
  %result = call <2 x double> @llvm.experimental.constrained.sitofp.v2f64.v2i32(<2 x i32> %x,
              metadata !"round.dynamic",
              metadata !"fpexcept.strict") #0
  ret <2 x double> %result
}

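; Same 2^52 bias trick as uitofp_v2i32_v2f32, but the result is already
; f64 so no cvtpd2ps narrowing is needed.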
define <2 x double> @uitofp_v2i32_v2f64(<2 x i32> %x) #0 {
; SSE-LABEL: uitofp_v2i32_v2f64:
; SSE: # %bb.0:
; SSE-NEXT: xorpd %xmm1, %xmm1
; SSE-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE-NEXT: movapd {{.*#+}} xmm1 = [4.503599627370496E+15,4.503599627370496E+15]
; SSE-NEXT: orpd %xmm1, %xmm0
; SSE-NEXT: subpd %xmm1, %xmm0
; SSE-NEXT: ret{{[l|q]}}
;
; SSE41-LABEL: uitofp_v2i32_v2f64:
; SSE41: # %bb.0:
; SSE41-NEXT: xorpd %xmm1, %xmm1
; SSE41-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE41-NEXT: movapd {{.*#+}} xmm1 = [4.503599627370496E+15,4.503599627370496E+15]
; SSE41-NEXT: orpd %xmm1, %xmm0
; SSE41-NEXT: subpd %xmm1, %xmm0
; SSE41-NEXT: ret{{[l|q]}}
;
; AVX1-LABEL: uitofp_v2i32_v2f64:
; AVX1: # %bb.0:
; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
; AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = [4.503599627370496E+15,4.503599627370496E+15]
; AVX1-NEXT: vpor %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vsubpd %xmm1, %xmm0, %xmm0
; AVX1-NEXT: ret{{[l|q]}}
;
; AVX512F-LABEL: uitofp_v2i32_v2f64:
; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero
; AVX512F-NEXT: vcvtudq2pd %ymm0, %zmm0
; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: ret{{[l|q]}}
;
; AVX512VL-LABEL: uitofp_v2i32_v2f64:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vcvtudq2pd %xmm0, %xmm0
; AVX512VL-NEXT: ret{{[l|q]}}
;
; AVX512DQ-LABEL: uitofp_v2i32_v2f64:
; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero
; AVX512DQ-NEXT: vcvtudq2pd %ymm0, %zmm0
; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: ret{{[l|q]}}
;
; AVX512DQVL-LABEL: uitofp_v2i32_v2f64:
; AVX512DQVL: # %bb.0:
; AVX512DQVL-NEXT: vcvtudq2pd %xmm0, %xmm0
; AVX512DQVL-NEXT: ret{{[l|q]}}
  %result = call <2 x double> @llvm.experimental.constrained.uitofp.v2f64.v2i32(<2 x i32> %x,
              metadata !"round.dynamic",
              metadata !"fpexcept.strict") #0
  ret <2 x double> %result
}

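; i64 -> f64: x87 fildll/fstpl on 32-bit targets, scalar cvtsi2sd on
; 64-bit targets; AVX512DQ provides packed vcvtqq2pd.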
define <2 x double> @sitofp_v2i64_v2f64(<2 x i64> %x) #0 {
; SSE-32-LABEL: sitofp_v2i64_v2f64:
; SSE-32: # %bb.0:
; SSE-32-NEXT: pushl %ebp
; SSE-32-NEXT: .cfi_def_cfa_offset 8
; SSE-32-NEXT: .cfi_offset %ebp, -8
; SSE-32-NEXT: movl %esp, %ebp
; SSE-32-NEXT: .cfi_def_cfa_register %ebp
; SSE-32-NEXT: andl $-8, %esp
; SSE-32-NEXT: subl $32, %esp
; SSE-32-NEXT: movq %xmm0, {{[0-9]+}}(%esp)
; SSE-32-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
; SSE-32-NEXT: movq %xmm0, {{[0-9]+}}(%esp)
; SSE-32-NEXT: fildll {{[0-9]+}}(%esp)
; SSE-32-NEXT: fstpl {{[0-9]+}}(%esp)
; SSE-32-NEXT: fildll {{[0-9]+}}(%esp)
; SSE-32-NEXT: fstpl (%esp)
; SSE-32-NEXT: wait
; SSE-32-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; SSE-32-NEXT: movhps {{.*#+}} xmm0 = xmm0[0,1],mem[0,1]
; SSE-32-NEXT: movl %ebp, %esp
; SSE-32-NEXT: popl %ebp
; SSE-32-NEXT: .cfi_def_cfa %esp, 4
; SSE-32-NEXT: retl
;
; SSE-64-LABEL: sitofp_v2i64_v2f64:
; SSE-64: # %bb.0:
; SSE-64-NEXT: movq %xmm0, %rax
; SSE-64-NEXT: cvtsi2sd %rax, %xmm1
; SSE-64-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
; SSE-64-NEXT: movq %xmm0, %rax
; SSE-64-NEXT: xorps %xmm0, %xmm0
; SSE-64-NEXT: cvtsi2sd %rax, %xmm0
; SSE-64-NEXT: unpcklpd {{.*#+}} xmm1 = xmm1[0],xmm0[0]
; SSE-64-NEXT: movapd %xmm1, %xmm0
; SSE-64-NEXT: retq
;
; SSE41-32-LABEL: sitofp_v2i64_v2f64:
; SSE41-32: # %bb.0:
; SSE41-32-NEXT: pushl %ebp
; SSE41-32-NEXT: .cfi_def_cfa_offset 8
; SSE41-32-NEXT: .cfi_offset %ebp, -8
; SSE41-32-NEXT: movl %esp, %ebp
; SSE41-32-NEXT: .cfi_def_cfa_register %ebp
; SSE41-32-NEXT: andl $-8, %esp
; SSE41-32-NEXT: subl $32, %esp
; SSE41-32-NEXT: movq %xmm0, {{[0-9]+}}(%esp)
; SSE41-32-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
; SSE41-32-NEXT: movq %xmm0, {{[0-9]+}}(%esp)
; SSE41-32-NEXT: fildll {{[0-9]+}}(%esp)
; SSE41-32-NEXT: fstpl {{[0-9]+}}(%esp)
; SSE41-32-NEXT: fildll {{[0-9]+}}(%esp)
; SSE41-32-NEXT: fstpl (%esp)
; SSE41-32-NEXT: wait
; SSE41-32-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; SSE41-32-NEXT: movhps {{.*#+}} xmm0 = xmm0[0,1],mem[0,1]
; SSE41-32-NEXT: movl %ebp, %esp
; SSE41-32-NEXT: popl %ebp
; SSE41-32-NEXT: .cfi_def_cfa %esp, 4
; SSE41-32-NEXT: retl
;
; SSE41-64-LABEL: sitofp_v2i64_v2f64:
; SSE41-64: # %bb.0:
; SSE41-64-NEXT: movq %xmm0, %rax
; SSE41-64-NEXT: cvtsi2sd %rax, %xmm1
; SSE41-64-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
; SSE41-64-NEXT: movq %xmm0, %rax
; SSE41-64-NEXT: xorps %xmm0, %xmm0
; SSE41-64-NEXT: cvtsi2sd %rax, %xmm0
; SSE41-64-NEXT: unpcklpd {{.*#+}} xmm1 = xmm1[0],xmm0[0]
; SSE41-64-NEXT: movapd %xmm1, %xmm0
; SSE41-64-NEXT: retq
;
; AVX-32-LABEL: sitofp_v2i64_v2f64:
; AVX-32: # %bb.0:
; AVX-32-NEXT: pushl %ebp
; AVX-32-NEXT: .cfi_def_cfa_offset 8
; AVX-32-NEXT: .cfi_offset %ebp, -8
; AVX-32-NEXT: movl %esp, %ebp
; AVX-32-NEXT: .cfi_def_cfa_register %ebp
; AVX-32-NEXT: andl $-8, %esp
; AVX-32-NEXT: subl $32, %esp
; AVX-32-NEXT: vmovlps %xmm0, {{[0-9]+}}(%esp)
; AVX-32-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[2,3,2,3]
; AVX-32-NEXT: vmovlps %xmm0, {{[0-9]+}}(%esp)
; AVX-32-NEXT: fildll {{[0-9]+}}(%esp)
; AVX-32-NEXT: fstpl {{[0-9]+}}(%esp)
; AVX-32-NEXT: fildll {{[0-9]+}}(%esp)
; AVX-32-NEXT: fstpl (%esp)
; AVX-32-NEXT: wait
; AVX-32-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-32-NEXT: vmovhps {{.*#+}} xmm0 = xmm0[0,1],mem[0,1]
; AVX-32-NEXT: movl %ebp, %esp
; AVX-32-NEXT: popl %ebp
; AVX-32-NEXT: .cfi_def_cfa %esp, 4
; AVX-32-NEXT: retl
;
; AVX-64-LABEL: sitofp_v2i64_v2f64:
; AVX-64: # %bb.0:
; AVX-64-NEXT: vpextrq $1, %xmm0, %rax
; AVX-64-NEXT: vcvtsi2sd %rax, %xmm1, %xmm1
; AVX-64-NEXT: vmovq %xmm0, %rax
; AVX-64-NEXT: vcvtsi2sd %rax, %xmm2, %xmm0
; AVX-64-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; AVX-64-NEXT: retq
;
; AVX512DQ-LABEL: sitofp_v2i64_v2f64:
; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vmovaps %xmm0, %xmm0
; AVX512DQ-NEXT: vcvtqq2pd %zmm0, %zmm0
; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: ret{{[l|q]}}
;
; AVX512DQVL-LABEL: sitofp_v2i64_v2f64:
; AVX512DQVL: # %bb.0:
; AVX512DQVL-NEXT: vcvtqq2pd %xmm0, %xmm0
; AVX512DQVL-NEXT: ret{{[l|q]}}
  %result = call <2 x double> @llvm.experimental.constrained.sitofp.v2f64.v2i64(<2 x i64> %x,
              metadata !"round.dynamic",
              metadata !"fpexcept.strict") #0
  ret <2 x double> %result
}

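; u64 -> f64 mirrors uitofp_v2i64_v2f32: shift/or halving plus a final
; doubling on 64-bit targets, sign-indexed fadds bias on 32-bit, and
; direct vcvtusi2sd/vcvtuqq2pd once AVX512F/DQ is available.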
define <2 x double> @uitofp_v2i64_v2f64(<2 x i64> %x) #0 {
; SSE-32-LABEL: uitofp_v2i64_v2f64:
; SSE-32: # %bb.0:
; SSE-32-NEXT: pushl %ebp
; SSE-32-NEXT: .cfi_def_cfa_offset 8
; SSE-32-NEXT: .cfi_offset %ebp, -8
; SSE-32-NEXT: movl %esp, %ebp
; SSE-32-NEXT: .cfi_def_cfa_register %ebp
; SSE-32-NEXT: andl $-8, %esp
; SSE-32-NEXT: subl $32, %esp
; SSE-32-NEXT: movq %xmm0, {{[0-9]+}}(%esp)
; SSE-32-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
; SSE-32-NEXT: movq %xmm1, {{[0-9]+}}(%esp)
; SSE-32-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
; SSE-32-NEXT: movd %xmm1, %eax
; SSE-32-NEXT: shrl $31, %eax
; SSE-32-NEXT: fildll {{[0-9]+}}(%esp)
; SSE-32-NEXT: fadds {{\.?LCPI[0-9]+_[0-9]+}}(,%eax,4)
; SSE-32-NEXT: fstpl {{[0-9]+}}(%esp)
; SSE-32-NEXT: wait
; SSE-32-NEXT: pshufd {{.*#+}} xmm0 = xmm0[3,3,3,3]
; SSE-32-NEXT: movd %xmm0, %eax
; SSE-32-NEXT: shrl $31, %eax
; SSE-32-NEXT: fildll {{[0-9]+}}(%esp)
; SSE-32-NEXT: fadds {{\.?LCPI[0-9]+_[0-9]+}}(,%eax,4)
; SSE-32-NEXT: fstpl (%esp)
; SSE-32-NEXT: wait
; SSE-32-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; SSE-32-NEXT: movhps {{.*#+}} xmm0 = xmm0[0,1],mem[0,1]
; SSE-32-NEXT: movl %ebp, %esp
; SSE-32-NEXT: popl %ebp
; SSE-32-NEXT: .cfi_def_cfa %esp, 4
; SSE-32-NEXT: retl
;
; SSE-64-LABEL: uitofp_v2i64_v2f64:
; SSE-64: # %bb.0:
; SSE-64-NEXT: movdqa %xmm0, %xmm1
; SSE-64-NEXT: movq %xmm0, %rax
; SSE-64-NEXT: movq %rax, %rcx
; SSE-64-NEXT: shrq %rcx
; SSE-64-NEXT: movl %eax, %edx
; SSE-64-NEXT: andl $1, %edx
; SSE-64-NEXT: orq %rcx, %rdx
; SSE-64-NEXT: testq %rax, %rax
; SSE-64-NEXT: cmovnsq %rax, %rdx
; SSE-64-NEXT: xorps %xmm0, %xmm0
; SSE-64-NEXT: cvtsi2sd %rdx, %xmm0
; SSE-64-NEXT: jns .LBB21_2
; SSE-64-NEXT: # %bb.1:
; SSE-64-NEXT: addsd %xmm0, %xmm0
; SSE-64-NEXT: .LBB21_2:
; SSE-64-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,2,3]
; SSE-64-NEXT: movq %xmm1, %rax
; SSE-64-NEXT: movq %rax, %rcx
; SSE-64-NEXT: shrq %rcx
; SSE-64-NEXT: movl %eax, %edx
; SSE-64-NEXT: andl $1, %edx
; SSE-64-NEXT: orq %rcx, %rdx
; SSE-64-NEXT: testq %rax, %rax
; SSE-64-NEXT: cmovnsq %rax, %rdx
; SSE-64-NEXT: xorps %xmm1, %xmm1
; SSE-64-NEXT: cvtsi2sd %rdx, %xmm1
; SSE-64-NEXT: jns .LBB21_4
; SSE-64-NEXT: # %bb.3:
; SSE-64-NEXT: addsd %xmm1, %xmm1
; SSE-64-NEXT: .LBB21_4:
; SSE-64-NEXT: unpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSE-64-NEXT: retq
;
; SSE41-32-LABEL: uitofp_v2i64_v2f64:
; SSE41-32: # %bb.0:
; SSE41-32-NEXT: pushl %ebp
; SSE41-32-NEXT: .cfi_def_cfa_offset 8
; SSE41-32-NEXT: .cfi_offset %ebp, -8
; SSE41-32-NEXT: movl %esp, %ebp
; SSE41-32-NEXT: .cfi_def_cfa_register %ebp
; SSE41-32-NEXT: andl $-8, %esp
; SSE41-32-NEXT: subl $32, %esp
; SSE41-32-NEXT: movq %xmm0, {{[0-9]+}}(%esp)
; SSE41-32-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
; SSE41-32-NEXT: movq %xmm1, {{[0-9]+}}(%esp)
; SSE41-32-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
; SSE41-32-NEXT: movd %xmm1, %eax
; SSE41-32-NEXT: shrl $31, %eax
; SSE41-32-NEXT: fildll {{[0-9]+}}(%esp)
; SSE41-32-NEXT: fadds {{\.?LCPI[0-9]+_[0-9]+}}(,%eax,4)
; SSE41-32-NEXT: fstpl {{[0-9]+}}(%esp)
; SSE41-32-NEXT: wait
; SSE41-32-NEXT: pshufd {{.*#+}} xmm0 = xmm0[3,3,3,3]
; SSE41-32-NEXT: movd %xmm0, %eax
; SSE41-32-NEXT: shrl $31, %eax
; SSE41-32-NEXT: fildll {{[0-9]+}}(%esp)
; SSE41-32-NEXT: fadds {{\.?LCPI[0-9]+_[0-9]+}}(,%eax,4)
; SSE41-32-NEXT: fstpl (%esp)
; SSE41-32-NEXT: wait
; SSE41-32-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; SSE41-32-NEXT: movhps {{.*#+}} xmm0 = xmm0[0,1],mem[0,1]
; SSE41-32-NEXT: movl %ebp, %esp
; SSE41-32-NEXT: popl %ebp
; SSE41-32-NEXT: .cfi_def_cfa %esp, 4
; SSE41-32-NEXT: retl
;
; SSE41-64-LABEL: uitofp_v2i64_v2f64:
; SSE41-64: # %bb.0:
; SSE41-64-NEXT: movdqa %xmm0, %xmm1
; SSE41-64-NEXT: movq %xmm0, %rax
; SSE41-64-NEXT: movq %rax, %rcx
; SSE41-64-NEXT: shrq %rcx
; SSE41-64-NEXT: movl %eax, %edx
; SSE41-64-NEXT: andl $1, %edx
; SSE41-64-NEXT: orq %rcx, %rdx
; SSE41-64-NEXT: testq %rax, %rax
; SSE41-64-NEXT: cmovnsq %rax, %rdx
; SSE41-64-NEXT: xorps %xmm0, %xmm0
; SSE41-64-NEXT: cvtsi2sd %rdx, %xmm0
; SSE41-64-NEXT: jns .LBB21_2
; SSE41-64-NEXT: # %bb.1:
; SSE41-64-NEXT: addsd %xmm0, %xmm0
; SSE41-64-NEXT: .LBB21_2:
; SSE41-64-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,2,3]
; SSE41-64-NEXT: movq %xmm1, %rax
; SSE41-64-NEXT: movq %rax, %rcx
; SSE41-64-NEXT: shrq %rcx
; SSE41-64-NEXT: movl %eax, %edx
; SSE41-64-NEXT: andl $1, %edx
; SSE41-64-NEXT: orq %rcx, %rdx
; SSE41-64-NEXT: testq %rax, %rax
; SSE41-64-NEXT: cmovnsq %rax, %rdx
; SSE41-64-NEXT: xorps %xmm1, %xmm1
; SSE41-64-NEXT: cvtsi2sd %rdx, %xmm1
; SSE41-64-NEXT: jns .LBB21_4
; SSE41-64-NEXT: # %bb.3:
; SSE41-64-NEXT: addsd %xmm1, %xmm1
; SSE41-64-NEXT: .LBB21_4:
; SSE41-64-NEXT: unpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSE41-64-NEXT: retq
;
; AVX-32-LABEL: uitofp_v2i64_v2f64:
; AVX-32: # %bb.0:
; AVX-32-NEXT: pushl %ebp
; AVX-32-NEXT: .cfi_def_cfa_offset 8
; AVX-32-NEXT: .cfi_offset %ebp, -8
; AVX-32-NEXT: movl %esp, %ebp
; AVX-32-NEXT: .cfi_def_cfa_register %ebp
; AVX-32-NEXT: andl $-8, %esp
; AVX-32-NEXT: subl $32, %esp
; AVX-32-NEXT: vmovlps %xmm0, {{[0-9]+}}(%esp)
; AVX-32-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[2,3,2,3]
; AVX-32-NEXT: vmovlps %xmm1, {{[0-9]+}}(%esp)
; AVX-32-NEXT: vextractps $1, %xmm0, %eax
; AVX-32-NEXT: shrl $31, %eax
; AVX-32-NEXT: fildll {{[0-9]+}}(%esp)
; AVX-32-NEXT: fadds {{\.?LCPI[0-9]+_[0-9]+}}(,%eax,4)
; AVX-32-NEXT: fstpl {{[0-9]+}}(%esp)
; AVX-32-NEXT: wait
; AVX-32-NEXT: vextractps $3, %xmm0, %eax
; AVX-32-NEXT: shrl $31, %eax
; AVX-32-NEXT: fildll {{[0-9]+}}(%esp)
; AVX-32-NEXT: fadds {{\.?LCPI[0-9]+_[0-9]+}}(,%eax,4)
; AVX-32-NEXT: fstpl (%esp)
; AVX-32-NEXT: wait
; AVX-32-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-32-NEXT: vmovhps {{.*#+}} xmm0 = xmm0[0,1],mem[0,1]
; AVX-32-NEXT: movl %ebp, %esp
; AVX-32-NEXT: popl %ebp
; AVX-32-NEXT: .cfi_def_cfa %esp, 4
; AVX-32-NEXT: retl
;
; AVX1-64-LABEL: uitofp_v2i64_v2f64:
; AVX1-64: # %bb.0:
; AVX1-64-NEXT: vpextrq $1, %xmm0, %rax
; AVX1-64-NEXT: movq %rax, %rcx
; AVX1-64-NEXT: shrq %rcx
; AVX1-64-NEXT: movl %eax, %edx
; AVX1-64-NEXT: andl $1, %edx
; AVX1-64-NEXT: orq %rcx, %rdx
; AVX1-64-NEXT: testq %rax, %rax
; AVX1-64-NEXT: cmovnsq %rax, %rdx
; AVX1-64-NEXT: vcvtsi2sd %rdx, %xmm1, %xmm1
; AVX1-64-NEXT: jns .LBB21_2
; AVX1-64-NEXT: # %bb.1:
; AVX1-64-NEXT: vaddsd %xmm1, %xmm1, %xmm1
; AVX1-64-NEXT: .LBB21_2:
; AVX1-64-NEXT: vmovq %xmm0, %rax
; AVX1-64-NEXT: movq %rax, %rcx
; AVX1-64-NEXT: shrq %rcx
; AVX1-64-NEXT: movl %eax, %edx
; AVX1-64-NEXT: andl $1, %edx
; AVX1-64-NEXT: orq %rcx, %rdx
; AVX1-64-NEXT: testq %rax, %rax
; AVX1-64-NEXT: cmovnsq %rax, %rdx
; AVX1-64-NEXT: vcvtsi2sd %rdx, %xmm2, %xmm0
; AVX1-64-NEXT: jns .LBB21_4
; AVX1-64-NEXT: # %bb.3:
; AVX1-64-NEXT: vaddsd %xmm0, %xmm0, %xmm0
; AVX1-64-NEXT: .LBB21_4:
; AVX1-64-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; AVX1-64-NEXT: retq
;
; AVX512F-64-LABEL: uitofp_v2i64_v2f64:
; AVX512F-64: # %bb.0:
; AVX512F-64-NEXT: vpextrq $1, %xmm0, %rax
; AVX512F-64-NEXT: vcvtusi2sd %rax, %xmm1, %xmm1
; AVX512F-64-NEXT: vmovq %xmm0, %rax
; AVX512F-64-NEXT: vcvtusi2sd %rax, %xmm2, %xmm0
; AVX512F-64-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; AVX512F-64-NEXT: retq
;
; AVX512VL-64-LABEL: uitofp_v2i64_v2f64:
; AVX512VL-64: # %bb.0:
; AVX512VL-64-NEXT: vpextrq $1, %xmm0, %rax
; AVX512VL-64-NEXT: vcvtusi2sd %rax, %xmm1, %xmm1
; AVX512VL-64-NEXT: vmovq %xmm0, %rax
; AVX512VL-64-NEXT: vcvtusi2sd %rax, %xmm2, %xmm0
; AVX512VL-64-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; AVX512VL-64-NEXT: retq
;
; AVX512DQ-LABEL: uitofp_v2i64_v2f64:
; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vmovaps %xmm0, %xmm0
; AVX512DQ-NEXT: vcvtuqq2pd %zmm0, %zmm0
; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: ret{{[l|q]}}
;
; AVX512DQVL-LABEL: uitofp_v2i64_v2f64:
; AVX512DQVL: # %bb.0:
; AVX512DQVL-NEXT: vcvtuqq2pd %xmm0, %xmm0
; AVX512DQVL-NEXT: ret{{[l|q]}}
  %result = call <2 x double> @llvm.experimental.constrained.uitofp.v2f64.v2i64(<2 x i64> %x,
              metadata !"round.dynamic",
              metadata !"fpexcept.strict") #0
  ret <2 x double> %result
}

attributes #0 = { strictfp }