; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i686-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=X32-SSE
; RUN: llc < %s -mtriple=i686-unknown -mattr=+avx | FileCheck %s --check-prefix=X32-AVX
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=X64-SSE
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx | FileCheck %s --check-prefix=X64-AVX
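
; Truncate a <2 x double> loaded from memory to <2 x float>; the load folds
; into cvtpd2ps / vcvtpd2psx and the result is stored with a 64-bit move.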
define void @fptrunc_frommem2(<2 x double>* %in, <2 x float>* %out) {
; X32-SSE-LABEL: fptrunc_frommem2:
; X32-SSE:       # %bb.0: # %entry
; X32-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X32-SSE-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X32-SSE-NEXT:    cvtpd2ps (%ecx), %xmm0
; X32-SSE-NEXT:    movlpd %xmm0, (%eax)
; X32-SSE-NEXT:    retl
;
; X32-AVX-LABEL: fptrunc_frommem2:
; X32-AVX:       # %bb.0: # %entry
; X32-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X32-AVX-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X32-AVX-NEXT:    vcvtpd2psx (%ecx), %xmm0
; X32-AVX-NEXT:    vmovlpd %xmm0, (%eax)
; X32-AVX-NEXT:    retl
;
; X64-SSE-LABEL: fptrunc_frommem2:
; X64-SSE:       # %bb.0: # %entry
; X64-SSE-NEXT:    cvtpd2ps (%rdi), %xmm0
; X64-SSE-NEXT:    movlpd %xmm0, (%rsi)
; X64-SSE-NEXT:    retq
;
; X64-AVX-LABEL: fptrunc_frommem2:
; X64-AVX:       # %bb.0: # %entry
; X64-AVX-NEXT:    vcvtpd2psx (%rdi), %xmm0
; X64-AVX-NEXT:    vmovlpd %xmm0, (%rsi)
; X64-AVX-NEXT:    retq
entry:
  %0 = load <2 x double>, <2 x double>* %in
  %1 = fptrunc <2 x double> %0 to <2 x float>
  store <2 x float> %1, <2 x float>* %out, align 1
  ret void
}
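
; Truncate a <4 x double> loaded from memory to <4 x float>; SSE needs two
; cvtpd2ps plus an unpcklpd to merge the halves, while AVX folds the 256-bit
; load into a single vcvtpd2psy.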
define void @fptrunc_frommem4(<4 x double>* %in, <4 x float>* %out) {
; X32-SSE-LABEL: fptrunc_frommem4:
; X32-SSE:       # %bb.0: # %entry
; X32-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X32-SSE-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X32-SSE-NEXT:    cvtpd2ps 16(%ecx), %xmm0
; X32-SSE-NEXT:    cvtpd2ps (%ecx), %xmm1
; X32-SSE-NEXT:    unpcklpd {{.*#+}} xmm1 = xmm1[0],xmm0[0]
; X32-SSE-NEXT:    movupd %xmm1, (%eax)
; X32-SSE-NEXT:    retl
;
; X32-AVX-LABEL: fptrunc_frommem4:
; X32-AVX:       # %bb.0: # %entry
; X32-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X32-AVX-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X32-AVX-NEXT:    vcvtpd2psy (%ecx), %xmm0
; X32-AVX-NEXT:    vmovupd %xmm0, (%eax)
; X32-AVX-NEXT:    retl
;
; X64-SSE-LABEL: fptrunc_frommem4:
; X64-SSE:       # %bb.0: # %entry
; X64-SSE-NEXT:    cvtpd2ps 16(%rdi), %xmm0
; X64-SSE-NEXT:    cvtpd2ps (%rdi), %xmm1
; X64-SSE-NEXT:    unpcklpd {{.*#+}} xmm1 = xmm1[0],xmm0[0]
; X64-SSE-NEXT:    movupd %xmm1, (%rsi)
; X64-SSE-NEXT:    retq
;
; X64-AVX-LABEL: fptrunc_frommem4:
; X64-AVX:       # %bb.0: # %entry
; X64-AVX-NEXT:    vcvtpd2psy (%rdi), %xmm0
; X64-AVX-NEXT:    vmovupd %xmm0, (%rsi)
; X64-AVX-NEXT:    retq
entry:
  %0 = load <4 x double>, <4 x double>* %in
  %1 = fptrunc <4 x double> %0 to <4 x float>
  store <4 x float> %1, <4 x float>* %out, align 1
  ret void
}
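
; Truncate an <8 x double> loaded from memory to <8 x float>; SSE emits four
; cvtpd2ps merged with unpcklpd, AVX emits two vcvtpd2psy combined with
; vinsertf128 into a single ymm store.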
define void @fptrunc_frommem8(<8 x double>* %in, <8 x float>* %out) {
; X32-SSE-LABEL: fptrunc_frommem8:
; X32-SSE:       # %bb.0: # %entry
; X32-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X32-SSE-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X32-SSE-NEXT:    cvtpd2ps 16(%ecx), %xmm0
; X32-SSE-NEXT:    cvtpd2ps (%ecx), %xmm1
; X32-SSE-NEXT:    unpcklpd {{.*#+}} xmm1 = xmm1[0],xmm0[0]
; X32-SSE-NEXT:    cvtpd2ps 48(%ecx), %xmm0
; X32-SSE-NEXT:    cvtpd2ps 32(%ecx), %xmm2
; X32-SSE-NEXT:    unpcklpd {{.*#+}} xmm2 = xmm2[0],xmm0[0]
; X32-SSE-NEXT:    movupd %xmm2, 16(%eax)
; X32-SSE-NEXT:    movupd %xmm1, (%eax)
; X32-SSE-NEXT:    retl
;
; X32-AVX-LABEL: fptrunc_frommem8:
; X32-AVX:       # %bb.0: # %entry
; X32-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X32-AVX-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X32-AVX-NEXT:    vcvtpd2psy (%ecx), %xmm0
; X32-AVX-NEXT:    vcvtpd2psy 32(%ecx), %xmm1
; X32-AVX-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
; X32-AVX-NEXT:    vmovups %ymm0, (%eax)
; X32-AVX-NEXT:    vzeroupper
; X32-AVX-NEXT:    retl
;
; X64-SSE-LABEL: fptrunc_frommem8:
; X64-SSE:       # %bb.0: # %entry
; X64-SSE-NEXT:    cvtpd2ps 16(%rdi), %xmm0
; X64-SSE-NEXT:    cvtpd2ps (%rdi), %xmm1
; X64-SSE-NEXT:    unpcklpd {{.*#+}} xmm1 = xmm1[0],xmm0[0]
; X64-SSE-NEXT:    cvtpd2ps 48(%rdi), %xmm0
; X64-SSE-NEXT:    cvtpd2ps 32(%rdi), %xmm2
; X64-SSE-NEXT:    unpcklpd {{.*#+}} xmm2 = xmm2[0],xmm0[0]
; X64-SSE-NEXT:    movupd %xmm2, 16(%rsi)
; X64-SSE-NEXT:    movupd %xmm1, (%rsi)
; X64-SSE-NEXT:    retq
;
; X64-AVX-LABEL: fptrunc_frommem8:
; X64-AVX:       # %bb.0: # %entry
; X64-AVX-NEXT:    vcvtpd2psy (%rdi), %xmm0
; X64-AVX-NEXT:    vcvtpd2psy 32(%rdi), %xmm1
; X64-AVX-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
; X64-AVX-NEXT:    vmovups %ymm0, (%rsi)
; X64-AVX-NEXT:    vzeroupper
; X64-AVX-NEXT:    retq
entry:
  %0 = load <8 x double>, <8 x double>* %in
  %1 = fptrunc <8 x double> %0 to <8 x float>
  store <8 x float> %1, <8 x float>* %out, align 1
  ret void
}
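
; Truncate a <2 x double> from memory and widen the result to <4 x float> with
; zeroed upper lanes; no explicit zeroing is needed because cvtpd2ps /
; vcvtpd2psx already clear the upper half of the destination register.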
define <4 x float> @fptrunc_frommem2_zext(<2 x double> * %ld) {
; X32-SSE-LABEL: fptrunc_frommem2_zext:
; X32-SSE:       # %bb.0:
; X32-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X32-SSE-NEXT:    cvtpd2ps (%eax), %xmm0
; X32-SSE-NEXT:    retl
;
; X32-AVX-LABEL: fptrunc_frommem2_zext:
; X32-AVX:       # %bb.0:
; X32-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X32-AVX-NEXT:    vcvtpd2psx (%eax), %xmm0
; X32-AVX-NEXT:    retl
;
; X64-SSE-LABEL: fptrunc_frommem2_zext:
; X64-SSE:       # %bb.0:
; X64-SSE-NEXT:    cvtpd2ps (%rdi), %xmm0
; X64-SSE-NEXT:    retq
;
; X64-AVX-LABEL: fptrunc_frommem2_zext:
; X64-AVX:       # %bb.0:
; X64-AVX-NEXT:    vcvtpd2psx (%rdi), %xmm0
; X64-AVX-NEXT:    retq
  %arg = load <2 x double>, <2 x double> * %ld, align 16
  %cvt = fptrunc <2 x double> %arg to <2 x float>
  %ret = shufflevector <2 x float> %cvt, <2 x float> zeroinitializer, <4 x i32> <i32 0, i32 1, i32 2, i32 2>
  ret <4 x float> %ret
}
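
; Same zero-extending truncation as above, but with the <2 x double> source
; already in a register instead of memory.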
define <4 x float> @fptrunc_fromreg2_zext(<2 x double> %arg) {
; X32-SSE-LABEL: fptrunc_fromreg2_zext:
; X32-SSE:       # %bb.0:
; X32-SSE-NEXT:    cvtpd2ps %xmm0, %xmm0
; X32-SSE-NEXT:    retl
;
; X32-AVX-LABEL: fptrunc_fromreg2_zext:
; X32-AVX:       # %bb.0:
; X32-AVX-NEXT:    vcvtpd2ps %xmm0, %xmm0
; X32-AVX-NEXT:    retl
;
; X64-SSE-LABEL: fptrunc_fromreg2_zext:
; X64-SSE:       # %bb.0:
; X64-SSE-NEXT:    cvtpd2ps %xmm0, %xmm0
; X64-SSE-NEXT:    retq
;
; X64-AVX-LABEL: fptrunc_fromreg2_zext:
; X64-AVX:       # %bb.0:
; X64-AVX-NEXT:    vcvtpd2ps %xmm0, %xmm0
; X64-AVX-NEXT:    retq
  %cvt = fptrunc <2 x double> %arg to <2 x float>
  %ret = shufflevector <2 x float> %cvt, <2 x float> zeroinitializer, <4 x i32> <i32 0, i32 1, i32 2, i32 2>
  ret <4 x float> %ret
}
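
; Truncate a <4 x double> constant assembled with insertelement.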
; FIXME: For exact truncations we should be able to fold this.
define <4 x float> @fptrunc_fromconst() {
; X32-SSE-LABEL: fptrunc_fromconst:
; X32-SSE:       # %bb.0: # %entry
; X32-SSE-NEXT:    cvtpd2ps {{\.LCPI.*}}, %xmm1
; X32-SSE-NEXT:    cvtpd2ps {{\.LCPI.*}}, %xmm0
; X32-SSE-NEXT:    unpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; X32-SSE-NEXT:    retl
;
; X32-AVX-LABEL: fptrunc_fromconst:
; X32-AVX:       # %bb.0: # %entry
; X32-AVX-NEXT:    vcvtpd2psy {{\.LCPI.*}}, %xmm0
; X32-AVX-NEXT:    retl
;
; X64-SSE-LABEL: fptrunc_fromconst:
; X64-SSE:       # %bb.0: # %entry
; X64-SSE-NEXT:    cvtpd2ps {{.*}}(%rip), %xmm1
; X64-SSE-NEXT:    cvtpd2ps {{.*}}(%rip), %xmm0
; X64-SSE-NEXT:    unpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; X64-SSE-NEXT:    retq
;
; X64-AVX-LABEL: fptrunc_fromconst:
; X64-AVX:       # %bb.0: # %entry
; X64-AVX-NEXT:    vcvtpd2psy {{.*}}(%rip), %xmm0
; X64-AVX-NEXT:    retq
entry:
  %0 = insertelement <4 x double> undef, double 1.0, i32 0
  %1 = insertelement <4 x double> %0, double -2.0, i32 1
  %2 = insertelement <4 x double> %1, double +4.0, i32 2
  %3 = insertelement <4 x double> %2, double -0.0, i32 3
  %4 = fptrunc <4 x double> %3 to <4 x float>
  ret <4 x float> %4
}