; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; Tests for SSE1 and below, without SSE2+.
; RUN: llc < %s -mtriple=i386-unknown-unknown -mcpu=pentium3 -O3 | FileCheck %s --check-prefix=X32
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=-sse2,+sse -O3 | FileCheck %s --check-prefix=X64
;define <4 x i32> @test3(<4 x i16> %a) nounwind {
;  %c = sext <4 x i16> %a to <4 x i32>            ; <<4 x i32>> [#uses=1]
;  ret <4 x i32> %c
;}
; This should not emit shuffles to populate the top 2 elements of the 4-element
; vector that this ends up returning.
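;
; As a rough scalar sketch (illustrative only, not part of the checked
; output), the function below computes
;   %res[0] = %A[0] + %B[0]
;   %res[1] = %A[1] - %B[1]
; so only the low two lanes carry defined data, and shuffles that fill
; lanes 2 and 3 would be wasted work.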
define <2 x float> @test4(<2 x float> %A, <2 x float> %B) nounwind {
; X32-LABEL: test4:
; X32:       # BB#0: # %entry
; X32-NEXT:    movaps %xmm0, %xmm2
; X32-NEXT:    shufps {{.*#+}} xmm2 = xmm2[1,1,2,3]
; X32-NEXT:    addss %xmm1, %xmm0
; X32-NEXT:    shufps {{.*#+}} xmm1 = xmm1[1,1,2,3]
; X32-NEXT:    subss %xmm1, %xmm2
; X32-NEXT:    unpcklps {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
; X32-NEXT:    retl
;
; X64-LABEL: test4:
; X64:       # BB#0: # %entry
; X64-NEXT:    movaps %xmm0, %xmm2
; X64-NEXT:    shufps {{.*#+}} xmm2 = xmm2[1,1,2,3]
; X64-NEXT:    addss %xmm1, %xmm0
; X64-NEXT:    shufps {{.*#+}} xmm1 = xmm1[1,1,2,3]
; X64-NEXT:    subss %xmm1, %xmm2
; X64-NEXT:    unpcklps {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
; X64-NEXT:    retq
entry:
  %tmp7 = extractelement <2 x float> %A, i32 0
  %tmp5 = extractelement <2 x float> %A, i32 1
  %tmp3 = extractelement <2 x float> %B, i32 0
  %tmp1 = extractelement <2 x float> %B, i32 1
  %add.r = fadd float %tmp7, %tmp3
  %add.i = fsub float %tmp5, %tmp1
  %tmp11 = insertelement <2 x float> undef, float %add.r, i32 0
  %tmp9 = insertelement <2 x float> %tmp11, float %add.i, i32 1
  ret <2 x float> %tmp9
}

; We used to get stuck in type legalization for this example when lowering the
; vselect. With SSE1, v4f32 is a legal type but v4i1 (or any vector integer type)
; is not. We used to ping pong between splitting the vselect for the v4i1
; condition operand and widening the resulting vselect for the v4f32 result.
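;
; A minimal sketch of the problematic pattern (hypothetical IR, for
; illustration only):
;   %cond = icmp eq <4 x i32> %q, zeroinitializer
;   %sel  = select <4 x i1> %cond, <4 x float> %a, <4 x float> %b
; The <4 x i1> condition wants to be split into halves while the
; <4 x float> result wants to be kept whole, so each legalization step
; undid the other.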
define <4 x float> @vselect(<4 x float>* %p, <4 x i32> %q) {
; X32-LABEL: vselect:
; X32:       # BB#0: # %entry
; X32-NEXT:    cmpl $0, {{[0-9]+}}(%esp)
; X32-NEXT:    xorps %xmm0, %xmm0
; X32-NEXT:    je .LBB1_1
; X32-NEXT:  # BB#2: # %entry
; X32-NEXT:    xorps %xmm1, %xmm1
; X32-NEXT:    cmpl $0, {{[0-9]+}}(%esp)
; X32-NEXT:    jne .LBB1_5
; X32-NEXT:  .LBB1_4: # %entry
; X32-NEXT:    movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
; X32-NEXT:    cmpl $0, {{[0-9]+}}(%esp)
; X32-NEXT:    jne .LBB1_8
; X32-NEXT:  .LBB1_7: # %entry
; X32-NEXT:    movss {{.*#+}} xmm3 = mem[0],zero,zero,zero
; X32-NEXT:    cmpl $0, {{[0-9]+}}(%esp)
; X32-NEXT:    unpcklps {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
; X32-NEXT:    je .LBB1_10
; X32-NEXT:    jmp .LBB1_11
; X32-NEXT:  .LBB1_1:
; X32-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; X32-NEXT:    cmpl $0, {{[0-9]+}}(%esp)
; X32-NEXT:    je .LBB1_4
; X32-NEXT:  .LBB1_5: # %entry
; X32-NEXT:    xorps %xmm2, %xmm2
; X32-NEXT:    cmpl $0, {{[0-9]+}}(%esp)
; X32-NEXT:    je .LBB1_7
; X32-NEXT:  .LBB1_8: # %entry
; X32-NEXT:    xorps %xmm3, %xmm3
; X32-NEXT:    cmpl $0, {{[0-9]+}}(%esp)
; X32-NEXT:    unpcklps {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
; X32-NEXT:    jne .LBB1_11
; X32-NEXT:  .LBB1_10:
; X32-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X32-NEXT:  .LBB1_11: # %entry
; X32-NEXT:    unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; X32-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0],xmm2[0]
; X32-NEXT:    retl
;
; X64-LABEL: vselect:
; X64:       # BB#0: # %entry
; X64-NEXT:    testl %edx, %edx
; X64-NEXT:    xorps %xmm0, %xmm0
; X64-NEXT:    je .LBB1_1
; X64-NEXT:  # BB#2: # %entry
; X64-NEXT:    xorps %xmm1, %xmm1
; X64-NEXT:    testl %ecx, %ecx
; X64-NEXT:    jne .LBB1_5
; X64-NEXT:  .LBB1_4: # %entry
; X64-NEXT:    movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
; X64-NEXT:    testl %r8d, %r8d
; X64-NEXT:    jne .LBB1_8
; X64-NEXT:  .LBB1_7: # %entry
; X64-NEXT:    movss {{.*#+}} xmm3 = mem[0],zero,zero,zero
; X64-NEXT:    testl %esi, %esi
; X64-NEXT:    unpcklps {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
; X64-NEXT:    je .LBB1_10
; X64-NEXT:    jmp .LBB1_11
; X64-NEXT:  .LBB1_1:
; X64-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; X64-NEXT:    testl %ecx, %ecx
; X64-NEXT:    je .LBB1_4
; X64-NEXT:  .LBB1_5: # %entry
; X64-NEXT:    xorps %xmm2, %xmm2
; X64-NEXT:    testl %r8d, %r8d
; X64-NEXT:    je .LBB1_7
; X64-NEXT:  .LBB1_8: # %entry
; X64-NEXT:    xorps %xmm3, %xmm3
; X64-NEXT:    testl %esi, %esi
; X64-NEXT:    unpcklps {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
; X64-NEXT:    jne .LBB1_11
; X64-NEXT:  .LBB1_10:
; X64-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X64-NEXT:  .LBB1_11: # %entry
; X64-NEXT:    unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; X64-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0],xmm2[0]
; X64-NEXT:    retq
entry:
  %a1 = icmp eq <4 x i32> %q, zeroinitializer
  %a14 = select <4 x i1> %a1, <4 x float> <float 1.000000e+00, float 2.000000e+00, float 3.000000e+00, float 4.000000e+00>, <4 x float> zeroinitializer
  ret <4 x float> %a14
}

; v4i32 isn't a legal type with SSE1 alone, but this compare should still
; lower to a single cmpps.
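;
; For reference (assumed cmpeqps semantics, not checked here): cmpeqps
; writes an all-ones or all-zeros 32-bit mask into each lane, which is
; exactly the sext'd <4 x i1> fcmp result reinterpreted as <4 x float>,
; so no illegal integer vector type is needed.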
define <4 x float> @PR28044(<4 x float> %a0, <4 x float> %a1) nounwind {
; X32-LABEL: PR28044:
; X32:       # BB#0:
; X32-NEXT:    cmpeqps %xmm1, %xmm0
; X32-NEXT:    retl
;
; X64-LABEL: PR28044:
; X64:       # BB#0:
; X64-NEXT:    cmpeqps %xmm1, %xmm0
; X64-NEXT:    retq
  %cmp = fcmp oeq <4 x float> %a0, %a1
  %sext = sext <4 x i1> %cmp to <4 x i32>
  %res = bitcast <4 x i32> %sext to <4 x float>
  ret <4 x float> %res
}

; Don't crash trying to do the impossible: there is no integer vector compare
; instruction with SSE1 alone, so the comparison must be scalarized.
; https://llvm.org/bugs/show_bug.cgi?id=30512
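;
; A sketch of the per-lane pattern the checks below expect (register
; choices are illustrative, not the exact allocation):
;   xorl %ebx, %ebx
;   cmpl rhs, lhs
;   sete %bl                # 1 if equal, else 0
;   negl %ebx               # 0 or -1 lane mask
; The four masks are spilled, reloaded into xmm lanes, and the final
; andps with a constant-pool vector of 1s turns the 0/-1 masks into the
; 0/1 values the zext requires.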
define <4 x i32> @PR30512(<4 x i32> %x, <4 x i32> %y) nounwind {
; X32-LABEL: PR30512:
; X32:       # BB#0:
; X32-NEXT:    pushl %ebx
; X32-NEXT:    pushl %edi
; X32-NEXT:    pushl %esi
; X32-NEXT:    subl $16, %esp
; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT:    movl {{[0-9]+}}(%esp), %edx
; X32-NEXT:    movl {{[0-9]+}}(%esp), %esi
; X32-NEXT:    movl {{[0-9]+}}(%esp), %edi
; X32-NEXT:    xorl %ebx, %ebx
; X32-NEXT:    cmpl {{[0-9]+}}(%esp), %edi
; X32-NEXT:    sete %bl
; X32-NEXT:    negl %ebx
; X32-NEXT:    movl %ebx, {{[0-9]+}}(%esp)
; X32-NEXT:    xorl %ebx, %ebx
; X32-NEXT:    cmpl {{[0-9]+}}(%esp), %esi
; X32-NEXT:    sete %bl
; X32-NEXT:    negl %ebx
; X32-NEXT:    movl %ebx, {{[0-9]+}}(%esp)
; X32-NEXT:    xorl %ebx, %ebx
; X32-NEXT:    cmpl {{[0-9]+}}(%esp), %edx
; X32-NEXT:    sete %bl
; X32-NEXT:    negl %ebx
; X32-NEXT:    movl %ebx, {{[0-9]+}}(%esp)
; X32-NEXT:    xorl %edx, %edx
; X32-NEXT:    cmpl {{[0-9]+}}(%esp), %ecx
; X32-NEXT:    sete %dl
; X32-NEXT:    negl %edx
; X32-NEXT:    movl %edx, (%esp)
; X32-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X32-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; X32-NEXT:    unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; X32-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X32-NEXT:    movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
; X32-NEXT:    unpcklps {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
; X32-NEXT:    movlhps {{.*#+}} xmm2 = xmm2[0],xmm1[0]
; X32-NEXT:    andps {{\.LCPI.*}}, %xmm2
; X32-NEXT:    movaps %xmm2, (%eax)
; X32-NEXT:    addl $16, %esp
; X32-NEXT:    popl %esi
; X32-NEXT:    popl %edi
; X32-NEXT:    popl %ebx
; X32-NEXT:    retl
;
; X64-LABEL: PR30512:
; X64:       # BB#0:
; X64-NEXT:    xorl %eax, %eax
; X64-NEXT:    cmpl {{[0-9]+}}(%rsp), %r8d
; X64-NEXT:    sete %al
; X64-NEXT:    negl %eax
; X64-NEXT:    movl %eax, -{{[0-9]+}}(%rsp)
; X64-NEXT:    xorl %eax, %eax
; X64-NEXT:    cmpl {{[0-9]+}}(%rsp), %ecx
; X64-NEXT:    sete %al
; X64-NEXT:    negl %eax
; X64-NEXT:    movl %eax, -{{[0-9]+}}(%rsp)
; X64-NEXT:    xorl %eax, %eax
; X64-NEXT:    cmpl {{[0-9]+}}(%rsp), %edx
; X64-NEXT:    sete %al
; X64-NEXT:    negl %eax
; X64-NEXT:    movl %eax, -{{[0-9]+}}(%rsp)
; X64-NEXT:    xorl %eax, %eax
; X64-NEXT:    cmpl %r9d, %esi
; X64-NEXT:    sete %al
; X64-NEXT:    negl %eax
; X64-NEXT:    movl %eax, -{{[0-9]+}}(%rsp)
; X64-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X64-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; X64-NEXT:    unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; X64-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X64-NEXT:    movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
; X64-NEXT:    unpcklps {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
; X64-NEXT:    movlhps {{.*#+}} xmm2 = xmm2[0],xmm1[0]
; X64-NEXT:    andps {{.*}}(%rip), %xmm2
; X64-NEXT:    movaps %xmm2, (%rdi)
; X64-NEXT:    movq %rdi, %rax
; X64-NEXT:    retq
  %cmp = icmp eq <4 x i32> %x, %y
  %zext = zext <4 x i1> %cmp to <4 x i32>
  ret <4 x i32> %zext
}

; Fragile test warning - we need to induce the generation of a vselect
; post-legalization to cause the crash seen in:
; https://llvm.org/bugs/show_bug.cgi?id=31672
; Is there a way to do that without an unsafe/fast sqrt intrinsic call?
;
; We no longer try to lower sqrt using rsqrt with SSE1 only, as the
; v4i32 vselect mentioned above should never have been created; we ended up
; scalarizing it anyway.
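;
; For context, a hedged sketch of the fast-math expansion that used to
; fire (assumed form, not what is generated now): sqrt(x) was computed
; from the hardware reciprocal estimate plus one Newton-Raphson step,
;   %est  = rsqrtps(x)
;   %sqrt = x * %est * (1.5 - 0.5 * x * %est * %est)
; and a vector select was needed to force lanes with x == 0 to 0.0
; (since 0 * rsqrt(0) is NaN); that select is what became the illegal
; v4i32 vselect with SSE1.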
define <2 x float> @PR31672() #0 {
; X32-LABEL: PR31672:
; X32:       # BB#0:
; X32-NEXT:    sqrtps {{\.LCPI.*}}, %xmm0
; X32-NEXT:    retl
;
; X64-LABEL: PR31672:
; X64:       # BB#0:
; X64-NEXT:    sqrtps {{.*}}(%rip), %xmm0
; X64-NEXT:    retq
  %t0 = call fast <2 x float> @llvm.sqrt.v2f32(<2 x float> <float 42.0, float 3.0>)
  ret <2 x float> %t0
}

declare <2 x float> @llvm.sqrt.v2f32(<2 x float>) #1

attributes #0 = { nounwind "unsafe-fp-math"="true" }
attributes #1 = { nounwind readnone }