; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefix=SSE --check-prefix=SSE2
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=SSE --check-prefix=SSE41
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=AVX

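; SSE/AVX have no unsigned vector compare instructions, so the unsigned
; uge/ule tests below are expected to lower to an unsigned max/min followed by
; a compare for equality (pmaxub/pminub, and pmaxuw/pminuw, pmaxud/pminud on
; SSE4.1+). Plain SSE2 lacks the 16-bit and 32-bit unsigned min/max, so those
; cases use a psubusw-and-compare-with-zero sequence or a sign-bit-flipped
; signed compare instead.
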
define <16 x i8> @v16i8_icmp_uge(<16 x i8> %a, <16 x i8> %b) nounwind readnone ssp uwtable {
; SSE-LABEL: v16i8_icmp_uge:
; SSE:       # %bb.0:
; SSE-NEXT:    pmaxub %xmm0, %xmm1
; SSE-NEXT:    pcmpeqb %xmm1, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: v16i8_icmp_uge:
; AVX:       # %bb.0:
; AVX-NEXT:    vpmaxub %xmm1, %xmm0, %xmm1
; AVX-NEXT:    vpcmpeqb %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = icmp uge <16 x i8> %a, %b
  %2 = sext <16 x i1> %1 to <16 x i8>
  ret <16 x i8> %2
}

define <16 x i8> @v16i8_icmp_ule(<16 x i8> %a, <16 x i8> %b) nounwind readnone ssp uwtable {
; SSE-LABEL: v16i8_icmp_ule:
; SSE:       # %bb.0:
; SSE-NEXT:    pminub %xmm0, %xmm1
; SSE-NEXT:    pcmpeqb %xmm1, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: v16i8_icmp_ule:
; AVX:       # %bb.0:
; AVX-NEXT:    vpminub %xmm1, %xmm0, %xmm1
; AVX-NEXT:    vpcmpeqb %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = icmp ule <16 x i8> %a, %b
  %2 = sext <16 x i1> %1 to <16 x i8>
  ret <16 x i8> %2
}

define <8 x i16> @v8i16_icmp_uge(<8 x i16> %a, <8 x i16> %b) nounwind readnone ssp uwtable {
; SSE2-LABEL: v8i16_icmp_uge:
; SSE2:       # %bb.0:
; SSE2-NEXT:    psubusw %xmm0, %xmm1
; SSE2-NEXT:    pxor %xmm0, %xmm0
; SSE2-NEXT:    pcmpeqw %xmm1, %xmm0
; SSE2-NEXT:    retq
;
; SSE41-LABEL: v8i16_icmp_uge:
; SSE41:       # %bb.0:
; SSE41-NEXT:    pmaxuw %xmm0, %xmm1
; SSE41-NEXT:    pcmpeqw %xmm1, %xmm0
; SSE41-NEXT:    retq
;
; AVX-LABEL: v8i16_icmp_uge:
; AVX:       # %bb.0:
; AVX-NEXT:    vpmaxuw %xmm1, %xmm0, %xmm1
; AVX-NEXT:    vpcmpeqw %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = icmp uge <8 x i16> %a, %b
  %2 = sext <8 x i1> %1 to <8 x i16>
  ret <8 x i16> %2
}

define <8 x i16> @v8i16_icmp_ule(<8 x i16> %a, <8 x i16> %b) nounwind readnone ssp uwtable {
; SSE2-LABEL: v8i16_icmp_ule:
; SSE2:       # %bb.0:
; SSE2-NEXT:    psubusw %xmm1, %xmm0
; SSE2-NEXT:    pxor %xmm1, %xmm1
; SSE2-NEXT:    pcmpeqw %xmm1, %xmm0
; SSE2-NEXT:    retq
;
; SSE41-LABEL: v8i16_icmp_ule:
; SSE41:       # %bb.0:
; SSE41-NEXT:    pminuw %xmm0, %xmm1
; SSE41-NEXT:    pcmpeqw %xmm1, %xmm0
; SSE41-NEXT:    retq
;
; AVX-LABEL: v8i16_icmp_ule:
; AVX:       # %bb.0:
; AVX-NEXT:    vpminuw %xmm1, %xmm0, %xmm1
; AVX-NEXT:    vpcmpeqw %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = icmp ule <8 x i16> %a, %b
  %2 = sext <8 x i1> %1 to <8 x i16>
  ret <8 x i16> %2
}

define <4 x i32> @v4i32_icmp_uge(<4 x i32> %a, <4 x i32> %b) nounwind readnone ssp uwtable {
; SSE2-LABEL: v4i32_icmp_uge:
; SSE2:       # %bb.0:
; SSE2-NEXT:    movdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
; SSE2-NEXT:    pxor %xmm2, %xmm0
; SSE2-NEXT:    pxor %xmm1, %xmm2
; SSE2-NEXT:    pcmpgtd %xmm0, %xmm2
; SSE2-NEXT:    pcmpeqd %xmm0, %xmm0
; SSE2-NEXT:    pxor %xmm2, %xmm0
; SSE2-NEXT:    retq
;
; SSE41-LABEL: v4i32_icmp_uge:
; SSE41:       # %bb.0:
; SSE41-NEXT:    pmaxud %xmm0, %xmm1
; SSE41-NEXT:    pcmpeqd %xmm1, %xmm0
; SSE41-NEXT:    retq
;
; AVX-LABEL: v4i32_icmp_uge:
; AVX:       # %bb.0:
; AVX-NEXT:    vpmaxud %xmm1, %xmm0, %xmm1
; AVX-NEXT:    vpcmpeqd %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = icmp uge <4 x i32> %a, %b
  %2 = sext <4 x i1> %1 to <4 x i32>
  ret <4 x i32> %2
}

define <4 x i32> @v4i32_icmp_ule(<4 x i32> %a, <4 x i32> %b) nounwind readnone ssp uwtable {
; SSE2-LABEL: v4i32_icmp_ule:
; SSE2:       # %bb.0:
; SSE2-NEXT:    movdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
; SSE2-NEXT:    pxor %xmm2, %xmm1
; SSE2-NEXT:    pxor %xmm2, %xmm0
; SSE2-NEXT:    pcmpgtd %xmm1, %xmm0
; SSE2-NEXT:    pcmpeqd %xmm1, %xmm1
; SSE2-NEXT:    pxor %xmm1, %xmm0
; SSE2-NEXT:    retq
;
; SSE41-LABEL: v4i32_icmp_ule:
; SSE41:       # %bb.0:
; SSE41-NEXT:    pminud %xmm0, %xmm1
; SSE41-NEXT:    pcmpeqd %xmm1, %xmm0
; SSE41-NEXT:    retq
;
; AVX-LABEL: v4i32_icmp_ule:
; AVX:       # %bb.0:
; AVX-NEXT:    vpminud %xmm1, %xmm0, %xmm1
; AVX-NEXT:    vpcmpeqd %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = icmp ule <4 x i32> %a, %b
  %2 = sext <4 x i1> %1 to <4 x i32>
  ret <4 x i32> %2
}

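; The or_icmp_*/and_icmp_* tests below combine eq/ne compares of %x against
; vector constants. As the checks show, each compare is currently materialized
; separately (with an extra all-ones pxor to invert the ne cases) before the
; final bitwise combine.
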
define <16 x i8> @or_icmp_eq_const_1bit_diff(<16 x i8> %x) {
; SSE-LABEL: or_icmp_eq_const_1bit_diff:
; SSE:       # %bb.0:
; SSE-NEXT:    movdqa {{.*#+}} xmm1 = [43,43,43,43,43,43,43,43,43,43,43,43,43,43,43,43]
; SSE-NEXT:    pcmpeqb %xmm0, %xmm1
; SSE-NEXT:    pcmpeqb {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE-NEXT:    por %xmm1, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: or_icmp_eq_const_1bit_diff:
; AVX:       # %bb.0:
; AVX-NEXT:    vpcmpeqb {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
; AVX-NEXT:    vpcmpeqb {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; AVX-NEXT:    vpor %xmm0, %xmm1, %xmm0
; AVX-NEXT:    retq
  %a = icmp eq <16 x i8> %x, <i8 43, i8 43, i8 43, i8 43, i8 43, i8 43, i8 43, i8 43, i8 43, i8 43, i8 43, i8 43, i8 43, i8 43, i8 43, i8 43>
  %b = icmp eq <16 x i8> %x, <i8 45, i8 45, i8 45, i8 45, i8 45, i8 45, i8 45, i8 45, i8 45, i8 45, i8 45, i8 45, i8 45, i8 45, i8 45, i8 45>
  %ax = sext <16 x i1> %a to <16 x i8>
  %bx = sext <16 x i1> %b to <16 x i8>
  %r = or <16 x i8> %ax, %bx
  ret <16 x i8> %r
}

define <4 x i32> @or_icmp_ne_const_1bit_diff(<4 x i32> %x) {
; SSE2-LABEL: or_icmp_ne_const_1bit_diff:
; SSE2:       # %bb.0:
; SSE2-NEXT:    movdqa {{.*#+}} xmm1 = [44,60,44,60]
; SSE2-NEXT:    pcmpeqd %xmm0, %xmm1
; SSE2-NEXT:    pcmpeqd %xmm2, %xmm2
; SSE2-NEXT:    pxor %xmm2, %xmm1
; SSE2-NEXT:    pcmpeqd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE2-NEXT:    pxor %xmm2, %xmm0
; SSE2-NEXT:    por %xmm1, %xmm0
; SSE2-NEXT:    retq
;
; SSE41-LABEL: or_icmp_ne_const_1bit_diff:
; SSE41:       # %bb.0:
; SSE41-NEXT:    pmovsxbd {{.*#+}} xmm1 = [44,60,44,60]
; SSE41-NEXT:    pcmpeqd %xmm0, %xmm1
; SSE41-NEXT:    pcmpeqd %xmm2, %xmm2
; SSE41-NEXT:    pxor %xmm2, %xmm1
; SSE41-NEXT:    pcmpeqd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE41-NEXT:    pxor %xmm2, %xmm0
; SSE41-NEXT:    por %xmm1, %xmm0
; SSE41-NEXT:    retq
;
; AVX-LABEL: or_icmp_ne_const_1bit_diff:
; AVX:       # %bb.0:
; AVX-NEXT:    vpcmpeqd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
; AVX-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
; AVX-NEXT:    vpxor %xmm2, %xmm1, %xmm1
; AVX-NEXT:    vpcmpeqd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; AVX-NEXT:    vpxor %xmm2, %xmm0, %xmm0
; AVX-NEXT:    vpor %xmm0, %xmm1, %xmm0
; AVX-NEXT:    retq
  %a = icmp ne <4 x i32> %x, <i32 44, i32 60, i32 44, i32 60>
  %b = icmp ne <4 x i32> %x, <i32 60, i32 44, i32 60, i32 44>
  %ax = sext <4 x i1> %a to <4 x i32>
  %bx = sext <4 x i1> %b to <4 x i32>
  %r = or <4 x i32> %ax, %bx
  ret <4 x i32> %r
}

define <16 x i8> @and_icmp_eq_const_1bit_diff(<16 x i8> %x) {
; SSE-LABEL: and_icmp_eq_const_1bit_diff:
; SSE:       # %bb.0:
; SSE-NEXT:    movdqa {{.*#+}} xmm1 = [43,43,45,45,43,43,45,45,43,43,45,45,43,43,45,45]
; SSE-NEXT:    pcmpeqb %xmm0, %xmm1
; SSE-NEXT:    pcmpeqb {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE-NEXT:    pand %xmm1, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: and_icmp_eq_const_1bit_diff:
; AVX:       # %bb.0:
; AVX-NEXT:    vpcmpeqb {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
; AVX-NEXT:    vpcmpeqb {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; AVX-NEXT:    vpand %xmm0, %xmm1, %xmm0
; AVX-NEXT:    retq
  %a = icmp eq <16 x i8> %x, <i8 43, i8 43, i8 45, i8 45, i8 43, i8 43, i8 45, i8 45, i8 43, i8 43, i8 45, i8 45, i8 43, i8 43, i8 45, i8 45>
  %b = icmp eq <16 x i8> %x, <i8 45, i8 45, i8 43, i8 43, i8 45, i8 45, i8 43, i8 43, i8 45, i8 45, i8 43, i8 43, i8 45, i8 45, i8 43, i8 43>
  %ax = sext <16 x i1> %a to <16 x i8>
  %bx = sext <16 x i1> %b to <16 x i8>
  %r = and <16 x i8> %ax, %bx
  ret <16 x i8> %r
}

define <4 x i32> @and_icmp_ne_const_1bit_diff(<4 x i32> %x) {
; SSE2-LABEL: and_icmp_ne_const_1bit_diff:
; SSE2:       # %bb.0:
; SSE2-NEXT:    movdqa {{.*#+}} xmm1 = [44,60,54,44]
; SSE2-NEXT:    pcmpeqd %xmm0, %xmm1
; SSE2-NEXT:    pcmpeqd %xmm2, %xmm2
; SSE2-NEXT:    pxor %xmm2, %xmm1
; SSE2-NEXT:    pcmpeqd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE2-NEXT:    pxor %xmm2, %xmm0
; SSE2-NEXT:    por %xmm1, %xmm0
; SSE2-NEXT:    retq
;
; SSE41-LABEL: and_icmp_ne_const_1bit_diff:
; SSE41:       # %bb.0:
; SSE41-NEXT:    pmovsxbd {{.*#+}} xmm1 = [44,60,54,44]
; SSE41-NEXT:    pcmpeqd %xmm0, %xmm1
; SSE41-NEXT:    pcmpeqd %xmm2, %xmm2
; SSE41-NEXT:    pxor %xmm2, %xmm1
; SSE41-NEXT:    pcmpeqd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE41-NEXT:    pxor %xmm2, %xmm0
; SSE41-NEXT:    por %xmm1, %xmm0
; SSE41-NEXT:    retq
;
; AVX-LABEL: and_icmp_ne_const_1bit_diff:
; AVX:       # %bb.0:
; AVX-NEXT:    vpcmpeqd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
; AVX-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
; AVX-NEXT:    vpxor %xmm2, %xmm1, %xmm1
; AVX-NEXT:    vpcmpeqd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; AVX-NEXT:    vpxor %xmm2, %xmm0, %xmm0
; AVX-NEXT:    vpor %xmm0, %xmm1, %xmm0
; AVX-NEXT:    retq
  %a = icmp ne <4 x i32> %x, <i32 44, i32 60, i32 54, i32 44>
  %b = icmp ne <4 x i32> %x, <i32 60, i32 52, i32 50, i32 60>
  %ax = sext <4 x i1> %a to <4 x i32>
  %bx = sext <4 x i1> %b to <4 x i32>
  %r = or <4 x i32> %ax, %bx
  ret <4 x i32> %r
}

; At one point we were incorrectly constant-folding a setcc to 0x1 instead of
; 0xff, leading to a constpool load. The instruction doesn't matter here, but it
; should set all bits to 1.
define <16 x i8> @test_setcc_constfold_vi8(<16 x i8> %l, <16 x i8> %r) {
; SSE-LABEL: test_setcc_constfold_vi8:
; SSE:       # %bb.0:
; SSE-NEXT:    pcmpeqd %xmm0, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: test_setcc_constfold_vi8:
; AVX:       # %bb.0:
; AVX-NEXT:    vpcmpeqd %xmm0, %xmm0, %xmm0
; AVX-NEXT:    retq
  %test1 = icmp eq <16 x i8> %l, %r
  %mask1 = sext <16 x i1> %test1 to <16 x i8>
  %test2 = icmp ne <16 x i8> %l, %r
  %mask2 = sext <16 x i1> %test2 to <16 x i8>
  %res = or <16 x i8> %mask1, %mask2
  ret <16 x i8> %res
}

; Make sure sensible results come from doing extension afterwards
define <16 x i8> @test_setcc_constfold_vi1(<16 x i8> %l, <16 x i8> %r) {
; SSE-LABEL: test_setcc_constfold_vi1:
; SSE:       # %bb.0:
; SSE-NEXT:    pcmpeqd %xmm0, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: test_setcc_constfold_vi1:
; AVX:       # %bb.0:
; AVX-NEXT:    vpcmpeqd %xmm0, %xmm0, %xmm0
; AVX-NEXT:    retq
  %test1 = icmp eq <16 x i8> %l, %r
  %test2 = icmp ne <16 x i8> %l, %r
  %res = or <16 x i1> %test1, %test2
  %mask = sext <16 x i1> %res to <16 x i8>
  ret <16 x i8> %mask
}

; 64-bit case is also particularly important, as the constant "-1" is probably
; handled differently from the narrower element types.
define <2 x i64> @test_setcc_constfold_vi64(<2 x i64> %l, <2 x i64> %r) {
; SSE-LABEL: test_setcc_constfold_vi64:
; SSE:       # %bb.0:
; SSE-NEXT:    pcmpeqd %xmm0, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: test_setcc_constfold_vi64:
; AVX:       # %bb.0:
; AVX-NEXT:    vpcmpeqd %xmm0, %xmm0, %xmm0
; AVX-NEXT:    retq
  %test1 = icmp eq <2 x i64> %l, %r
  %mask1 = sext <2 x i1> %test1 to <2 x i64>
  %test2 = icmp ne <2 x i64> %l, %r
  %mask2 = sext <2 x i1> %test2 to <2 x i64>
  %res = or <2 x i64> %mask1, %mask2
  ret <2 x i64> %res
}

; This asserted in type legalization for v3i1 setcc after v3i16 was made
; a simple value type.
define <3 x i1> @test_setcc_v3i1_v3i16(ptr %a) nounwind {
; SSE2-LABEL: test_setcc_v3i1_v3i16:
; SSE2:       # %bb.0:
; SSE2-NEXT:    movq {{.*#+}} xmm0 = mem[0],zero
; SSE2-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
; SSE2-NEXT:    pxor %xmm1, %xmm1
; SSE2-NEXT:    pcmpeqw %xmm0, %xmm1
; SSE2-NEXT:    movdqa %xmm1, -{{[0-9]+}}(%rsp)
; SSE2-NEXT:    movzbl -{{[0-9]+}}(%rsp), %eax
; SSE2-NEXT:    movzbl -{{[0-9]+}}(%rsp), %edx
; SSE2-NEXT:    movzbl -{{[0-9]+}}(%rsp), %ecx
; SSE2-NEXT:    retq
;
; SSE41-LABEL: test_setcc_v3i1_v3i16:
; SSE41:       # %bb.0:
; SSE41-NEXT:    movq {{.*#+}} xmm0 = mem[0],zero
; SSE41-NEXT:    pxor %xmm1, %xmm1
; SSE41-NEXT:    pcmpeqw %xmm0, %xmm1
; SSE41-NEXT:    movd %xmm1, %eax
; SSE41-NEXT:    pextrb $2, %xmm1, %edx
; SSE41-NEXT:    pextrb $4, %xmm1, %ecx
; SSE41-NEXT:    # kill: def $al killed $al killed $eax
; SSE41-NEXT:    # kill: def $dl killed $dl killed $edx
; SSE41-NEXT:    # kill: def $cl killed $cl killed $ecx
; SSE41-NEXT:    retq
;
; AVX-LABEL: test_setcc_v3i1_v3i16:
; AVX:       # %bb.0:
; AVX-NEXT:    vmovq {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
; AVX-NEXT:    vpcmpeqw %xmm1, %xmm0, %xmm0
; AVX-NEXT:    vmovd %xmm0, %eax
; AVX-NEXT:    vpextrb $2, %xmm0, %edx
; AVX-NEXT:    vpextrb $4, %xmm0, %ecx
; AVX-NEXT:    # kill: def $al killed $al killed $eax
; AVX-NEXT:    # kill: def $dl killed $dl killed $edx
; AVX-NEXT:    # kill: def $cl killed $cl killed $ecx
; AVX-NEXT:    retq
  %b = load <3 x i16>, ptr %a
  %cmp = icmp eq <3 x i16> %b, <i16 0, i16 0, i16 0>
  ret <3 x i1> %cmp
}