; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -o - -mcpu=generic -mtriple=x86_64-apple-darwin -mattr=+sse2 | FileCheck %s --check-prefixes=CHECK,SSE2
; RUN: llc < %s -o - -mcpu=generic -mtriple=x86_64-apple-darwin -mattr=+sse4.2 | FileCheck %s --check-prefixes=CHECK,SSE41

; For a setult against a constant, turn it into a setule and lower via psubusw.
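; A sketch of the identity being used (for an unsigned constant C > 0):
;   x u< C   <=>   x u<= C-1   <=>   (x -sat (C-1)) == 0
; where "-sat" is unsigned saturating subtraction (psubusw). That is why the
; checks below compare against the decremented splat constant 25 rather than 26.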

define void @loop_no_const_reload(<2 x i64>* %in, <2 x i64>* %out, i32 %n) {
; SSE2-LABEL: loop_no_const_reload:
; SSE2: ## %bb.0: ## %entry
; SSE2-NEXT: testl %edx, %edx
; SSE2-NEXT: je LBB0_3
; SSE2-NEXT: ## %bb.1: ## %for.body.preheader
; SSE2-NEXT: xorl %eax, %eax
; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [25,25,25,25,25,25,25,25]
; SSE2-NEXT: pxor %xmm1, %xmm1
; SSE2-NEXT: .p2align 4, 0x90
; SSE2-NEXT: LBB0_2: ## %for.body
; SSE2-NEXT: ## =>This Inner Loop Header: Depth=1
; SSE2-NEXT: movdqa (%rdi,%rax), %xmm2
; SSE2-NEXT: psubusw %xmm0, %xmm2
; SSE2-NEXT: pcmpeqw %xmm1, %xmm2
; SSE2-NEXT: movdqa %xmm2, (%rsi,%rax)
; SSE2-NEXT: addq $16, %rax
; SSE2-NEXT: decl %edx
; SSE2-NEXT: jne LBB0_2
; SSE2-NEXT: LBB0_3: ## %for.end
; SSE2-NEXT: retq
;
; SSE41-LABEL: loop_no_const_reload:
; SSE41: ## %bb.0: ## %entry
; SSE41-NEXT: testl %edx, %edx
; SSE41-NEXT: je LBB0_3
; SSE41-NEXT: ## %bb.1: ## %for.body.preheader
; SSE41-NEXT: xorl %eax, %eax
; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [25,25,25,25,25,25,25,25]
; SSE41-NEXT: .p2align 4, 0x90
; SSE41-NEXT: LBB0_2: ## %for.body
; SSE41-NEXT: ## =>This Inner Loop Header: Depth=1
; SSE41-NEXT: movdqa (%rdi,%rax), %xmm1
; SSE41-NEXT: movdqa %xmm1, %xmm2
; SSE41-NEXT: pminuw %xmm0, %xmm2
; SSE41-NEXT: pcmpeqw %xmm1, %xmm2
; SSE41-NEXT: movdqa %xmm2, (%rsi,%rax)
; SSE41-NEXT: addq $16, %rax
; SSE41-NEXT: decl %edx
; SSE41-NEXT: jne LBB0_2
; SSE41-NEXT: LBB0_3: ## %for.end
; SSE41-NEXT: retq
entry:
  %cmp9 = icmp eq i32 %n, 0
  br i1 %cmp9, label %for.end, label %for.body

for.body: ; preds = %for.body, %entry
  %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
  %arrayidx1 = getelementptr inbounds <2 x i64>, <2 x i64>* %in, i64 %indvars.iv
  %arrayidx1.val = load <2 x i64>, <2 x i64>* %arrayidx1, align 16
  %0 = bitcast <2 x i64> %arrayidx1.val to <8 x i16>
  %cmp.i.i = icmp ult <8 x i16> %0, <i16 26, i16 26, i16 26, i16 26, i16 26, i16 26, i16 26, i16 26>
  %sext.i.i = sext <8 x i1> %cmp.i.i to <8 x i16>
  %1 = bitcast <8 x i16> %sext.i.i to <2 x i64>
  %arrayidx5 = getelementptr inbounds <2 x i64>, <2 x i64>* %out, i64 %indvars.iv
  store <2 x i64> %1, <2 x i64>* %arrayidx5, align 16
  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
  %lftr.wideiv = trunc i64 %indvars.iv.next to i32
  %exitcond = icmp eq i32 %lftr.wideiv, %n
  br i1 %exitcond, label %for.end, label %for.body

for.end: ; preds = %for.body, %entry
  ret void
}

; Be careful if decrementing the constant would underflow.
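; For a lane constant of 0, C-1 would wrap to 0xFFFF and turn "x u< 0"
; (always false) into "x u<= 0xFFFF" (always true), so the psubusw trick is
; not usable for the <0, 26, ...> constant below. Instead, the SSE2 checks flip
; the sign bits with pxor and use a signed pcmpgtw, and the SSE4.1 checks
; compute uge via pmaxuw+pcmpeqw and then invert the mask with pxor.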

define void @loop_const_folding_underflow(<2 x i64>* %in, <2 x i64>* %out, i32 %n) {
; SSE2-LABEL: loop_const_folding_underflow:
; SSE2: ## %bb.0: ## %entry
; SSE2-NEXT: testl %edx, %edx
; SSE2-NEXT: je LBB1_3
; SSE2-NEXT: ## %bb.1: ## %for.body.preheader
; SSE2-NEXT: xorl %eax, %eax
; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [32768,32768,32768,32768,32768,32768,32768,32768]
; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [32768,32794,32794,32794,32794,32794,32794,32794]
; SSE2-NEXT: .p2align 4, 0x90
; SSE2-NEXT: LBB1_2: ## %for.body
; SSE2-NEXT: ## =>This Inner Loop Header: Depth=1
; SSE2-NEXT: movdqa (%rdi,%rax), %xmm2
; SSE2-NEXT: pxor %xmm0, %xmm2
; SSE2-NEXT: movdqa %xmm1, %xmm3
; SSE2-NEXT: pcmpgtw %xmm2, %xmm3
; SSE2-NEXT: movdqa %xmm3, (%rsi,%rax)
; SSE2-NEXT: addq $16, %rax
; SSE2-NEXT: decl %edx
; SSE2-NEXT: jne LBB1_2
; SSE2-NEXT: LBB1_3: ## %for.end
; SSE2-NEXT: retq
;
; SSE41-LABEL: loop_const_folding_underflow:
; SSE41: ## %bb.0: ## %entry
; SSE41-NEXT: testl %edx, %edx
; SSE41-NEXT: je LBB1_3
; SSE41-NEXT: ## %bb.1: ## %for.body.preheader
; SSE41-NEXT: xorl %eax, %eax
; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [0,26,26,26,26,26,26,26]
; SSE41-NEXT: pcmpeqd %xmm1, %xmm1
; SSE41-NEXT: .p2align 4, 0x90
; SSE41-NEXT: LBB1_2: ## %for.body
; SSE41-NEXT: ## =>This Inner Loop Header: Depth=1
; SSE41-NEXT: movdqa (%rdi,%rax), %xmm2
; SSE41-NEXT: movdqa %xmm2, %xmm3
; SSE41-NEXT: pmaxuw %xmm0, %xmm3
; SSE41-NEXT: pcmpeqw %xmm2, %xmm3
; SSE41-NEXT: pxor %xmm1, %xmm3
; SSE41-NEXT: movdqa %xmm3, (%rsi,%rax)
; SSE41-NEXT: addq $16, %rax
; SSE41-NEXT: decl %edx
; SSE41-NEXT: jne LBB1_2
; SSE41-NEXT: LBB1_3: ## %for.end
; SSE41-NEXT: retq
entry:
  %cmp9 = icmp eq i32 %n, 0
  br i1 %cmp9, label %for.end, label %for.body

for.body: ; preds = %for.body, %entry
  %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
  %arrayidx1 = getelementptr inbounds <2 x i64>, <2 x i64>* %in, i64 %indvars.iv
  %arrayidx1.val = load <2 x i64>, <2 x i64>* %arrayidx1, align 16
  %0 = bitcast <2 x i64> %arrayidx1.val to <8 x i16>
  %cmp.i.i = icmp ult <8 x i16> %0, <i16 0, i16 26, i16 26, i16 26, i16 26, i16 26, i16 26, i16 26>
  %sext.i.i = sext <8 x i1> %cmp.i.i to <8 x i16>
  %1 = bitcast <8 x i16> %sext.i.i to <2 x i64>
  %arrayidx5 = getelementptr inbounds <2 x i64>, <2 x i64>* %out, i64 %indvars.iv
  store <2 x i64> %1, <2 x i64>* %arrayidx5, align 16
  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
  %lftr.wideiv = trunc i64 %indvars.iv.next to i32
  %exitcond = icmp eq i32 %lftr.wideiv, %n
  br i1 %exitcond, label %for.end, label %for.body

for.end: ; preds = %for.body, %entry
  ret void
}

define <16 x i8> @test_ult_byte(<16 x i8> %a) {
; CHECK-LABEL: test_ult_byte:
; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: movdqa {{.*#+}} xmm1 = [10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10]
; CHECK-NEXT: pminub %xmm0, %xmm1
; CHECK-NEXT: pcmpeqb %xmm1, %xmm0
; CHECK-NEXT: retq
entry:
  %icmp = icmp ult <16 x i8> %a, <i8 11, i8 11, i8 11, i8 11, i8 11, i8 11, i8 11, i8 11, i8 11, i8 11, i8 11, i8 11, i8 11, i8 11, i8 11, i8 11>
  %sext = sext <16 x i1> %icmp to <16 x i8>
  ret <16 x i8> %sext
}

; Only do this when we can turn the comparison into a setule, i.e. not for
; register operands.
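; With a register RHS there is no constant to decrement, so ult cannot be
; rewritten as ule. In the checks below, SSE2 flips the sign bit of both
; operands and uses a signed pcmpgtw, while SSE4.1 computes "a u>= b" as
; pmaxuw(a,b) == a and then inverts the mask.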

define <8 x i16> @test_ult_register(<8 x i16> %a, <8 x i16> %b) {
; SSE2-LABEL: test_ult_register:
; SSE2: ## %bb.0: ## %entry
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [32768,32768,32768,32768,32768,32768,32768,32768]
; SSE2-NEXT: pxor %xmm2, %xmm0
; SSE2-NEXT: pxor %xmm1, %xmm2
; SSE2-NEXT: pcmpgtw %xmm0, %xmm2
; SSE2-NEXT: movdqa %xmm2, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_ult_register:
; SSE41: ## %bb.0: ## %entry
; SSE41-NEXT: pmaxuw %xmm0, %xmm1
; SSE41-NEXT: pcmpeqw %xmm1, %xmm0
; SSE41-NEXT: pcmpeqd %xmm1, %xmm1
; SSE41-NEXT: pxor %xmm1, %xmm0
; SSE41-NEXT: retq
entry:
  %icmp = icmp ult <8 x i16> %a, %b
  %sext = sext <8 x i1> %icmp to <8 x i16>
  ret <8 x i16> %sext
}

define <16 x i1> @ugt_v16i8_splat(<16 x i8> %x) {
; CHECK-LABEL: ugt_v16i8_splat:
; CHECK: ## %bb.0:
; CHECK-NEXT: movdqa {{.*#+}} xmm1 = [43,43,43,43,43,43,43,43,43,43,43,43,43,43,43,43]
; CHECK-NEXT: pmaxub %xmm0, %xmm1
; CHECK-NEXT: pcmpeqb %xmm1, %xmm0
; CHECK-NEXT: retq
  %cmp = icmp ugt <16 x i8> %x, <i8 42, i8 42, i8 42, i8 42, i8 42, i8 42, i8 42, i8 42, i8 42, i8 42, i8 42, i8 42, i8 42, i8 42, i8 42, i8 42>
  ret <16 x i1> %cmp
}

define <8 x i1> @ugt_v8i16_splat(<8 x i16> %x) {
; SSE2-LABEL: ugt_v8i16_splat:
; SSE2: ## %bb.0:
; SSE2-NEXT: pxor {{.*}}(%rip), %xmm0
; SSE2-NEXT: pcmpgtw {{.*}}(%rip), %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: ugt_v8i16_splat:
; SSE41: ## %bb.0:
; SSE41-NEXT: movdqa {{.*#+}} xmm1 = [243,243,243,243,243,243,243,243]
; SSE41-NEXT: pmaxuw %xmm0, %xmm1
; SSE41-NEXT: pcmpeqw %xmm1, %xmm0
; SSE41-NEXT: retq
  %cmp = icmp ugt <8 x i16> %x, <i16 242, i16 242, i16 242, i16 242, i16 242, i16 242, i16 242, i16 242>
  ret <8 x i1> %cmp
}

define <4 x i1> @ugt_v4i32_splat(<4 x i32> %x) {
; SSE2-LABEL: ugt_v4i32_splat:
; SSE2: ## %bb.0:
; SSE2-NEXT: pxor {{.*}}(%rip), %xmm0
; SSE2-NEXT: pcmpgtd {{.*}}(%rip), %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: ugt_v4i32_splat:
; SSE41: ## %bb.0:
; SSE41-NEXT: movdqa {{.*#+}} xmm1 = [4294967255,4294967255,4294967255,4294967255]
; SSE41-NEXT: pmaxud %xmm0, %xmm1
; SSE41-NEXT: pcmpeqd %xmm1, %xmm0
; SSE41-NEXT: retq
  %cmp = icmp ugt <4 x i32> %x, <i32 -42, i32 -42, i32 -42, i32 -42>
  ret <4 x i1> %cmp
}

define <2 x i1> @ugt_v2i64_splat(<2 x i64> %x) {
; SSE2-LABEL: ugt_v2i64_splat:
; SSE2: ## %bb.0:
; SSE2-NEXT: pxor {{.*}}(%rip), %xmm0
; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [9223372039002259898,9223372039002259898]
; SSE2-NEXT: movdqa %xmm0, %xmm2
; SSE2-NEXT: pcmpgtd %xmm1, %xmm2
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm2[0,0,2,2]
; SSE2-NEXT: pcmpeqd %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
; SSE2-NEXT: pand %xmm3, %xmm1
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm2[1,1,3,3]
; SSE2-NEXT: por %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: ugt_v2i64_splat:
; SSE41: ## %bb.0:
; SSE41-NEXT: pxor {{.*}}(%rip), %xmm0
; SSE41-NEXT: pcmpgtq {{.*}}(%rip), %xmm0
; SSE41-NEXT: retq
  %cmp = icmp ugt <2 x i64> %x, <i64 442, i64 442>
  ret <2 x i1> %cmp
}

define <16 x i1> @uge_v16i8_splat(<16 x i8> %x) {
; CHECK-LABEL: uge_v16i8_splat:
; CHECK: ## %bb.0:
; CHECK-NEXT: movdqa {{.*#+}} xmm1 = [42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42]
; CHECK-NEXT: pmaxub %xmm0, %xmm1
; CHECK-NEXT: pcmpeqb %xmm1, %xmm0
; CHECK-NEXT: retq
  %cmp = icmp uge <16 x i8> %x, <i8 42, i8 42, i8 42, i8 42, i8 42, i8 42, i8 42, i8 42, i8 42, i8 42, i8 42, i8 42, i8 42, i8 42, i8 42, i8 42>
  ret <16 x i1> %cmp
}

define <8 x i1> @uge_v8i16_splat(<8 x i16> %x) {
; SSE2-LABEL: uge_v8i16_splat:
; SSE2: ## %bb.0:
; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [242,242,242,242,242,242,242,242]
; SSE2-NEXT: psubusw %xmm0, %xmm1
; SSE2-NEXT: pxor %xmm0, %xmm0
; SSE2-NEXT: pcmpeqw %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: uge_v8i16_splat:
; SSE41: ## %bb.0:
; SSE41-NEXT: movdqa {{.*#+}} xmm1 = [242,242,242,242,242,242,242,242]
; SSE41-NEXT: pmaxuw %xmm0, %xmm1
; SSE41-NEXT: pcmpeqw %xmm1, %xmm0
; SSE41-NEXT: retq
  %cmp = icmp uge <8 x i16> %x, <i16 242, i16 242, i16 242, i16 242, i16 242, i16 242, i16 242, i16 242>
  ret <8 x i1> %cmp
}

define <4 x i1> @uge_v4i32_splat(<4 x i32> %x) {
; SSE2-LABEL: uge_v4i32_splat:
; SSE2: ## %bb.0:
; SSE2-NEXT: pxor {{.*}}(%rip), %xmm0
; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [2147483606,2147483606,2147483606,2147483606]
; SSE2-NEXT: pcmpgtd %xmm0, %xmm1
; SSE2-NEXT: pcmpeqd %xmm0, %xmm0
; SSE2-NEXT: pxor %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: uge_v4i32_splat:
; SSE41: ## %bb.0:
; SSE41-NEXT: movdqa {{.*#+}} xmm1 = [4294967254,4294967254,4294967254,4294967254]
; SSE41-NEXT: pmaxud %xmm0, %xmm1
; SSE41-NEXT: pcmpeqd %xmm1, %xmm0
; SSE41-NEXT: retq
  %cmp = icmp uge <4 x i32> %x, <i32 -42, i32 -42, i32 -42, i32 -42>
  ret <4 x i1> %cmp
}

define <2 x i1> @uge_v2i64_splat(<2 x i64> %x) {
; SSE2-LABEL: uge_v2i64_splat:
; SSE2: ## %bb.0:
; SSE2-NEXT: pxor {{.*}}(%rip), %xmm0
; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [9223372039002259898,9223372039002259898]
; SSE2-NEXT: movdqa %xmm1, %xmm2
; SSE2-NEXT: pcmpgtd %xmm0, %xmm2
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm2[0,0,2,2]
; SSE2-NEXT: pcmpeqd %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
; SSE2-NEXT: pand %xmm3, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm2[1,1,3,3]
; SSE2-NEXT: por %xmm0, %xmm1
; SSE2-NEXT: pcmpeqd %xmm0, %xmm0
; SSE2-NEXT: pxor %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: uge_v2i64_splat:
; SSE41: ## %bb.0:
; SSE41-NEXT: pxor {{.*}}(%rip), %xmm0
; SSE41-NEXT: movdqa {{.*#+}} xmm1 = [9223372036854776250,9223372036854776250]
; SSE41-NEXT: pcmpgtq %xmm0, %xmm1
; SSE41-NEXT: pcmpeqd %xmm0, %xmm0
; SSE41-NEXT: pxor %xmm1, %xmm0
; SSE41-NEXT: retq
  %cmp = icmp uge <2 x i64> %x, <i64 442, i64 442>
  ret <2 x i1> %cmp
}

define <16 x i1> @ult_v16i8_splat(<16 x i8> %x) {
; CHECK-LABEL: ult_v16i8_splat:
; CHECK: ## %bb.0:
; CHECK-NEXT: movdqa {{.*#+}} xmm1 = [41,41,41,41,41,41,41,41,41,41,41,41,41,41,41,41]
; CHECK-NEXT: pminub %xmm0, %xmm1
; CHECK-NEXT: pcmpeqb %xmm1, %xmm0
; CHECK-NEXT: retq
  %cmp = icmp ult <16 x i8> %x, <i8 42, i8 42, i8 42, i8 42, i8 42, i8 42, i8 42, i8 42, i8 42, i8 42, i8 42, i8 42, i8 42, i8 42, i8 42, i8 42>
  ret <16 x i1> %cmp
}

define <8 x i1> @ult_v8i16_splat(<8 x i16> %x) {
; SSE2-LABEL: ult_v8i16_splat:
; SSE2: ## %bb.0:
; SSE2-NEXT: psubusw {{.*}}(%rip), %xmm0
; SSE2-NEXT: pxor %xmm1, %xmm1
; SSE2-NEXT: pcmpeqw %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: ult_v8i16_splat:
; SSE41: ## %bb.0:
; SSE41-NEXT: movdqa {{.*#+}} xmm1 = [241,241,241,241,241,241,241,241]
; SSE41-NEXT: pminuw %xmm0, %xmm1
; SSE41-NEXT: pcmpeqw %xmm1, %xmm0
; SSE41-NEXT: retq
  %cmp = icmp ult <8 x i16> %x, <i16 242, i16 242, i16 242, i16 242, i16 242, i16 242, i16 242, i16 242>
  ret <8 x i1> %cmp
}

define <4 x i1> @ult_v4i32_splat(<4 x i32> %x) {
; SSE2-LABEL: ult_v4i32_splat:
; SSE2: ## %bb.0:
; SSE2-NEXT: pxor {{.*}}(%rip), %xmm0
; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [2147483606,2147483606,2147483606,2147483606]
; SSE2-NEXT: pcmpgtd %xmm0, %xmm1
; SSE2-NEXT: movdqa %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: ult_v4i32_splat:
; SSE41: ## %bb.0:
; SSE41-NEXT: movdqa {{.*#+}} xmm1 = [4294967253,4294967253,4294967253,4294967253]
; SSE41-NEXT: pminud %xmm0, %xmm1
; SSE41-NEXT: pcmpeqd %xmm1, %xmm0
; SSE41-NEXT: retq
  %cmp = icmp ult <4 x i32> %x, <i32 -42, i32 -42, i32 -42, i32 -42>
  ret <4 x i1> %cmp
}

define <2 x i1> @ult_v2i64_splat(<2 x i64> %x) {
; SSE2-LABEL: ult_v2i64_splat:
; SSE2: ## %bb.0:
; SSE2-NEXT: pxor {{.*}}(%rip), %xmm0
; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [9223372039002259898,9223372039002259898]
; SSE2-NEXT: movdqa %xmm1, %xmm2
; SSE2-NEXT: pcmpgtd %xmm0, %xmm2
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm2[0,0,2,2]
; SSE2-NEXT: pcmpeqd %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
; SSE2-NEXT: pand %xmm3, %xmm1
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm2[1,1,3,3]
; SSE2-NEXT: por %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: ult_v2i64_splat:
; SSE41: ## %bb.0:
; SSE41-NEXT: pxor {{.*}}(%rip), %xmm0
; SSE41-NEXT: movdqa {{.*#+}} xmm1 = [9223372036854776250,9223372036854776250]
; SSE41-NEXT: pcmpgtq %xmm0, %xmm1
; SSE41-NEXT: movdqa %xmm1, %xmm0
; SSE41-NEXT: retq
  %cmp = icmp ult <2 x i64> %x, <i64 442, i64 442>
  ret <2 x i1> %cmp
}

define <16 x i1> @ule_v16i8_splat(<16 x i8> %x) {
; CHECK-LABEL: ule_v16i8_splat:
; CHECK: ## %bb.0:
; CHECK-NEXT: movdqa {{.*#+}} xmm1 = [42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42]
; CHECK-NEXT: pminub %xmm0, %xmm1
; CHECK-NEXT: pcmpeqb %xmm1, %xmm0
; CHECK-NEXT: retq
  %cmp = icmp ule <16 x i8> %x, <i8 42, i8 42, i8 42, i8 42, i8 42, i8 42, i8 42, i8 42, i8 42, i8 42, i8 42, i8 42, i8 42, i8 42, i8 42, i8 42>
  ret <16 x i1> %cmp
}

define <8 x i1> @ule_v8i16_splat(<8 x i16> %x) {
; SSE2-LABEL: ule_v8i16_splat:
; SSE2: ## %bb.0:
; SSE2-NEXT: psubusw {{.*}}(%rip), %xmm0
; SSE2-NEXT: pxor %xmm1, %xmm1
; SSE2-NEXT: pcmpeqw %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: ule_v8i16_splat:
; SSE41: ## %bb.0:
; SSE41-NEXT: movdqa {{.*#+}} xmm1 = [242,242,242,242,242,242,242,242]
; SSE41-NEXT: pminuw %xmm0, %xmm1
; SSE41-NEXT: pcmpeqw %xmm1, %xmm0
; SSE41-NEXT: retq
  %cmp = icmp ule <8 x i16> %x, <i16 242, i16 242, i16 242, i16 242, i16 242, i16 242, i16 242, i16 242>
  ret <8 x i1> %cmp
}

define <4 x i1> @ule_v4i32_splat(<4 x i32> %x) {
; SSE2-LABEL: ule_v4i32_splat:
; SSE2: ## %bb.0:
; SSE2-NEXT: pxor {{.*}}(%rip), %xmm0
; SSE2-NEXT: pcmpgtd {{.*}}(%rip), %xmm0
; SSE2-NEXT: pcmpeqd %xmm1, %xmm1
; SSE2-NEXT: pxor %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: ule_v4i32_splat:
; SSE41: ## %bb.0:
; SSE41-NEXT: movdqa {{.*#+}} xmm1 = [4294967254,4294967254,4294967254,4294967254]
; SSE41-NEXT: pminud %xmm0, %xmm1
; SSE41-NEXT: pcmpeqd %xmm1, %xmm0
; SSE41-NEXT: retq
  %cmp = icmp ule <4 x i32> %x, <i32 -42, i32 -42, i32 -42, i32 -42>
  ret <4 x i1> %cmp
}

define <2 x i1> @ule_v2i64_splat(<2 x i64> %x) {
; SSE2-LABEL: ule_v2i64_splat:
; SSE2: ## %bb.0:
; SSE2-NEXT: pxor {{.*}}(%rip), %xmm0
; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [9223372039002259898,9223372039002259898]
; SSE2-NEXT: movdqa %xmm0, %xmm2
; SSE2-NEXT: pcmpgtd %xmm1, %xmm2
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm2[0,0,2,2]
; SSE2-NEXT: pcmpeqd %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
; SSE2-NEXT: pand %xmm3, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm2[1,1,3,3]
; SSE2-NEXT: por %xmm0, %xmm1
; SSE2-NEXT: pcmpeqd %xmm0, %xmm0
; SSE2-NEXT: pxor %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: ule_v2i64_splat:
; SSE41: ## %bb.0:
; SSE41-NEXT: pxor {{.*}}(%rip), %xmm0
; SSE41-NEXT: pcmpgtq {{.*}}(%rip), %xmm0
; SSE41-NEXT: pcmpeqd %xmm1, %xmm1
; SSE41-NEXT: pxor %xmm1, %xmm0
; SSE41-NEXT: retq
  %cmp = icmp ule <2 x i64> %x, <i64 442, i64 442>
  ret <2 x i1> %cmp
}

; This should be simplified before we reach lowering, but
; make sure that we are not getting it wrong by underflowing.
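; Nothing is unsigned-less-than 0, so the compare should fold to an all-zeros
; vector, which is what the xorps below produces.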

define <4 x i1> @ult_v4i32_splat_0_simplify(<4 x i32> %x) {
; CHECK-LABEL: ult_v4i32_splat_0_simplify:
; CHECK: ## %bb.0:
; CHECK-NEXT: xorps %xmm0, %xmm0
; CHECK-NEXT: retq
  %cmp = icmp ult <4 x i32> %x, <i32 0, i32 0, i32 0, i32 0>
  ret <4 x i1> %cmp
}

; This should be simplified before we reach lowering, but
; make sure that we are not getting it wrong by overflowing.
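; Likewise, no i32 is unsigned-greater-than 0xFFFFFFFF, so this compare should
; also fold to an all-zeros vector.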

define <4 x i1> @ugt_v4i32_splat_maxval_simplify(<4 x i32> %x) {
; CHECK-LABEL: ugt_v4i32_splat_maxval_simplify:
; CHECK: ## %bb.0:
; CHECK-NEXT: xorps %xmm0, %xmm0
; CHECK-NEXT: retq
  %cmp = icmp ugt <4 x i32> %x, <i32 -1, i32 -1, i32 -1, i32 -1>
  ret <4 x i1> %cmp
}

define <4 x i1> @ugt_v4i32_nonsplat(<4 x i32> %x) {
; SSE2-LABEL: ugt_v4i32_nonsplat:
; SSE2: ## %bb.0:
; SSE2-NEXT: pxor {{.*}}(%rip), %xmm0
; SSE2-NEXT: pcmpgtd {{.*}}(%rip), %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: ugt_v4i32_nonsplat:
; SSE41: ## %bb.0:
; SSE41-NEXT: movdqa {{.*#+}} xmm1 = [4294967254,4294967255,4294967256,4294967257]
; SSE41-NEXT: pmaxud %xmm0, %xmm1
; SSE41-NEXT: pcmpeqd %xmm1, %xmm0
; SSE41-NEXT: retq
  %cmp = icmp ugt <4 x i32> %x, <i32 -43, i32 -42, i32 -41, i32 -40>
  ret <4 x i1> %cmp
}

define <4 x i1> @ugt_v4i32_splat_commute(<4 x i32> %x) {
; SSE2-LABEL: ugt_v4i32_splat_commute:
; SSE2: ## %bb.0:
; SSE2-NEXT: pxor {{.*}}(%rip), %xmm0
; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [2147483652,2147483652,2147483652,2147483652]
; SSE2-NEXT: pcmpgtd %xmm0, %xmm1
; SSE2-NEXT: movdqa %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: ugt_v4i32_splat_commute:
; SSE41: ## %bb.0:
; SSE41-NEXT: movdqa {{.*#+}} xmm1 = [3,3,3,3]
; SSE41-NEXT: pminud %xmm0, %xmm1
; SSE41-NEXT: pcmpeqd %xmm1, %xmm0
; SSE41-NEXT: retq
  %cmp = icmp ugt <4 x i32> <i32 4, i32 4, i32 4, i32 4>, %x
  ret <4 x i1> %cmp
}

define <8 x i16> @PR39859(<8 x i16> %x, <8 x i16> %y) {
; SSE2-LABEL: PR39859:
; SSE2: ## %bb.0:
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [32768,32768,32768,32768,32768,32768,32768,32768]
; SSE2-NEXT: pxor %xmm0, %xmm2
; SSE2-NEXT: pcmpgtw {{.*}}(%rip), %xmm2
; SSE2-NEXT: pand %xmm2, %xmm1
; SSE2-NEXT: pandn %xmm0, %xmm2
; SSE2-NEXT: por %xmm1, %xmm2
; SSE2-NEXT: movdqa %xmm2, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: PR39859:
; SSE41: ## %bb.0:
; SSE41-NEXT: movdqa %xmm0, %xmm2
; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [43,43,43,43,43,43,43,43]
; SSE41-NEXT: pmaxuw %xmm2, %xmm0
; SSE41-NEXT: pcmpeqw %xmm2, %xmm0
; SSE41-NEXT: pblendvb %xmm0, %xmm1, %xmm2
; SSE41-NEXT: movdqa %xmm2, %xmm0
; SSE41-NEXT: retq
  %cmp = icmp ugt <8 x i16> %x, <i16 42, i16 42, i16 42, i16 42, i16 42, i16 42, i16 42, i16 42>
  %sel = select <8 x i1> %cmp, <8 x i16> %y, <8 x i16> %x
  ret <8 x i16> %sel
}