; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=CHECK --check-prefix=SSE
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=CHECK --check-prefix=AVX --check-prefix=AVX1
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=CHECK --check-prefix=AVX --check-prefix=AVX2

; fold (urem x, 1) -> 0
define i32 @combine_urem_by_one(i32 %x) {
; CHECK-LABEL: combine_urem_by_one:
; CHECK:       # %bb.0:
; CHECK-NEXT:    xorl %eax, %eax
; CHECK-NEXT:    retq
  %1 = urem i32 %x, 1
  ret i32 %1
}

define <4 x i32> @combine_vec_urem_by_one(<4 x i32> %x) {
; SSE-LABEL: combine_vec_urem_by_one:
; SSE:       # %bb.0:
; SSE-NEXT:    xorps %xmm0, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_urem_by_one:
; AVX:       # %bb.0:
; AVX-NEXT:    vxorps %xmm0, %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = urem <4 x i32> %x, <i32 1, i32 1, i32 1, i32 1>
  ret <4 x i32> %1
}

; fold (urem x, -1) -> select((icmp eq x, -1), 0, x)
define i32 @combine_urem_by_negone(i32 %x) {
; CHECK-LABEL: combine_urem_by_negone:
; CHECK:       # %bb.0:
; CHECK-NEXT:    xorl %eax, %eax
; CHECK-NEXT:    cmpl $-1, %edi
; CHECK-NEXT:    cmovnel %edi, %eax
; CHECK-NEXT:    retq
  %1 = urem i32 %x, -1
  ret i32 %1
}

define <4 x i32> @combine_vec_urem_by_negone(<4 x i32> %x) {
; SSE-LABEL: combine_vec_urem_by_negone:
; SSE:       # %bb.0:
; SSE-NEXT:    pcmpeqd %xmm1, %xmm1
; SSE-NEXT:    pcmpeqd %xmm0, %xmm1
; SSE-NEXT:    pandn %xmm0, %xmm1
; SSE-NEXT:    movdqa %xmm1, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_urem_by_negone:
; AVX:       # %bb.0:
; AVX-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
; AVX-NEXT:    vpcmpeqd %xmm1, %xmm0, %xmm1
; AVX-NEXT:    vpandn %xmm0, %xmm1, %xmm0
; AVX-NEXT:    retq
  %1 = urem <4 x i32> %x, <i32 -1, i32 -1, i32 -1, i32 -1>
  ret <4 x i32> %1
}

; Use PSLLI intrinsic to postpone the undef creation until after urem-by-constant expansion
define <4 x i32> @combine_vec_urem_undef_by_negone(<4 x i32> %in) {
; SSE-LABEL: combine_vec_urem_undef_by_negone:
; SSE:       # %bb.0:
; SSE-NEXT:    pcmpeqd %xmm0, %xmm0
; SSE-NEXT:    pcmpeqd %xmm0, %xmm0
; SSE-NEXT:    pandn %xmm0, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_urem_undef_by_negone:
; AVX:       # %bb.0:
; AVX-NEXT:    vpcmpeqd %xmm0, %xmm0, %xmm0
; AVX-NEXT:    vpcmpeqd %xmm0, %xmm0, %xmm0
; AVX-NEXT:    vpandn %xmm0, %xmm0, %xmm0
; AVX-NEXT:    retq
  %x = call <4 x i32> @llvm.x86.sse2.pslli.d(<4 x i32> undef, i32 0)
  %y = urem <4 x i32> %x, <i32 -1, i32 -1, i32 -1, i32 -1>
  ret <4 x i32> %y
}

; fold (urem x, INT_MIN) -> (and x, ~INT_MIN)
define i32 @combine_urem_by_minsigned(i32 %x) {
; CHECK-LABEL: combine_urem_by_minsigned:
; CHECK:       # %bb.0:
; CHECK-NEXT:    movl %edi, %eax
; CHECK-NEXT:    andl $2147483647, %eax # imm = 0x7FFFFFFF
; CHECK-NEXT:    retq
  %1 = urem i32 %x, -2147483648
  ret i32 %1
}

define <4 x i32> @combine_vec_urem_by_minsigned(<4 x i32> %x) {
; SSE-LABEL: combine_vec_urem_by_minsigned:
; SSE:       # %bb.0:
; SSE-NEXT:    andps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE-NEXT:    retq
;
; AVX1-LABEL: combine_vec_urem_by_minsigned:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; AVX1-NEXT:    retq
;
; AVX2-LABEL: combine_vec_urem_by_minsigned:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vbroadcastss {{.*#+}} xmm1 = [2147483647,2147483647,2147483647,2147483647]
; AVX2-NEXT:    vandps %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    retq
  %1 = urem <4 x i32> %x, <i32 -2147483648, i32 -2147483648, i32 -2147483648, i32 -2147483648>
  ret <4 x i32> %1
}

; fold (urem 0, x) -> 0
define i32 @combine_urem_zero(i32 %x) {
; CHECK-LABEL: combine_urem_zero:
; CHECK:       # %bb.0:
; CHECK-NEXT:    xorl %eax, %eax
; CHECK-NEXT:    retq
  %1 = urem i32 0, %x
  ret i32 %1
}

define <4 x i32> @combine_vec_urem_zero(<4 x i32> %x) {
; SSE-LABEL: combine_vec_urem_zero:
; SSE:       # %bb.0:
; SSE-NEXT:    xorps %xmm0, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_urem_zero:
; AVX:       # %bb.0:
; AVX-NEXT:    vxorps %xmm0, %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = urem <4 x i32> zeroinitializer, %x
  ret <4 x i32> %1
}

; fold (urem x, x) -> 0
define i32 @combine_urem_dupe(i32 %x) {
; CHECK-LABEL: combine_urem_dupe:
; CHECK:       # %bb.0:
; CHECK-NEXT:    xorl %eax, %eax
; CHECK-NEXT:    retq
  %1 = urem i32 %x, %x
  ret i32 %1
}

define <4 x i32> @combine_vec_urem_dupe(<4 x i32> %x) {
; SSE-LABEL: combine_vec_urem_dupe:
; SSE:       # %bb.0:
; SSE-NEXT:    xorps %xmm0, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_urem_dupe:
; AVX:       # %bb.0:
; AVX-NEXT:    vxorps %xmm0, %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = urem <4 x i32> %x, %x
  ret <4 x i32> %1
}

; fold (urem x, pow2) -> (and x, (pow2-1))
define <4 x i32> @combine_vec_urem_by_pow2a(<4 x i32> %x) {
; SSE-LABEL: combine_vec_urem_by_pow2a:
; SSE:       # %bb.0:
; SSE-NEXT:    andps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE-NEXT:    retq
;
; AVX1-LABEL: combine_vec_urem_by_pow2a:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; AVX1-NEXT:    retq
;
; AVX2-LABEL: combine_vec_urem_by_pow2a:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vbroadcastss {{.*#+}} xmm1 = [3,3,3,3]
; AVX2-NEXT:    vandps %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    retq
  %1 = urem <4 x i32> %x, <i32 4, i32 4, i32 4, i32 4>
  ret <4 x i32> %1
}

define <4 x i32> @combine_vec_urem_by_pow2b(<4 x i32> %x) {
; SSE-LABEL: combine_vec_urem_by_pow2b:
; SSE:       # %bb.0:
; SSE-NEXT:    andps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_urem_by_pow2b:
; AVX:       # %bb.0:
; AVX-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = urem <4 x i32> %x, <i32 1, i32 4, i32 8, i32 16>
  ret <4 x i32> %1
}

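; fold (urem x, (shl 1, y)) -> (and x, (add (shl 1, y), -1)) - variable shift amount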
define <4 x i32> @combine_vec_urem_by_pow2c(<4 x i32> %x, <4 x i32> %y) {
; SSE-LABEL: combine_vec_urem_by_pow2c:
; SSE:       # %bb.0:
; SSE-NEXT:    pslld $23, %xmm1
; SSE-NEXT:    paddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
; SSE-NEXT:    cvttps2dq %xmm1, %xmm1
; SSE-NEXT:    pcmpeqd %xmm2, %xmm2
; SSE-NEXT:    paddd %xmm1, %xmm2
; SSE-NEXT:    pand %xmm2, %xmm0
; SSE-NEXT:    retq
;
; AVX1-LABEL: combine_vec_urem_by_pow2c:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vpslld $23, %xmm1, %xmm1
; AVX1-NEXT:    vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
; AVX1-NEXT:    vcvttps2dq %xmm1, %xmm1
; AVX1-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
; AVX1-NEXT:    vpaddd %xmm2, %xmm1, %xmm1
; AVX1-NEXT:    vpand %xmm1, %xmm0, %xmm0
; AVX1-NEXT:    retq
;
; AVX2-LABEL: combine_vec_urem_by_pow2c:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm2 = [1,1,1,1]
; AVX2-NEXT:    vpsllvd %xmm1, %xmm2, %xmm1
; AVX2-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
; AVX2-NEXT:    vpaddd %xmm2, %xmm1, %xmm1
; AVX2-NEXT:    vpand %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    retq
  %1 = shl <4 x i32> <i32 1, i32 1, i32 1, i32 1>, %y
  %2 = urem <4 x i32> %x, %1
  ret <4 x i32> %2
}

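; fold (urem x, (lshr INT_MIN, y)) -> (and x, (add (lshr INT_MIN, y), -1))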
define <4 x i32> @combine_vec_urem_by_pow2d(<4 x i32> %x, <4 x i32> %y) {
; SSE-LABEL: combine_vec_urem_by_pow2d:
; SSE:       # %bb.0:
; SSE-NEXT:    pshuflw {{.*#+}} xmm2 = xmm1[2,3,3,3,4,5,6,7]
; SSE-NEXT:    movdqa {{.*#+}} xmm3 = [2147483648,2147483648,2147483648,2147483648]
; SSE-NEXT:    movdqa %xmm3, %xmm4
; SSE-NEXT:    psrld %xmm2, %xmm4
; SSE-NEXT:    pshufd {{.*#+}} xmm2 = xmm1[2,3,2,3]
; SSE-NEXT:    pshuflw {{.*#+}} xmm5 = xmm2[2,3,3,3,4,5,6,7]
; SSE-NEXT:    movdqa %xmm3, %xmm6
; SSE-NEXT:    psrld %xmm5, %xmm6
; SSE-NEXT:    pblendw {{.*#+}} xmm6 = xmm4[0,1,2,3],xmm6[4,5,6,7]
; SSE-NEXT:    pshuflw {{.*#+}} xmm1 = xmm1[0,1,1,1,4,5,6,7]
; SSE-NEXT:    movdqa %xmm3, %xmm4
; SSE-NEXT:    psrld %xmm1, %xmm4
; SSE-NEXT:    pshuflw {{.*#+}} xmm1 = xmm2[0,1,1,1,4,5,6,7]
; SSE-NEXT:    psrld %xmm1, %xmm3
; SSE-NEXT:    pblendw {{.*#+}} xmm3 = xmm4[0,1,2,3],xmm3[4,5,6,7]
; SSE-NEXT:    pblendw {{.*#+}} xmm3 = xmm3[0,1],xmm6[2,3],xmm3[4,5],xmm6[6,7]
; SSE-NEXT:    pcmpeqd %xmm1, %xmm1
; SSE-NEXT:    paddd %xmm3, %xmm1
; SSE-NEXT:    pand %xmm1, %xmm0
; SSE-NEXT:    retq
;
; AVX1-LABEL: combine_vec_urem_by_pow2d:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vpsrldq {{.*#+}} xmm2 = xmm1[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; AVX1-NEXT:    vbroadcastss {{.*#+}} xmm3 = [2147483648,2147483648,2147483648,2147483648]
; AVX1-NEXT:    vpsrld %xmm2, %xmm3, %xmm2
; AVX1-NEXT:    vpsrlq $32, %xmm1, %xmm4
; AVX1-NEXT:    vpsrld %xmm4, %xmm3, %xmm4
; AVX1-NEXT:    vpblendw {{.*#+}} xmm2 = xmm4[0,1,2,3],xmm2[4,5,6,7]
; AVX1-NEXT:    vpxor %xmm4, %xmm4, %xmm4
; AVX1-NEXT:    vpunpckhdq {{.*#+}} xmm4 = xmm1[2],xmm4[2],xmm1[3],xmm4[3]
; AVX1-NEXT:    vpsrld %xmm4, %xmm3, %xmm4
; AVX1-NEXT:    vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
; AVX1-NEXT:    vpsrld %xmm1, %xmm3, %xmm1
; AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm4[4,5,6,7]
; AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7]
; AVX1-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
; AVX1-NEXT:    vpaddd %xmm2, %xmm1, %xmm1
; AVX1-NEXT:    vpand %xmm1, %xmm0, %xmm0
; AVX1-NEXT:    retq
;
; AVX2-LABEL: combine_vec_urem_by_pow2d:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
; AVX2-NEXT:    vpsrlvd %xmm1, %xmm2, %xmm1
; AVX2-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
; AVX2-NEXT:    vpaddd %xmm2, %xmm1, %xmm1
; AVX2-NEXT:    vpand %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    retq
  %1 = lshr <4 x i32> <i32 -2147483648, i32 -2147483648, i32 -2147483648, i32 -2147483648>, %y
  %2 = urem <4 x i32> %x, %1
  ret <4 x i32> %2
}

; fold (urem x, (shl pow2, y)) -> (and x, (add (shl pow2, y), -1))
define <4 x i32> @combine_vec_urem_by_shl_pow2a(<4 x i32> %x, <4 x i32> %y) {
; SSE-LABEL: combine_vec_urem_by_shl_pow2a:
; SSE:       # %bb.0:
; SSE-NEXT:    pslld $23, %xmm1
; SSE-NEXT:    paddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
; SSE-NEXT:    cvttps2dq %xmm1, %xmm1
; SSE-NEXT:    pslld $2, %xmm1
; SSE-NEXT:    pcmpeqd %xmm2, %xmm2
; SSE-NEXT:    paddd %xmm1, %xmm2
; SSE-NEXT:    pand %xmm2, %xmm0
; SSE-NEXT:    retq
;
; AVX1-LABEL: combine_vec_urem_by_shl_pow2a:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vpslld $23, %xmm1, %xmm1
; AVX1-NEXT:    vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
; AVX1-NEXT:    vcvttps2dq %xmm1, %xmm1
; AVX1-NEXT:    vpslld $2, %xmm1, %xmm1
; AVX1-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
; AVX1-NEXT:    vpaddd %xmm2, %xmm1, %xmm1
; AVX1-NEXT:    vpand %xmm1, %xmm0, %xmm0
; AVX1-NEXT:    retq
;
; AVX2-LABEL: combine_vec_urem_by_shl_pow2a:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm2 = [4,4,4,4]
; AVX2-NEXT:    vpsllvd %xmm1, %xmm2, %xmm1
; AVX2-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
; AVX2-NEXT:    vpaddd %xmm2, %xmm1, %xmm1
; AVX2-NEXT:    vpand %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    retq
  %1 = shl <4 x i32> <i32 4, i32 4, i32 4, i32 4>, %y
  %2 = urem <4 x i32> %x, %1
  ret <4 x i32> %2
}

define <4 x i32> @combine_vec_urem_by_shl_pow2b(<4 x i32> %x, <4 x i32> %y) {
; SSE-LABEL: combine_vec_urem_by_shl_pow2b:
; SSE:       # %bb.0:
; SSE-NEXT:    pslld $23, %xmm1
; SSE-NEXT:    paddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
; SSE-NEXT:    cvttps2dq %xmm1, %xmm1
; SSE-NEXT:    pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
; SSE-NEXT:    pcmpeqd %xmm2, %xmm2
; SSE-NEXT:    paddd %xmm1, %xmm2
; SSE-NEXT:    pand %xmm2, %xmm0
; SSE-NEXT:    retq
;
; AVX1-LABEL: combine_vec_urem_by_shl_pow2b:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vpslld $23, %xmm1, %xmm1
; AVX1-NEXT:    vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
; AVX1-NEXT:    vcvttps2dq %xmm1, %xmm1
; AVX1-NEXT:    vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
; AVX1-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
; AVX1-NEXT:    vpaddd %xmm2, %xmm1, %xmm1
; AVX1-NEXT:    vpand %xmm1, %xmm0, %xmm0
; AVX1-NEXT:    retq
;
; AVX2-LABEL: combine_vec_urem_by_shl_pow2b:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vmovdqa {{.*#+}} xmm2 = [1,4,8,16]
; AVX2-NEXT:    vpsllvd %xmm1, %xmm2, %xmm1
; AVX2-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
; AVX2-NEXT:    vpaddd %xmm2, %xmm1, %xmm1
; AVX2-NEXT:    vpand %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    retq
  %1 = shl <4 x i32> <i32 1, i32 4, i32 8, i32 16>, %y
  %2 = urem <4 x i32> %x, %1
  ret <4 x i32> %2
}

; fold (urem x, (lshr pow2, y)) -> (and x, (add (lshr pow2, y), -1))
define <4 x i32> @combine_vec_urem_by_lshr_pow2a(<4 x i32> %x, <4 x i32> %y) {
; SSE-LABEL: combine_vec_urem_by_lshr_pow2a:
; SSE:       # %bb.0:
; SSE-NEXT:    pshuflw {{.*#+}} xmm2 = xmm1[2,3,3,3,4,5,6,7]
; SSE-NEXT:    movdqa {{.*#+}} xmm3 = [4,4,4,4]
; SSE-NEXT:    movdqa %xmm3, %xmm4
; SSE-NEXT:    psrld %xmm2, %xmm4
; SSE-NEXT:    pshufd {{.*#+}} xmm2 = xmm1[2,3,2,3]
; SSE-NEXT:    pshuflw {{.*#+}} xmm5 = xmm2[2,3,3,3,4,5,6,7]
; SSE-NEXT:    movdqa %xmm3, %xmm6
; SSE-NEXT:    psrld %xmm5, %xmm6
; SSE-NEXT:    pblendw {{.*#+}} xmm6 = xmm4[0,1,2,3],xmm6[4,5,6,7]
; SSE-NEXT:    pshuflw {{.*#+}} xmm1 = xmm1[0,1,1,1,4,5,6,7]
; SSE-NEXT:    movdqa %xmm3, %xmm4
; SSE-NEXT:    psrld %xmm1, %xmm4
; SSE-NEXT:    pshuflw {{.*#+}} xmm1 = xmm2[0,1,1,1,4,5,6,7]
; SSE-NEXT:    psrld %xmm1, %xmm3
; SSE-NEXT:    pblendw {{.*#+}} xmm3 = xmm4[0,1,2,3],xmm3[4,5,6,7]
; SSE-NEXT:    pblendw {{.*#+}} xmm3 = xmm3[0,1],xmm6[2,3],xmm3[4,5],xmm6[6,7]
; SSE-NEXT:    pcmpeqd %xmm1, %xmm1
; SSE-NEXT:    paddd %xmm3, %xmm1
; SSE-NEXT:    pand %xmm1, %xmm0
; SSE-NEXT:    retq
;
; AVX1-LABEL: combine_vec_urem_by_lshr_pow2a:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vpsrldq {{.*#+}} xmm2 = xmm1[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; AVX1-NEXT:    vbroadcastss {{.*#+}} xmm3 = [4,4,4,4]
; AVX1-NEXT:    vpsrld %xmm2, %xmm3, %xmm2
; AVX1-NEXT:    vpsrlq $32, %xmm1, %xmm4
; AVX1-NEXT:    vpsrld %xmm4, %xmm3, %xmm4
; AVX1-NEXT:    vpblendw {{.*#+}} xmm2 = xmm4[0,1,2,3],xmm2[4,5,6,7]
; AVX1-NEXT:    vpxor %xmm4, %xmm4, %xmm4
; AVX1-NEXT:    vpunpckhdq {{.*#+}} xmm4 = xmm1[2],xmm4[2],xmm1[3],xmm4[3]
; AVX1-NEXT:    vpsrld %xmm4, %xmm3, %xmm4
; AVX1-NEXT:    vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
; AVX1-NEXT:    vpsrld %xmm1, %xmm3, %xmm1
; AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm4[4,5,6,7]
; AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7]
; AVX1-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
; AVX1-NEXT:    vpaddd %xmm2, %xmm1, %xmm1
; AVX1-NEXT:    vpand %xmm1, %xmm0, %xmm0
; AVX1-NEXT:    retq
;
; AVX2-LABEL: combine_vec_urem_by_lshr_pow2a:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm2 = [4,4,4,4]
; AVX2-NEXT:    vpsrlvd %xmm1, %xmm2, %xmm1
; AVX2-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
; AVX2-NEXT:    vpaddd %xmm2, %xmm1, %xmm1
; AVX2-NEXT:    vpand %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    retq
  %1 = lshr <4 x i32> <i32 4, i32 4, i32 4, i32 4>, %y
  %2 = urem <4 x i32> %x, %1
  ret <4 x i32> %2
}

define <4 x i32> @combine_vec_urem_by_lshr_pow2b(<4 x i32> %x, <4 x i32> %y) {
; SSE-LABEL: combine_vec_urem_by_lshr_pow2b:
; SSE:       # %bb.0:
; SSE-NEXT:    pshuflw {{.*#+}} xmm2 = xmm1[2,3,3,3,4,5,6,7]
; SSE-NEXT:    movdqa {{.*#+}} xmm3 = [1,4,8,16]
; SSE-NEXT:    movdqa %xmm3, %xmm4
; SSE-NEXT:    psrld %xmm2, %xmm4
; SSE-NEXT:    pshufd {{.*#+}} xmm2 = xmm1[2,3,2,3]
; SSE-NEXT:    pshuflw {{.*#+}} xmm5 = xmm2[2,3,3,3,4,5,6,7]
; SSE-NEXT:    movdqa %xmm3, %xmm6
; SSE-NEXT:    psrld %xmm5, %xmm6
; SSE-NEXT:    pblendw {{.*#+}} xmm6 = xmm4[0,1,2,3],xmm6[4,5,6,7]
; SSE-NEXT:    pshuflw {{.*#+}} xmm1 = xmm1[0,1,1,1,4,5,6,7]
; SSE-NEXT:    movdqa %xmm3, %xmm4
; SSE-NEXT:    psrld %xmm1, %xmm4
; SSE-NEXT:    pshuflw {{.*#+}} xmm1 = xmm2[0,1,1,1,4,5,6,7]
; SSE-NEXT:    psrld %xmm1, %xmm3
; SSE-NEXT:    pblendw {{.*#+}} xmm3 = xmm4[0,1,2,3],xmm3[4,5,6,7]
; SSE-NEXT:    pblendw {{.*#+}} xmm3 = xmm3[0,1],xmm6[2,3],xmm3[4,5],xmm6[6,7]
; SSE-NEXT:    pcmpeqd %xmm1, %xmm1
; SSE-NEXT:    paddd %xmm3, %xmm1
; SSE-NEXT:    pand %xmm1, %xmm0
; SSE-NEXT:    retq
;
; AVX1-LABEL: combine_vec_urem_by_lshr_pow2b:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vpsrldq {{.*#+}} xmm2 = xmm1[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; AVX1-NEXT:    vmovdqa {{.*#+}} xmm3 = [1,4,8,16]
; AVX1-NEXT:    vpsrld %xmm2, %xmm3, %xmm2
; AVX1-NEXT:    vpsrlq $32, %xmm1, %xmm4
; AVX1-NEXT:    vpsrld %xmm4, %xmm3, %xmm4
; AVX1-NEXT:    vpblendw {{.*#+}} xmm2 = xmm4[0,1,2,3],xmm2[4,5,6,7]
; AVX1-NEXT:    vpxor %xmm4, %xmm4, %xmm4
; AVX1-NEXT:    vpunpckhdq {{.*#+}} xmm4 = xmm1[2],xmm4[2],xmm1[3],xmm4[3]
; AVX1-NEXT:    vpsrld %xmm4, %xmm3, %xmm4
; AVX1-NEXT:    vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
; AVX1-NEXT:    vpsrld %xmm1, %xmm3, %xmm1
; AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm4[4,5,6,7]
; AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7]
; AVX1-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
; AVX1-NEXT:    vpaddd %xmm2, %xmm1, %xmm1
; AVX1-NEXT:    vpand %xmm1, %xmm0, %xmm0
; AVX1-NEXT:    retq
;
; AVX2-LABEL: combine_vec_urem_by_lshr_pow2b:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vmovdqa {{.*#+}} xmm2 = [1,4,8,16]
; AVX2-NEXT:    vpsrlvd %xmm1, %xmm2, %xmm1
; AVX2-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
; AVX2-NEXT:    vpaddd %xmm2, %xmm1, %xmm1
; AVX2-NEXT:    vpand %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    retq
  %1 = lshr <4 x i32> <i32 1, i32 4, i32 8, i32 16>, %y
  %2 = urem <4 x i32> %x, %1
  ret <4 x i32> %2
}

; FIXME: PR55271 - urem(undef, 3) != undef
; Use PSLLI intrinsic to postpone the undef creation until after urem-by-constant expansion
define <4 x i32> @combine_vec_urem_undef_by_3(<4 x i32> %in) {
; CHECK-LABEL: combine_vec_urem_undef_by_3:
; CHECK:       # %bb.0:
; CHECK-NEXT:    retq
  %x = call <4 x i32> @llvm.x86.sse2.pslli.d(<4 x i32> undef, i32 0)
  %y = urem <4 x i32> %x, <i32 3, i32 3, i32 3, i32 3>
  ret <4 x i32> %y
}

declare <4 x i32> @llvm.x86.sse2.pslli.d(<4 x i32>, i32)

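; i1 urem always folds to 0: the only divisor that avoids division by zero is true (1)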
define i1 @bool_urem(i1 %x, i1 %y) {
; CHECK-LABEL: bool_urem:
; CHECK:       # %bb.0:
; CHECK-NEXT:    xorl %eax, %eax
; CHECK-NEXT:    retq
  %r = urem i1 %x, %y
  ret i1 %r
}

define <4 x i1> @boolvec_urem(<4 x i1> %x, <4 x i1> %y) {
; SSE-LABEL: boolvec_urem:
; SSE:       # %bb.0:
; SSE-NEXT:    xorps %xmm0, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: boolvec_urem:
; AVX:       # %bb.0:
; AVX-NEXT:    vxorps %xmm0, %xmm0, %xmm0
; AVX-NEXT:    retq
  %r = urem <4 x i1> %x, %y
  ret <4 x i1> %r
}