; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=CHECK --check-prefix=SSE
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=CHECK --check-prefix=AVX --check-prefix=AVX1
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=CHECK --check-prefix=AVX --check-prefix=AVX2

; fold (srem x, 1) -> 0
define i32 @combine_srem_by_one(i32 %x) {
; CHECK-LABEL: combine_srem_by_one:
; CHECK:       # %bb.0:
; CHECK-NEXT:    xorl %eax, %eax
; CHECK-NEXT:    retq
  %1 = srem i32 %x, 1
  ret i32 %1
}

define <4 x i32> @combine_vec_srem_by_one(<4 x i32> %x) {
; SSE-LABEL: combine_vec_srem_by_one:
; SSE:       # %bb.0:
; SSE-NEXT:    xorps %xmm0, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_srem_by_one:
; AVX:       # %bb.0:
; AVX-NEXT:    vxorps %xmm0, %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = srem <4 x i32> %x, <i32 1, i32 1, i32 1, i32 1>
  ret <4 x i32> %1
}

; fold (srem x, -1) -> 0
define i32 @combine_srem_by_negone(i32 %x) {
; CHECK-LABEL: combine_srem_by_negone:
; CHECK:       # %bb.0:
; CHECK-NEXT:    xorl %eax, %eax
; CHECK-NEXT:    retq
  %1 = srem i32 %x, -1
  ret i32 %1
}

define <4 x i32> @combine_vec_srem_by_negone(<4 x i32> %x) {
; SSE-LABEL: combine_vec_srem_by_negone:
; SSE:       # %bb.0:
; SSE-NEXT:    xorps %xmm0, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_srem_by_negone:
; AVX:       # %bb.0:
; AVX-NEXT:    vxorps %xmm0, %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = srem <4 x i32> %x, <i32 -1, i32 -1, i32 -1, i32 -1>
  ret <4 x i32> %1
}

; TODO: fold (srem x, INT_MIN)
define i32 @combine_srem_by_minsigned(i32 %x) {
; CHECK-LABEL: combine_srem_by_minsigned:
; CHECK:       # %bb.0:
; CHECK-NEXT:    # kill: def $edi killed $edi def $rdi
; CHECK-NEXT:    leal 2147483647(%rdi), %eax
; CHECK-NEXT:    testl %edi, %edi
; CHECK-NEXT:    cmovnsl %edi, %eax
; CHECK-NEXT:    andl $-2147483648, %eax # imm = 0x80000000
; CHECK-NEXT:    addl %edi, %eax
; CHECK-NEXT:    retq
  %1 = srem i32 %x, -2147483648
  ret i32 %1
}
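; Note: the expansion above is the generic srem-by-power-of-two sequence with
; |divisor| = 2^31: bias a negative x by 2^31-1 (lea/cmovns), mask the biased
; value down to a multiple of 2^31 (andl), and subtract it from x. Subtracting
; and adding 0x80000000 coincide modulo 2^32, hence the final addl.
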
define <4 x i32> @combine_vec_srem_by_minsigned(<4 x i32> %x) {
; SSE-LABEL: combine_vec_srem_by_minsigned:
; SSE:       # %bb.0:
; SSE-NEXT:    movdqa %xmm0, %xmm1
; SSE-NEXT:    psrad $31, %xmm1
; SSE-NEXT:    psrld $1, %xmm1
; SSE-NEXT:    paddd %xmm0, %xmm1
; SSE-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
; SSE-NEXT:    paddd %xmm1, %xmm0
; SSE-NEXT:    retq
;
; AVX1-LABEL: combine_vec_srem_by_minsigned:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vpsrad $31, %xmm0, %xmm1
; AVX1-NEXT:    vpsrld $1, %xmm1, %xmm1
; AVX1-NEXT:    vpaddd %xmm1, %xmm0, %xmm1
; AVX1-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
; AVX1-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
; AVX1-NEXT:    retq
;
; AVX2-LABEL: combine_vec_srem_by_minsigned:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vpsrad $31, %xmm0, %xmm1
; AVX2-NEXT:    vpsrld $1, %xmm1, %xmm1
; AVX2-NEXT:    vpaddd %xmm1, %xmm0, %xmm1
; AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
; AVX2-NEXT:    vpand %xmm2, %xmm1, %xmm1
; AVX2-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    retq
  %1 = srem <4 x i32> %x, <i32 -2147483648, i32 -2147483648, i32 -2147483648, i32 -2147483648>
  ret <4 x i32> %1
}

; fold (srem 0, x) -> 0
define i32 @combine_srem_zero(i32 %x) {
; CHECK-LABEL: combine_srem_zero:
; CHECK:       # %bb.0:
; CHECK-NEXT:    xorl %eax, %eax
; CHECK-NEXT:    retq
  %1 = srem i32 0, %x
  ret i32 %1
}

define <4 x i32> @combine_vec_srem_zero(<4 x i32> %x) {
; SSE-LABEL: combine_vec_srem_zero:
; SSE:       # %bb.0:
; SSE-NEXT:    xorps %xmm0, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_srem_zero:
; AVX:       # %bb.0:
; AVX-NEXT:    vxorps %xmm0, %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = srem <4 x i32> zeroinitializer, %x
  ret <4 x i32> %1
}

; fold (srem x, x) -> 0
define i32 @combine_srem_dupe(i32 %x) {
; CHECK-LABEL: combine_srem_dupe:
; CHECK:       # %bb.0:
; CHECK-NEXT:    xorl %eax, %eax
; CHECK-NEXT:    retq
  %1 = srem i32 %x, %x
  ret i32 %1
}

define <4 x i32> @combine_vec_srem_dupe(<4 x i32> %x) {
; SSE-LABEL: combine_vec_srem_dupe:
; SSE:       # %bb.0:
; SSE-NEXT:    xorps %xmm0, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_srem_dupe:
; AVX:       # %bb.0:
; AVX-NEXT:    vxorps %xmm0, %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = srem <4 x i32> %x, %x
  ret <4 x i32> %1
}

; fold (srem x, y) -> (urem x, y) iff x and y are positive
define <4 x i32> @combine_vec_srem_by_pos0(<4 x i32> %x) {
; SSE-LABEL: combine_vec_srem_by_pos0:
; SSE:       # %bb.0:
; SSE-NEXT:    andps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE-NEXT:    retq
;
; AVX1-LABEL: combine_vec_srem_by_pos0:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; AVX1-NEXT:    retq
;
; AVX2-LABEL: combine_vec_srem_by_pos0:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vbroadcastss {{.*#+}} xmm1 = [3,3,3,3]
; AVX2-NEXT:    vandps %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    retq
  %1 = and <4 x i32> %x, <i32 255, i32 255, i32 255, i32 255>
  %2 = srem <4 x i32> %1, <i32 4, i32 4, i32 4, i32 4>
  ret <4 x i32> %2
}
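; Note: x & 255 is known non-negative and the divisor 4 is positive, so the
; srem becomes a urem and folds to a single mask: (x & 255) & 3 simplifies to
; the x & 3 seen above (the [3,3,3,3] constant on AVX2).
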
define <4 x i32> @combine_vec_srem_by_pos1(<4 x i32> %x) {
; SSE-LABEL: combine_vec_srem_by_pos1:
; SSE:       # %bb.0:
; SSE-NEXT:    andps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_srem_by_pos1:
; AVX:       # %bb.0:
; AVX-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = and <4 x i32> %x, <i32 255, i32 255, i32 255, i32 255>
  %2 = srem <4 x i32> %1, <i32 1, i32 4, i32 8, i32 16>
  ret <4 x i32> %2
}
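; Note: the same fold applies lane-wise for the non-uniform divisors
; <1, 4, 8, 16>; the memory operand is the per-lane mask <0, 3, 7, 15>.
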
; fold (srem x, (1 << c)) -> x - (x / (1 << c)) * (1 << c).
define <4 x i32> @combine_vec_srem_by_pow2a(<4 x i32> %x) {
; SSE-LABEL: combine_vec_srem_by_pow2a:
; SSE:       # %bb.0:
; SSE-NEXT:    movdqa %xmm0, %xmm1
; SSE-NEXT:    psrad $31, %xmm1
; SSE-NEXT:    psrld $30, %xmm1
; SSE-NEXT:    paddd %xmm0, %xmm1
; SSE-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
; SSE-NEXT:    psubd %xmm1, %xmm0
; SSE-NEXT:    retq
;
; AVX1-LABEL: combine_vec_srem_by_pow2a:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vpsrad $31, %xmm0, %xmm1
; AVX1-NEXT:    vpsrld $30, %xmm1, %xmm1
; AVX1-NEXT:    vpaddd %xmm1, %xmm0, %xmm1
; AVX1-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
; AVX1-NEXT:    vpsubd %xmm1, %xmm0, %xmm0
; AVX1-NEXT:    retq
;
; AVX2-LABEL: combine_vec_srem_by_pow2a:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vpsrad $31, %xmm0, %xmm1
; AVX2-NEXT:    vpsrld $30, %xmm1, %xmm1
; AVX2-NEXT:    vpaddd %xmm1, %xmm0, %xmm1
; AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm2 = [4294967292,4294967292,4294967292,4294967292]
; AVX2-NEXT:    vpand %xmm2, %xmm1, %xmm1
; AVX2-NEXT:    vpsubd %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    retq
  %1 = srem <4 x i32> %x, <i32 4, i32 4, i32 4, i32 4>
  ret <4 x i32> %1
}
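; Note: the expansion computes x - ((x + bias) & -4) with bias = 3 for
; negative x and 0 otherwise: psrad $31 smears the sign bit, psrld $30 reduces
; it to the 0-or-3 bias, and the pand constant is -4 (4294967292 in the AVX2
; broadcast). The bias makes the implicit division round toward zero, as srem
; requires.
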
define <4 x i32> @combine_vec_srem_by_pow2a_neg(<4 x i32> %x) {
; SSE-LABEL: combine_vec_srem_by_pow2a_neg:
; SSE:       # %bb.0:
; SSE-NEXT:    movdqa %xmm0, %xmm1
; SSE-NEXT:    psrad $31, %xmm1
; SSE-NEXT:    psrld $30, %xmm1
; SSE-NEXT:    paddd %xmm0, %xmm1
; SSE-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
; SSE-NEXT:    psubd %xmm1, %xmm0
; SSE-NEXT:    retq
;
; AVX1-LABEL: combine_vec_srem_by_pow2a_neg:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vpsrad $31, %xmm0, %xmm1
; AVX1-NEXT:    vpsrld $30, %xmm1, %xmm1
; AVX1-NEXT:    vpaddd %xmm1, %xmm0, %xmm1
; AVX1-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
; AVX1-NEXT:    vpsubd %xmm1, %xmm0, %xmm0
; AVX1-NEXT:    retq
;
; AVX2-LABEL: combine_vec_srem_by_pow2a_neg:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vpsrad $31, %xmm0, %xmm1
; AVX2-NEXT:    vpsrld $30, %xmm1, %xmm1
; AVX2-NEXT:    vpaddd %xmm1, %xmm0, %xmm1
; AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm2 = [4294967292,4294967292,4294967292,4294967292]
; AVX2-NEXT:    vpand %xmm2, %xmm1, %xmm1
; AVX2-NEXT:    vpsubd %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    retq
  %1 = srem <4 x i32> %x, <i32 -4, i32 -4, i32 -4, i32 -4>
  ret <4 x i32> %1
}
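; Note: srem by -4 produces the same remainder as srem by 4 (the result takes
; the sign of the dividend, not the divisor), so the code matches
; combine_vec_srem_by_pow2a exactly.
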
define <4 x i32> @combine_vec_srem_by_pow2b(<4 x i32> %x) {
; SSE-LABEL: combine_vec_srem_by_pow2b:
; SSE:       # %bb.0:
; SSE-NEXT:    movdqa %xmm0, %xmm1
; SSE-NEXT:    psrld $31, %xmm1
; SSE-NEXT:    movdqa %xmm0, %xmm2
; SSE-NEXT:    psrad $31, %xmm2
; SSE-NEXT:    movdqa %xmm2, %xmm3
; SSE-NEXT:    psrld $29, %xmm3
; SSE-NEXT:    pblendw {{.*#+}} xmm3 = xmm1[0,1,2,3],xmm3[4,5,6,7]
; SSE-NEXT:    psrld $30, %xmm2
; SSE-NEXT:    pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm3[2,3],xmm2[4,5],xmm3[6,7]
; SSE-NEXT:    paddd %xmm0, %xmm2
; SSE-NEXT:    movdqa %xmm2, %xmm1
; SSE-NEXT:    psrad $3, %xmm1
; SSE-NEXT:    movdqa %xmm2, %xmm3
; SSE-NEXT:    psrad $1, %xmm3
; SSE-NEXT:    pblendw {{.*#+}} xmm3 = xmm3[0,1,2,3],xmm1[4,5,6,7]
; SSE-NEXT:    psrad $2, %xmm2
; SSE-NEXT:    pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm3[2,3],xmm2[4,5],xmm3[6,7]
; SSE-NEXT:    pblendw {{.*#+}} xmm2 = xmm0[0,1],xmm2[2,3,4,5,6,7]
; SSE-NEXT:    pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2
; SSE-NEXT:    psubd %xmm2, %xmm0
; SSE-NEXT:    retq
;
; AVX1-LABEL: combine_vec_srem_by_pow2b:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vpsrld $31, %xmm0, %xmm1
; AVX1-NEXT:    vpsrad $31, %xmm0, %xmm2
; AVX1-NEXT:    vpsrld $29, %xmm2, %xmm3
; AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm3[4,5,6,7]
; AVX1-NEXT:    vpsrld $30, %xmm2, %xmm2
; AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7]
; AVX1-NEXT:    vpaddd %xmm1, %xmm0, %xmm1
; AVX1-NEXT:    vpsrad $3, %xmm1, %xmm2
; AVX1-NEXT:    vpsrad $1, %xmm1, %xmm3
; AVX1-NEXT:    vpblendw {{.*#+}} xmm2 = xmm3[0,1,2,3],xmm2[4,5,6,7]
; AVX1-NEXT:    vpsrad $2, %xmm1, %xmm1
; AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7]
; AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm0[0,1],xmm1[2,3,4,5,6,7]
; AVX1-NEXT:    vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
; AVX1-NEXT:    vpsubd %xmm1, %xmm0, %xmm0
; AVX1-NEXT:    retq
;
; AVX2-LABEL: combine_vec_srem_by_pow2b:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vpsrad $31, %xmm0, %xmm1
; AVX2-NEXT:    vpsrlvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
; AVX2-NEXT:    vpaddd %xmm1, %xmm0, %xmm1
; AVX2-NEXT:    vpmovsxbd {{.*#+}} xmm2 = [0,1,2,3]
; AVX2-NEXT:    vpsravd %xmm2, %xmm1, %xmm1
; AVX2-NEXT:    vpblendd {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
; AVX2-NEXT:    vpsllvd %xmm2, %xmm1, %xmm1
; AVX2-NEXT:    vpsubd %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    retq
  %1 = srem <4 x i32> %x, <i32 1, i32 2, i32 4, i32 8>
  ret <4 x i32> %1
}
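; Note: distinct divisors <1, 2, 4, 8> need a different shift per lane. AVX2
; uses variable shifts directly (vpsrlvd for the bias, vpsravd for the
; quotient, vpsllvd to rebuild quotient * divisor), while SSE/AVX1 emulate them
; with fixed shifts and pblendw ladders plus a pmulld; lane 0 (divisor 1) is
; passed through by the blend with the original x.
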
define <4 x i32> @combine_vec_srem_by_pow2b_neg(<4 x i32> %x) {
; SSE-LABEL: combine_vec_srem_by_pow2b_neg:
; SSE:       # %bb.0:
; SSE-NEXT:    movdqa %xmm0, %xmm1
; SSE-NEXT:    psrad $31, %xmm1
; SSE-NEXT:    movdqa %xmm1, %xmm2
; SSE-NEXT:    psrld $28, %xmm2
; SSE-NEXT:    movdqa %xmm1, %xmm3
; SSE-NEXT:    psrld $30, %xmm3
; SSE-NEXT:    pblendw {{.*#+}} xmm3 = xmm3[0,1,2,3],xmm2[4,5,6,7]
; SSE-NEXT:    movdqa %xmm0, %xmm2
; SSE-NEXT:    psrld $31, %xmm2
; SSE-NEXT:    psrld $29, %xmm1
; SSE-NEXT:    pblendw {{.*#+}} xmm1 = xmm2[0,1,2,3],xmm1[4,5,6,7]
; SSE-NEXT:    pblendw {{.*#+}} xmm1 = xmm1[0,1],xmm3[2,3],xmm1[4,5],xmm3[6,7]
; SSE-NEXT:    paddd %xmm0, %xmm1
; SSE-NEXT:    movdqa %xmm1, %xmm2
; SSE-NEXT:    psrad $4, %xmm2
; SSE-NEXT:    movdqa %xmm1, %xmm3
; SSE-NEXT:    psrad $2, %xmm3
; SSE-NEXT:    pblendw {{.*#+}} xmm3 = xmm3[0,1,2,3],xmm2[4,5,6,7]
; SSE-NEXT:    movdqa %xmm1, %xmm2
; SSE-NEXT:    psrad $3, %xmm2
; SSE-NEXT:    psrld $1, %xmm1
; SSE-NEXT:    pblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm2[4,5,6,7]
; SSE-NEXT:    pblendw {{.*#+}} xmm1 = xmm1[0,1],xmm3[2,3],xmm1[4,5],xmm3[6,7]
; SSE-NEXT:    pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
; SSE-NEXT:    paddd %xmm1, %xmm0
; SSE-NEXT:    retq
;
; AVX1-LABEL: combine_vec_srem_by_pow2b_neg:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vpsrad $31, %xmm0, %xmm1
; AVX1-NEXT:    vpsrld $28, %xmm1, %xmm2
; AVX1-NEXT:    vpsrld $30, %xmm1, %xmm3
; AVX1-NEXT:    vpblendw {{.*#+}} xmm2 = xmm3[0,1,2,3],xmm2[4,5,6,7]
; AVX1-NEXT:    vpsrld $31, %xmm0, %xmm3
; AVX1-NEXT:    vpsrld $29, %xmm1, %xmm1
; AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm3[0,1,2,3],xmm1[4,5,6,7]
; AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7]
; AVX1-NEXT:    vpaddd %xmm1, %xmm0, %xmm1
; AVX1-NEXT:    vpsrad $4, %xmm1, %xmm2
; AVX1-NEXT:    vpsrad $2, %xmm1, %xmm3
; AVX1-NEXT:    vpblendw {{.*#+}} xmm2 = xmm3[0,1,2,3],xmm2[4,5,6,7]
; AVX1-NEXT:    vpsrad $3, %xmm1, %xmm3
; AVX1-NEXT:    vpsrld $1, %xmm1, %xmm1
; AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm3[4,5,6,7]
; AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7]
; AVX1-NEXT:    vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
; AVX1-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
; AVX1-NEXT:    retq
;
; AVX2-LABEL: combine_vec_srem_by_pow2b_neg:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vpsrad $31, %xmm0, %xmm1
; AVX2-NEXT:    vpsrlvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
; AVX2-NEXT:    vpaddd %xmm1, %xmm0, %xmm1
; AVX2-NEXT:    vpsravd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
; AVX2-NEXT:    vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
; AVX2-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    retq
  %1 = srem <4 x i32> %x, <i32 -2, i32 -4, i32 -8, i32 -16>
  ret <4 x i32> %1
}
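; Note: for the negative divisors the quotient is computed against |divisor|
; (shift amounts <1, 2, 3, 4>), then multiplied by the original negative
; divisors and added back, which is equivalent to subtracting
; quotient * |divisor|.
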
; FIXME: PR55271 - srem(undef, 3) != undef
; Use PSLLI intrinsic to postpone the undef creation until after srem-by-constant expansion
define <4 x i32> @combine_vec_srem_undef_by_3(<4 x i32> %in) {
; CHECK-LABEL: combine_vec_srem_undef_by_3:
; CHECK:       # %bb.0:
; CHECK-NEXT:    retq
  %x = call <4 x i32> @llvm.x86.sse2.pslli.d(<4 x i32> undef, i32 0)
  %y = srem <4 x i32> %x, <i32 3, i32 3, i32 3, i32 3>
  ret <4 x i32> %y
}
declare <4 x i32> @llvm.x86.sse2.pslli.d(<4 x i32>, i32)
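; Note: srem(undef, 3) must be a value in the range (-3, 3), so folding the
; whole expression to an unconstrained undef over-approximates the result; the
; FIXME above tracks tightening this.
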
; https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=6883
define i32 @ossfuzz6883() {
; CHECK-LABEL: ossfuzz6883:
; CHECK:       # %bb.0:
; CHECK-NEXT:    movl (%rax), %ecx
; CHECK-NEXT:    movl $2147483647, %eax # imm = 0x7FFFFFFF
; CHECK-NEXT:    xorl %edx, %edx
; CHECK-NEXT:    idivl %ecx
; CHECK-NEXT:    movl %eax, %esi
; CHECK-NEXT:    movl $2147483647, %eax # imm = 0x7FFFFFFF
; CHECK-NEXT:    xorl %edx, %edx
; CHECK-NEXT:    divl %ecx
; CHECK-NEXT:    movl %eax, %edi
; CHECK-NEXT:    movl %esi, %eax
; CHECK-NEXT:    cltd
; CHECK-NEXT:    idivl %edi
; CHECK-NEXT:    movl %edx, %esi
; CHECK-NEXT:    movl %ecx, %eax
; CHECK-NEXT:    cltd
; CHECK-NEXT:    idivl %esi
; CHECK-NEXT:    movl %edx, %edi
; CHECK-NEXT:    movl %ecx, %eax
; CHECK-NEXT:    xorl %edx, %edx
; CHECK-NEXT:    divl %esi
; CHECK-NEXT:    andl %edi, %eax
; CHECK-NEXT:    retq
  %B17 = or i32 0, 2147483647
  %L6 = load i32, ptr undef
  %B11 = sdiv i32 %B17, %L6
  %B13 = udiv i32 %B17, %L6
  %B14 = srem i32 %B11, %B13
  %B16 = srem i32 %L6, %B14
  %B10 = udiv i32 %L6, %B14
  %B6 = and i32 %B16, %B10
  ret i32 %B6
}
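; Note: reduced from the oss-fuzz reproducer linked above; none of the chained
; div/rem values are constant after the load, so each division is emitted in
; sequence, and the test primarily guards against the original failure
; recurring.
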
define i1 @bool_srem(i1 %x, i1 %y) {
; CHECK-LABEL: bool_srem:
; CHECK:       # %bb.0:
; CHECK-NEXT:    xorl %eax, %eax
; CHECK-NEXT:    retq
  %r = srem i1 %x, %y
  ret i1 %r
}

define <4 x i1> @boolvec_srem(<4 x i1> %x, <4 x i1> %y) {
; SSE-LABEL: boolvec_srem:
; SSE:       # %bb.0:
; SSE-NEXT:    xorps %xmm0, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: boolvec_srem:
; AVX:       # %bb.0:
; AVX-NEXT:    vxorps %xmm0, %xmm0, %xmm0
; AVX-NEXT:    retq
  %r = srem <4 x i1> %x, %y
  ret <4 x i1> %r
}
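; Note: the only i1 values are 0 and -1, so any defined i1 srem is a remainder
; of division by -1 and is always 0; both the scalar and vector forms above
; fold to zero outright.
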
define i32 @combine_srem_two(i32 %x) {
; CHECK-LABEL: combine_srem_two:
; CHECK:       # %bb.0:
; CHECK-NEXT:    movl %edi, %eax
; CHECK-NEXT:    movl %edi, %ecx
; CHECK-NEXT:    shrl $31, %ecx
; CHECK-NEXT:    addl %edi, %ecx
; CHECK-NEXT:    andl $-2, %ecx
; CHECK-NEXT:    subl %ecx, %eax
; CHECK-NEXT:    retq
  %1 = srem i32 %x, 2
  ret i32 %1
}
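; Note: for srem by 2 the bias is just the logical sign-bit shift: the code
; computes x - ((x + (x >>u 31)) & -2), rounding toward zero without a cmov.
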
define i32 @combine_srem_negtwo(i32 %x) {
; CHECK-LABEL: combine_srem_negtwo:
; CHECK:       # %bb.0:
; CHECK-NEXT:    movl %edi, %eax
; CHECK-NEXT:    movl %edi, %ecx
; CHECK-NEXT:    shrl $31, %ecx
; CHECK-NEXT:    addl %edi, %ecx
; CHECK-NEXT:    andl $-2, %ecx
; CHECK-NEXT:    subl %ecx, %eax
; CHECK-NEXT:    retq
  %1 = srem i32 %x, -2
  ret i32 %1
}
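; Note: identical code to combine_srem_two above, since the remainder depends
; only on |divisor|.
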
define i8 @combine_i8_srem_negpow2(i8 %x) {
; CHECK-LABEL: combine_i8_srem_negpow2:
; CHECK:       # %bb.0:
; CHECK-NEXT:    movl %edi, %eax
; CHECK-NEXT:    movl %eax, %ecx
; CHECK-NEXT:    sarb $7, %cl
; CHECK-NEXT:    shrb $2, %cl
; CHECK-NEXT:    addb %al, %cl
; CHECK-NEXT:    andb $-64, %cl
; CHECK-NEXT:    subb %cl, %al
; CHECK-NEXT:    # kill: def $al killed $al killed $eax
; CHECK-NEXT:    retq
  %1 = srem i8 %x, -64
  ret i8 %1
}
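; Note: the i8 bias is derived from the sign: sarb $7 yields 0 or -1, shrb $2
; turns -1 into 63 (= |-64| - 1), and the addb/andb/subb sequence completes the
; usual round-toward-zero remainder.
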
define i16 @combine_i16_srem_pow2(i16 %x) {
; CHECK-LABEL: combine_i16_srem_pow2:
; CHECK:       # %bb.0:
; CHECK-NEXT:    movl %edi, %eax
; CHECK-NEXT:    leal 15(%rax), %ecx
; CHECK-NEXT:    testw %ax, %ax
; CHECK-NEXT:    cmovnsl %edi, %ecx
; CHECK-NEXT:    andl $-16, %ecx
; CHECK-NEXT:    subl %ecx, %eax
; CHECK-NEXT:    # kill: def $ax killed $ax killed $rax
; CHECK-NEXT:    retq
  %1 = srem i16 %x, 16
  ret i16 %1
}
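; Note: the i16 forms are widened and computed in 32-bit registers to avoid
; partial-register operations; only the low 16 bits survive, per the kill
; comment.
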
define i16 @combine_i16_srem_negpow2(i16 %x) {
; CHECK-LABEL: combine_i16_srem_negpow2:
; CHECK:       # %bb.0:
; CHECK-NEXT:    movl %edi, %eax
; CHECK-NEXT:    leal 255(%rax), %ecx
; CHECK-NEXT:    testw %ax, %ax
; CHECK-NEXT:    cmovnsl %edi, %ecx
; CHECK-NEXT:    andl $-256, %ecx
; CHECK-NEXT:    subl %ecx, %eax
; CHECK-NEXT:    # kill: def $ax killed $ax killed $rax
; CHECK-NEXT:    retq
  %1 = srem i16 %x, -256
  ret i16 %1
}

define i32 @combine_srem_pow2(i32 %x) {
; CHECK-LABEL: combine_srem_pow2:
; CHECK:       # %bb.0:
; CHECK-NEXT:    movl %edi, %eax
; CHECK-NEXT:    leal 15(%rax), %ecx
; CHECK-NEXT:    testl %edi, %edi
; CHECK-NEXT:    cmovnsl %edi, %ecx
; CHECK-NEXT:    andl $-16, %ecx
; CHECK-NEXT:    subl %ecx, %eax
; CHECK-NEXT:    # kill: def $eax killed $eax killed $rax
; CHECK-NEXT:    retq
  %1 = srem i32 %x, 16
  ret i32 %1
}
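; Note: for the wider power-of-two cases the bias (divisor - 1, here 15) is
; selected with test/cmovns rather than shifts: ecx holds x + 15 for negative x
; and x otherwise, and the andl/subl pair produces the round-toward-zero
; remainder.
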
define i32 @combine_srem_negpow2(i32 %x) {
; CHECK-LABEL: combine_srem_negpow2:
; CHECK:       # %bb.0:
; CHECK-NEXT:    movl %edi, %eax
; CHECK-NEXT:    leal 255(%rax), %ecx
; CHECK-NEXT:    testl %edi, %edi
; CHECK-NEXT:    cmovnsl %edi, %ecx
; CHECK-NEXT:    andl $-256, %ecx
; CHECK-NEXT:    subl %ecx, %eax
; CHECK-NEXT:    # kill: def $eax killed $eax killed $rax
; CHECK-NEXT:    retq
  %1 = srem i32 %x, -256
  ret i32 %1
}

define i64 @combine_i64_srem_pow2(i64 %x) {
; CHECK-LABEL: combine_i64_srem_pow2:
; CHECK:       # %bb.0:
; CHECK-NEXT:    movq %rdi, %rax
; CHECK-NEXT:    leaq 15(%rdi), %rcx
; CHECK-NEXT:    testq %rdi, %rdi
; CHECK-NEXT:    cmovnsq %rdi, %rcx
; CHECK-NEXT:    andq $-16, %rcx
; CHECK-NEXT:    subq %rcx, %rax
; CHECK-NEXT:    retq
  %1 = srem i64 %x, 16
  ret i64 %1
}

define i64 @combine_i64_srem_negpow2(i64 %x) {
; CHECK-LABEL: combine_i64_srem_negpow2:
; CHECK:       # %bb.0:
; CHECK-NEXT:    movq %rdi, %rax
; CHECK-NEXT:    leaq 255(%rdi), %rcx
; CHECK-NEXT:    testq %rdi, %rdi
; CHECK-NEXT:    cmovnsq %rdi, %rcx
; CHECK-NEXT:    andq $-256, %rcx
; CHECK-NEXT:    subq %rcx, %rax
; CHECK-NEXT:    retq
  %1 = srem i64 %x, -256
  ret i64 %1
}