1 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
2 ; RUN: opt < %s -passes=instsimplify -S | FileCheck %s
; A zero dividend simplifies to zero for any divisor value.

define i32 @zero_dividend(i32 %A) {
; CHECK-LABEL: @zero_dividend(
; CHECK-NEXT:    ret i32 0
;
  %B = srem i32 0, %A
  ret i32 %B
}

define <2 x i32> @zero_dividend_vector(<2 x i32> %A) {
; CHECK-LABEL: @zero_dividend_vector(
; CHECK-NEXT:    ret <2 x i32> zeroinitializer
;
  %B = srem <2 x i32> zeroinitializer, %A
  ret <2 x i32> %B
}

define <2 x i32> @zero_dividend_vector_poison_elt(<2 x i32> %A) {
; CHECK-LABEL: @zero_dividend_vector_poison_elt(
; CHECK-NEXT:    ret <2 x i32> zeroinitializer
;
  %B = urem <2 x i32> <i32 poison, i32 0>, %A
  ret <2 x i32> %B
}
; Division-by-zero is poison. UB in any vector lane means the whole op is poison.

define <2 x i8> @srem_zero_elt_vec_constfold(<2 x i8> %x) {
; CHECK-LABEL: @srem_zero_elt_vec_constfold(
; CHECK-NEXT:    ret <2 x i8> poison
;
  %rem = srem <2 x i8> <i8 1, i8 2>, <i8 0, i8 -42>
  ret <2 x i8> %rem
}

define <2 x i8> @urem_zero_elt_vec_constfold(<2 x i8> %x) {
; CHECK-LABEL: @urem_zero_elt_vec_constfold(
; CHECK-NEXT:    ret <2 x i8> poison
;
  %rem = urem <2 x i8> <i8 1, i8 2>, <i8 42, i8 0>
  ret <2 x i8> %rem
}

define <2 x i8> @srem_zero_elt_vec(<2 x i8> %x) {
; CHECK-LABEL: @srem_zero_elt_vec(
; CHECK-NEXT:    ret <2 x i8> poison
;
  %rem = srem <2 x i8> %x, <i8 -42, i8 0>
  ret <2 x i8> %rem
}

define <2 x i8> @urem_zero_elt_vec(<2 x i8> %x) {
; CHECK-LABEL: @urem_zero_elt_vec(
; CHECK-NEXT:    ret <2 x i8> poison
;
  %rem = urem <2 x i8> %x, <i8 0, i8 42>
  ret <2 x i8> %rem
}

; An undef divisor element may be chosen to be 0, so these fold the same way.

define <2 x i8> @srem_undef_elt_vec(<2 x i8> %x) {
; CHECK-LABEL: @srem_undef_elt_vec(
; CHECK-NEXT:    ret <2 x i8> poison
;
  %rem = srem <2 x i8> %x, <i8 -42, i8 undef>
  ret <2 x i8> %rem
}

define <2 x i8> @urem_undef_elt_vec(<2 x i8> %x) {
; CHECK-LABEL: @urem_undef_elt_vec(
; CHECK-NEXT:    ret <2 x i8> poison
;
  %rem = urem <2 x i8> %x, <i8 undef, i8 42>
  ret <2 x i8> %rem
}
; Division-by-zero is poison. UB in any vector lane means the whole op is poison.
; Thus, we can simplify this: if any element of 'y' is 0, we can do anything.
; Therefore, assume that all elements of 'y' must be 1 (X % 1 --> 0).

define <2 x i1> @srem_bool_vec(<2 x i1> %x, <2 x i1> %y) {
; CHECK-LABEL: @srem_bool_vec(
; CHECK-NEXT:    ret <2 x i1> zeroinitializer
;
  %rem = srem <2 x i1> %x, %y
  ret <2 x i1> %rem
}

define <2 x i1> @urem_bool_vec(<2 x i1> %x, <2 x i1> %y) {
; CHECK-LABEL: @urem_bool_vec(
; CHECK-NEXT:    ret <2 x i1> zeroinitializer
;
  %rem = urem <2 x i1> %x, %y
  ret <2 x i1> %rem
}
; A zext'd bool divisor is 0 or 1; 0 would be immediate UB, so assume 1,
; and X % 1 --> 0.

define <2 x i32> @zext_bool_urem_divisor_vec(<2 x i1> %x, <2 x i32> %y) {
; CHECK-LABEL: @zext_bool_urem_divisor_vec(
; CHECK-NEXT:    ret <2 x i32> zeroinitializer
;
  %ext = zext <2 x i1> %x to <2 x i32>
  %r = urem <2 x i32> %y, %ext
  ret <2 x i32> %r
}

define i32 @zext_bool_srem_divisor(i1 %x, i32 %y) {
; CHECK-LABEL: @zext_bool_srem_divisor(
; CHECK-NEXT:    ret i32 0
;
  %ext = zext i1 %x to i32
  %r = srem i32 %y, %ext
  ret i32 %r
}
; The divisor is either %x or 1: X % X --> 0 and X % 1 --> 0, so it is 0 either way.

define i32 @select1(i32 %x, i1 %b) {
; CHECK-LABEL: @select1(
; CHECK-NEXT:    ret i32 0
;
  %rhs = select i1 %b, i32 %x, i32 1
  %rem = srem i32 %x, %rhs
  ret i32 %rem
}

define i32 @select2(i32 %x, i1 %b) {
; CHECK-LABEL: @select2(
; CHECK-NEXT:    ret i32 0
;
  %rhs = select i1 %b, i32 %x, i32 1
  %rem = urem i32 %x, %rhs
  ret i32 %rem
}
; (X % N) % N --> X % N when both rems have the same signedness.

define i32 @rem1(i32 %x, i32 %n) {
; CHECK-LABEL: @rem1(
; CHECK-NEXT:    [[MOD:%.*]] = srem i32 [[X:%.*]], [[N:%.*]]
; CHECK-NEXT:    ret i32 [[MOD]]
;
  %mod = srem i32 %x, %n
  %mod1 = srem i32 %mod, %n
  ret i32 %mod1
}

define i32 @rem2(i32 %x, i32 %n) {
; CHECK-LABEL: @rem2(
; CHECK-NEXT:    [[MOD:%.*]] = urem i32 [[X:%.*]], [[N:%.*]]
; CHECK-NEXT:    ret i32 [[MOD]]
;
  %mod = urem i32 %x, %n
  %mod1 = urem i32 %mod, %n
  ret i32 %mod1
}

; negative test - mixed signedness (srem then urem) must not fold

define i32 @rem3(i32 %x, i32 %n) {
; CHECK-LABEL: @rem3(
; CHECK-NEXT:    [[MOD:%.*]] = srem i32 [[X:%.*]], [[N:%.*]]
; CHECK-NEXT:    [[MOD1:%.*]] = urem i32 [[MOD]], [[N]]
; CHECK-NEXT:    ret i32 [[MOD1]]
;
  %mod = srem i32 %x, %n
  %mod1 = urem i32 %mod, %n
  ret i32 %mod1
}
; The masked dividend is at most 250, which is less than the divisor 251,
; so the urem is an identity op.

define i32 @urem_dividend_known_smaller_than_constant_divisor(i32 %x) {
; CHECK-LABEL: @urem_dividend_known_smaller_than_constant_divisor(
; CHECK-NEXT:    [[AND:%.*]] = and i32 [[X:%.*]], 250
; CHECK-NEXT:    ret i32 [[AND]]
;
  %and = and i32 %x, 250
  %r = urem i32 %and, 251
  ret i32 %r
}

; negative test - dividend can equal 251

define i32 @not_urem_dividend_known_smaller_than_constant_divisor(i32 %x) {
; CHECK-LABEL: @not_urem_dividend_known_smaller_than_constant_divisor(
; CHECK-NEXT:    [[AND:%.*]] = and i32 [[X:%.*]], 251
; CHECK-NEXT:    [[R:%.*]] = urem i32 [[AND]], 251
; CHECK-NEXT:    ret i32 [[R]]
;
  %and = and i32 %x, 251
  %r = urem i32 %and, 251
  ret i32 %r
}

; The xor result is 12 or 13, both less than the divisor 14.

define i8 @urem_dividend_known_smaller_than_constant_divisor2(i1 %b) {
; CHECK-LABEL: @urem_dividend_known_smaller_than_constant_divisor2(
; CHECK-NEXT:    [[T0:%.*]] = zext i1 [[B:%.*]] to i8
; CHECK-NEXT:    [[XOR:%.*]] = xor i8 [[T0]], 12
; CHECK-NEXT:    ret i8 [[XOR]]
;
  %t0 = zext i1 %b to i8
  %xor = xor i8 %t0, 12
  %r = urem i8 %xor, 14
  ret i8 %r
}

; negative test - dividend can equal 13

define i8 @not_urem_dividend_known_smaller_than_constant_divisor2(i1 %b) {
; CHECK-LABEL: @not_urem_dividend_known_smaller_than_constant_divisor2(
; CHECK-NEXT:    [[T0:%.*]] = zext i1 [[B:%.*]] to i8
; CHECK-NEXT:    [[XOR:%.*]] = xor i8 [[T0]], 12
; CHECK-NEXT:    [[R:%.*]] = urem i8 [[XOR]], 13
; CHECK-NEXT:    ret i8 [[R]]
;
  %t0 = zext i1 %b to i8
  %xor = xor i8 %t0, 12
  %r = urem i8 %xor, 13
  ret i8 %r
}
; The divisor is at least 251, which is greater than the constant dividend 250,
; so the urem returns the dividend unchanged.

define i32 @urem_constant_dividend_known_smaller_than_divisor(i32 %x) {
; CHECK-LABEL: @urem_constant_dividend_known_smaller_than_divisor(
; CHECK-NEXT:    ret i32 250
;
  %or = or i32 %x, 251
  %r = urem i32 250, %or
  ret i32 %r
}

; negative test - dividend can equal the divisor (251)

define i32 @not_urem_constant_dividend_known_smaller_than_divisor(i32 %x) {
; CHECK-LABEL: @not_urem_constant_dividend_known_smaller_than_divisor(
; CHECK-NEXT:    [[OR:%.*]] = or i32 [[X:%.*]], 251
; CHECK-NEXT:    [[R:%.*]] = urem i32 251, [[OR]]
; CHECK-NEXT:    ret i32 [[R]]
;
  %or = or i32 %x, 251
  %r = urem i32 251, %or
  ret i32 %r
}

; This would require computing known bits on both x and y. Is it worth doing?

define i32 @urem_dividend_known_smaller_than_divisor(i32 %x, i32 %y) {
; CHECK-LABEL: @urem_dividend_known_smaller_than_divisor(
; CHECK-NEXT:    [[AND:%.*]] = and i32 [[X:%.*]], 250
; CHECK-NEXT:    [[OR:%.*]] = or i32 [[Y:%.*]], 251
; CHECK-NEXT:    [[R:%.*]] = urem i32 [[AND]], [[OR]]
; CHECK-NEXT:    ret i32 [[R]]
;
  %and = and i32 %x, 250
  %or = or i32 %y, 251
  %r = urem i32 %and, %or
  ret i32 %r
}

define i32 @not_urem_dividend_known_smaller_than_divisor(i32 %x, i32 %y) {
; CHECK-LABEL: @not_urem_dividend_known_smaller_than_divisor(
; CHECK-NEXT:    [[AND:%.*]] = and i32 [[X:%.*]], 251
; CHECK-NEXT:    [[OR:%.*]] = or i32 [[Y:%.*]], 251
; CHECK-NEXT:    [[R:%.*]] = urem i32 [[AND]], [[OR]]
; CHECK-NEXT:    ret i32 [[R]]
;
  %and = and i32 %x, 251
  %or = or i32 %y, 251
  %r = urem i32 %and, %or
  ret i32 %r
}
declare i32 @external()

; The !range metadata bounds the call result to [0, 3), so urem by 3 is an identity.

define i32 @rem4() {
; CHECK-LABEL: @rem4(
; CHECK-NEXT:    [[CALL:%.*]] = call i32 @external(), !range [[RNG0:![0-9]+]]
; CHECK-NEXT:    ret i32 [[CALL]]
;
  %call = call i32 @external(), !range !0
  %urem = urem i32 %call, 3
  ret i32 %urem
}

!0 = !{i32 0, i32 3}
; A non-wrapping (X << Y) is a multiple of X, so the remainder modulo X is 0.

define i32 @rem5(i32 %x, i32 %y) {
; CHECK-LABEL: @rem5(
; CHECK-NEXT:    ret i32 0
;
  %shl = shl nsw i32 %x, %y
  %mod = srem i32 %shl, %x
  ret i32 %mod
}

define <2 x i32> @rem6(<2 x i32> %x, <2 x i32> %y) {
; CHECK-LABEL: @rem6(
; CHECK-NEXT:    ret <2 x i32> zeroinitializer
;
  %shl = shl nsw <2 x i32> %x, %y
  %mod = srem <2 x i32> %shl, %x
  ret <2 x i32> %mod
}

; make sure the previous fold doesn't take place for wrapped shifts

define i32 @rem7(i32 %x, i32 %y) {
; CHECK-LABEL: @rem7(
; CHECK-NEXT:    [[SHL:%.*]] = shl i32 [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT:    [[MOD:%.*]] = srem i32 [[SHL]], [[X]]
; CHECK-NEXT:    ret i32 [[MOD]]
;
  %shl = shl i32 %x, %y
  %mod = srem i32 %shl, %x
  ret i32 %mod
}

define i32 @rem8(i32 %x, i32 %y) {
; CHECK-LABEL: @rem8(
; CHECK-NEXT:    ret i32 0
;
  %shl = shl nuw i32 %x, %y
  %mod = urem i32 %shl, %x
  ret i32 %mod
}

define <2 x i32> @rem9(<2 x i32> %x, <2 x i32> %y) {
; CHECK-LABEL: @rem9(
; CHECK-NEXT:    ret <2 x i32> zeroinitializer
;
  %shl = shl nuw <2 x i32> %x, %y
  %mod = urem <2 x i32> %shl, %x
  ret <2 x i32> %mod
}

; make sure the previous fold doesn't take place for wrapped shifts

define i32 @rem10(i32 %x, i32 %y) {
; CHECK-LABEL: @rem10(
; CHECK-NEXT:    [[SHL:%.*]] = shl i32 [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT:    [[MOD:%.*]] = urem i32 [[SHL]], [[X]]
; CHECK-NEXT:    ret i32 [[MOD]]
;
  %shl = shl i32 %x, %y
  %mod = urem i32 %shl, %x
  ret i32 %mod
}
; A sext'd bool divisor is 0 or -1; 0 would be immediate UB and X % -1 --> 0,
; so the result is 0 either way.

define i32 @srem_with_sext_bool_divisor(i1 %x, i32 %y) {
; CHECK-LABEL: @srem_with_sext_bool_divisor(
; CHECK-NEXT:    ret i32 0
;
  %s = sext i1 %x to i32
  %r = srem i32 %y, %s
  ret i32 %r
}

define <2 x i32> @srem_with_sext_bool_divisor_vec(<2 x i1> %x, <2 x i32> %y) {
; CHECK-LABEL: @srem_with_sext_bool_divisor_vec(
; CHECK-NEXT:    ret <2 x i32> zeroinitializer
;
  %s = sext <2 x i1> %x to <2 x i32>
  %r = srem <2 x i32> %y, %s
  ret <2 x i32> %r
}

; -128 srem -1 overflows (like INT_MIN / -1), so this constant-folds to poison.

define i8 @srem_minusone_divisor() {
; CHECK-LABEL: @srem_minusone_divisor(
; CHECK-NEXT:    ret i8 poison
;
  %v = srem i8 -128, -1
  ret i8 %v
}
; (X * Y) % Y --> 0 when the multiply cannot overflow in the rem's signedness.

define i32 @srem_of_mul_nsw(i32 %x, i32 %y) {
; CHECK-LABEL: @srem_of_mul_nsw(
; CHECK-NEXT:    ret i32 0
;
  %mul = mul nsw i32 %x, %y
  %mod = srem i32 %mul, %y
  ret i32 %mod
}

; Verify that the optimization kicks in for:
;   - Y * X % Y as well as X * Y % Y

define <2 x i32> @srem_of_mul_nsw_vec_commuted(<2 x i32> %x, <2 x i32> %y) {
; CHECK-LABEL: @srem_of_mul_nsw_vec_commuted(
; CHECK-NEXT:    ret <2 x i32> zeroinitializer
;
  %mul = mul nsw <2 x i32> %y, %x
  %mod = srem <2 x i32> %mul, %y
  ret <2 x i32> %mod
}

; negative test - nuw does not justify the signed-rem fold

define i32 @srem_of_mul_nuw(i32 %x, i32 %y) {
; CHECK-LABEL: @srem_of_mul_nuw(
; CHECK-NEXT:    [[MUL:%.*]] = mul nuw i32 [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT:    [[MOD:%.*]] = srem i32 [[MUL]], [[Y]]
; CHECK-NEXT:    ret i32 [[MOD]]
;
  %mul = mul nuw i32 %x, %y
  %mod = srem i32 %mul, %y
  ret i32 %mod
}

; negative test - a wrapping multiply is not necessarily a multiple of %y

define i32 @srem_of_mul(i32 %x, i32 %y) {
; CHECK-LABEL: @srem_of_mul(
; CHECK-NEXT:    [[MUL:%.*]] = mul i32 [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT:    [[MOD:%.*]] = srem i32 [[MUL]], [[Y]]
; CHECK-NEXT:    ret i32 [[MOD]]
;
  %mul = mul i32 %x, %y
  %mod = srem i32 %mul, %y
  ret i32 %mod
}

; negative test - nsw does not justify the unsigned-rem fold

define i32 @urem_of_mul_nsw(i32 %x, i32 %y) {
; CHECK-LABEL: @urem_of_mul_nsw(
; CHECK-NEXT:    [[MUL:%.*]] = mul nsw i32 [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT:    [[MOD:%.*]] = urem i32 [[MUL]], [[Y]]
; CHECK-NEXT:    ret i32 [[MOD]]
;
  %mul = mul nsw i32 %x, %y
  %mod = urem i32 %mul, %y
  ret i32 %mod
}

define i32 @urem_of_mul_nuw(i32 %x, i32 %y) {
; CHECK-LABEL: @urem_of_mul_nuw(
; CHECK-NEXT:    ret i32 0
;
  %mul = mul nuw i32 %x, %y
  %mod = urem i32 %mul, %y
  ret i32 %mod
}

define <2 x i32> @srem_of_mul_nuw_vec_commuted(<2 x i32> %x, <2 x i32> %y) {
; CHECK-LABEL: @srem_of_mul_nuw_vec_commuted(
; CHECK-NEXT:    ret <2 x i32> zeroinitializer
;
  %mul = mul nuw <2 x i32> %y, %x
  %mod = urem <2 x i32> %mul, %y
  ret <2 x i32> %mod
}

define i32 @urem_of_mul(i32 %x, i32 %y) {
; CHECK-LABEL: @urem_of_mul(
; CHECK-NEXT:    [[MUL:%.*]] = mul i32 [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT:    [[MOD:%.*]] = urem i32 [[MUL]], [[Y]]
; CHECK-NEXT:    ret i32 [[MOD]]
;
  %mul = mul i32 %x, %y
  %mod = urem i32 %mul, %y
  ret i32 %mod
}
; (X / Y) * Y % Y --> 0 when the div and rem have matching signedness.

define i4 @srem_mul_sdiv(i4 %x, i4 %y) {
; CHECK-LABEL: @srem_mul_sdiv(
; CHECK-NEXT:    ret i4 0
;
  %d = sdiv i4 %x, %y
  %mul = mul i4 %d, %y
  %mod = srem i4 %mul, %y
  ret i4 %mod
}

; negative test - div and rem signedness must match

define i8 @srem_mul_udiv(i8 %x, i8 %y) {
; CHECK-LABEL: @srem_mul_udiv(
; CHECK-NEXT:    [[D:%.*]] = udiv i8 [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT:    [[MUL:%.*]] = mul i8 [[D]], [[Y]]
; CHECK-NEXT:    [[MOD:%.*]] = srem i8 [[MUL]], [[Y]]
; CHECK-NEXT:    ret i8 [[MOD]]
;
  %d = udiv i8 %x, %y
  %mul = mul i8 %d, %y
  %mod = srem i8 %mul, %y
  ret i8 %mod
}

define <3 x i7> @urem_mul_udiv_vec_commuted(<3 x i7> %x, <3 x i7> %y) {
; CHECK-LABEL: @urem_mul_udiv_vec_commuted(
; CHECK-NEXT:    ret <3 x i7> zeroinitializer
;
  %d = udiv <3 x i7> %x, %y
  %mul = mul <3 x i7> %y, %d
  %mod = urem <3 x i7> %mul, %y
  ret <3 x i7> %mod
}

; negative test - div and rem signedness must match

define i8 @urem_mul_sdiv(i8 %x, i8 %y) {
; CHECK-LABEL: @urem_mul_sdiv(
; CHECK-NEXT:    [[D:%.*]] = sdiv i8 [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT:    [[MUL:%.*]] = mul i8 [[Y]], [[D]]
; CHECK-NEXT:    [[MOD:%.*]] = urem i8 [[MUL]], [[Y]]
; CHECK-NEXT:    ret i8 [[MOD]]
;
  %d = sdiv i8 %x, %y
  %mul = mul i8 %y, %d
  %mod = urem i8 %mul, %y
  ret i8 %mod
}
; (X * C1) % C2 --> 0 when C1 is a multiple of C2 and the multiply cannot
; overflow in the rem's signedness.

define <2 x i8> @simplfy_srem_of_mul(<2 x i8> %x) {
; CHECK-LABEL: @simplfy_srem_of_mul(
; CHECK-NEXT:    ret <2 x i8> zeroinitializer
;
  %mul = mul nsw <2 x i8> %x, <i8 20, i8 10>
  %r = srem <2 x i8> %mul, <i8 5, i8 5>
  ret <2 x i8> %r
}

; negative test - 11 is not a multiple of 5

define <2 x i8> @simplfy_srem_of_mul_fail_bad_mod(<2 x i8> %x) {
; CHECK-LABEL: @simplfy_srem_of_mul_fail_bad_mod(
; CHECK-NEXT:    [[MUL:%.*]] = mul nsw <2 x i8> [[X:%.*]], <i8 20, i8 11>
; CHECK-NEXT:    [[R:%.*]] = srem <2 x i8> [[MUL]], <i8 5, i8 5>
; CHECK-NEXT:    ret <2 x i8> [[R]]
;
  %mul = mul nsw <2 x i8> %x, <i8 20, i8 11>
  %r = srem <2 x i8> %mul, <i8 5, i8 5>
  ret <2 x i8> %r
}

define i8 @simplfy_urem_of_mul(i8 %x) {
; CHECK-LABEL: @simplfy_urem_of_mul(
; CHECK-NEXT:    ret i8 0
;
  %mul = mul nuw i8 %x, 30
  %r = urem i8 %mul, 10
  ret i8 %r
}

; negative test - the unsigned rem needs nuw, not nsw

define i8 @simplfy_urem_of_mul_fail_bad_flag(i8 %x) {
; CHECK-LABEL: @simplfy_urem_of_mul_fail_bad_flag(
; CHECK-NEXT:    [[MUL:%.*]] = mul nsw i8 [[X:%.*]], 30
; CHECK-NEXT:    [[R:%.*]] = urem i8 [[MUL]], 10
; CHECK-NEXT:    ret i8 [[R]]
;
  %mul = mul nsw i8 %x, 30
  %r = urem i8 %mul, 10
  ret i8 %r
}
532 define i8 @simplfy_urem_of_mul_fail_bad_mod(i8 %x) {
533 ; CHECK-LABEL: @simplfy_urem_of_mul_fail_bad_mod(
534 ; CHECK-NEXT: [[MUL:%.*]] = mul nuw i8 [[X:%.*]], 31
535 ; CHECK-NEXT: [[R:%.*]] = urem i8 [[MUL]], 10
536 ; CHECK-NEXT: ret i8 [[R]]
538 %mul = mul nuw i8 %x, 31
539 %r = urem i8 %mul, 10