; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -instcombine -S | FileCheck %s

;
; Saturating addition.
;

declare i8 @llvm.uadd.sat.i8(i8, i8)
declare i8 @llvm.sadd.sat.i8(i8, i8)
declare <2 x i8> @llvm.uadd.sat.v2i8(<2 x i8>, <2 x i8>)
declare <2 x i8> @llvm.sadd.sat.v2i8(<2 x i8>, <2 x i8>)

; Constant uadd argument is canonicalized to the right.
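; (Both saturating-add intrinsics are commutative, so a constant left-hand
; operand can always be moved to the right-hand side.)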
define i8 @test_scalar_uadd_canonical(i8 %a) {
; CHECK-LABEL: @test_scalar_uadd_canonical(
; CHECK-NEXT:    [[X:%.*]] = call i8 @llvm.uadd.sat.i8(i8 [[A:%.*]], i8 10)
; CHECK-NEXT:    ret i8 [[X]]
;
  %x = call i8 @llvm.uadd.sat.i8(i8 10, i8 %a)
  ret i8 %x
}

define <2 x i8> @test_vector_uadd_canonical(<2 x i8> %a) {
; CHECK-LABEL: @test_vector_uadd_canonical(
; CHECK-NEXT:    [[X:%.*]] = call <2 x i8> @llvm.uadd.sat.v2i8(<2 x i8> [[A:%.*]], <2 x i8> <i8 10, i8 20>)
; CHECK-NEXT:    ret <2 x i8> [[X]]
;
  %x = call <2 x i8> @llvm.uadd.sat.v2i8(<2 x i8> <i8 10, i8 20>, <2 x i8> %a)
  ret <2 x i8> %x
}

; Constant sadd argument is canonicalized to the right.
define i8 @test_scalar_sadd_canonical(i8 %a) {
; CHECK-LABEL: @test_scalar_sadd_canonical(
; CHECK-NEXT:    [[X:%.*]] = call i8 @llvm.sadd.sat.i8(i8 [[A:%.*]], i8 -10)
; CHECK-NEXT:    ret i8 [[X]]
;
  %x = call i8 @llvm.sadd.sat.i8(i8 -10, i8 %a)
  ret i8 %x
}

define <2 x i8> @test_vector_sadd_canonical(<2 x i8> %a) {
; CHECK-LABEL: @test_vector_sadd_canonical(
; CHECK-NEXT:    [[X:%.*]] = call <2 x i8> @llvm.sadd.sat.v2i8(<2 x i8> [[A:%.*]], <2 x i8> <i8 10, i8 -20>)
; CHECK-NEXT:    ret <2 x i8> [[X]]
;
  %x = call <2 x i8> @llvm.sadd.sat.v2i8(<2 x i8> <i8 10, i8 -20>, <2 x i8> %a)
  ret <2 x i8> %x
}

; Can combine uadds with constant operands.
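; (sat(sat(a + c1) + c2) == sat(a + c1 + c2) for unsigned saturating adds as
; long as c1 + c2 itself does not wrap; here 10 + 20 = 30 fits in i8.)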
define i8 @test_scalar_uadd_combine(i8 %a) {
; CHECK-LABEL: @test_scalar_uadd_combine(
; CHECK-NEXT:    [[TMP1:%.*]] = call i8 @llvm.uadd.sat.i8(i8 [[A:%.*]], i8 30)
; CHECK-NEXT:    ret i8 [[TMP1]]
;
  %x1 = call i8 @llvm.uadd.sat.i8(i8 %a, i8 10)
  %x2 = call i8 @llvm.uadd.sat.i8(i8 %x1, i8 20)
  ret i8 %x2
}

define <2 x i8> @test_vector_uadd_combine(<2 x i8> %a) {
; CHECK-LABEL: @test_vector_uadd_combine(
; CHECK-NEXT:    [[TMP1:%.*]] = call <2 x i8> @llvm.uadd.sat.v2i8(<2 x i8> [[A:%.*]], <2 x i8> <i8 30, i8 30>)
; CHECK-NEXT:    ret <2 x i8> [[TMP1]]
;
  %x1 = call <2 x i8> @llvm.uadd.sat.v2i8(<2 x i8> %a, <2 x i8> <i8 10, i8 10>)
  %x2 = call <2 x i8> @llvm.uadd.sat.v2i8(<2 x i8> %x1, <2 x i8> <i8 20, i8 20>)
  ret <2 x i8> %x2
}

; This could simplify, but currently doesn't.
define <2 x i8> @test_vector_uadd_combine_non_splat(<2 x i8> %a) {
; CHECK-LABEL: @test_vector_uadd_combine_non_splat(
; CHECK-NEXT:    [[X1:%.*]] = call <2 x i8> @llvm.uadd.sat.v2i8(<2 x i8> [[A:%.*]], <2 x i8> <i8 10, i8 20>)
; CHECK-NEXT:    [[X2:%.*]] = call <2 x i8> @llvm.uadd.sat.v2i8(<2 x i8> [[X1]], <2 x i8> <i8 30, i8 40>)
; CHECK-NEXT:    ret <2 x i8> [[X2]]
;
  %x1 = call <2 x i8> @llvm.uadd.sat.v2i8(<2 x i8> %a, <2 x i8> <i8 10, i8 20>)
  %x2 = call <2 x i8> @llvm.uadd.sat.v2i8(<2 x i8> %x1, <2 x i8> <i8 30, i8 40>)
  ret <2 x i8> %x2
}

; Can combine uadds even if they overflow.
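; (The inner result is at least 100, so adding 200 always exceeds the i8
; maximum of 255; the whole chain folds to UMAX, printed as -1.)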
define i8 @test_scalar_uadd_overflow(i8 %a) {
; CHECK-LABEL: @test_scalar_uadd_overflow(
; CHECK-NEXT:    ret i8 -1
;
  %y1 = call i8 @llvm.uadd.sat.i8(i8 %a, i8 100)
  %y2 = call i8 @llvm.uadd.sat.i8(i8 %y1, i8 200)
  ret i8 %y2
}

define <2 x i8> @test_vector_uadd_overflow(<2 x i8> %a) {
; CHECK-LABEL: @test_vector_uadd_overflow(
; CHECK-NEXT:    ret <2 x i8> <i8 -1, i8 -1>
;
  %y1 = call <2 x i8> @llvm.uadd.sat.v2i8(<2 x i8> %a, <2 x i8> <i8 100, i8 100>)
  %y2 = call <2 x i8> @llvm.uadd.sat.v2i8(<2 x i8> %y1, <2 x i8> <i8 200, i8 200>)
  ret <2 x i8> %y2
}

; Can combine sadds if sign matches.
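; (Same-sign constants cannot cancel each other, so folding the two adds into
; a single sadd.sat of c1 + c2 is sound when that sum does not itself wrap.)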
define i8 @test_scalar_sadd_both_positive(i8 %a) {
; CHECK-LABEL: @test_scalar_sadd_both_positive(
; CHECK-NEXT:    [[TMP1:%.*]] = call i8 @llvm.sadd.sat.i8(i8 [[A:%.*]], i8 30)
; CHECK-NEXT:    ret i8 [[TMP1]]
;
  %z1 = call i8 @llvm.sadd.sat.i8(i8 %a, i8 10)
  %z2 = call i8 @llvm.sadd.sat.i8(i8 %z1, i8 20)
  ret i8 %z2
}

define <2 x i8> @test_vector_sadd_both_positive(<2 x i8> %a) {
; CHECK-LABEL: @test_vector_sadd_both_positive(
; CHECK-NEXT:    [[TMP1:%.*]] = call <2 x i8> @llvm.sadd.sat.v2i8(<2 x i8> [[A:%.*]], <2 x i8> <i8 30, i8 30>)
; CHECK-NEXT:    ret <2 x i8> [[TMP1]]
;
  %z1 = call <2 x i8> @llvm.sadd.sat.v2i8(<2 x i8> %a, <2 x i8> <i8 10, i8 10>)
  %z2 = call <2 x i8> @llvm.sadd.sat.v2i8(<2 x i8> %z1, <2 x i8> <i8 20, i8 20>)
  ret <2 x i8> %z2
}

define i8 @test_scalar_sadd_both_negative(i8 %a) {
; CHECK-LABEL: @test_scalar_sadd_both_negative(
; CHECK-NEXT:    [[TMP1:%.*]] = call i8 @llvm.sadd.sat.i8(i8 [[A:%.*]], i8 -30)
; CHECK-NEXT:    ret i8 [[TMP1]]
;
  %u1 = call i8 @llvm.sadd.sat.i8(i8 %a, i8 -10)
  %u2 = call i8 @llvm.sadd.sat.i8(i8 %u1, i8 -20)
  ret i8 %u2
}

define <2 x i8> @test_vector_sadd_both_negative(<2 x i8> %a) {
; CHECK-LABEL: @test_vector_sadd_both_negative(
; CHECK-NEXT:    [[TMP1:%.*]] = call <2 x i8> @llvm.sadd.sat.v2i8(<2 x i8> [[A:%.*]], <2 x i8> <i8 -30, i8 -30>)
; CHECK-NEXT:    ret <2 x i8> [[TMP1]]
;
  %u1 = call <2 x i8> @llvm.sadd.sat.v2i8(<2 x i8> %a, <2 x i8> <i8 -10, i8 -10>)
  %u2 = call <2 x i8> @llvm.sadd.sat.v2i8(<2 x i8> %u1, <2 x i8> <i8 -20, i8 -20>)
  ret <2 x i8> %u2
}

; Can't combine sadds if constants have different sign.
define i8 @test_scalar_sadd_different_sign(i8 %a) {
; CHECK-LABEL: @test_scalar_sadd_different_sign(
; CHECK-NEXT:    [[V1:%.*]] = call i8 @llvm.sadd.sat.i8(i8 [[A:%.*]], i8 10)
; CHECK-NEXT:    [[V2:%.*]] = call i8 @llvm.sadd.sat.i8(i8 [[V1]], i8 -20)
; CHECK-NEXT:    ret i8 [[V2]]
;
  %v1 = call i8 @llvm.sadd.sat.i8(i8 %a, i8 10)
  %v2 = call i8 @llvm.sadd.sat.i8(i8 %v1, i8 -20)
  ret i8 %v2
}

; Can't combine sadds if they overflow.
define i8 @test_scalar_sadd_overflow(i8 %a) {
; CHECK-LABEL: @test_scalar_sadd_overflow(
; CHECK-NEXT:    [[W1:%.*]] = call i8 @llvm.sadd.sat.i8(i8 [[A:%.*]], i8 100)
; CHECK-NEXT:    [[W2:%.*]] = call i8 @llvm.sadd.sat.i8(i8 [[W1]], i8 100)
; CHECK-NEXT:    ret i8 [[W2]]
;
  %w1 = call i8 @llvm.sadd.sat.i8(i8 %a, i8 100)
  %w2 = call i8 @llvm.sadd.sat.i8(i8 %w1, i8 100)
  ret i8 %w2
}

; neg uadd neg always overflows.
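; (Both operands have the sign bit set, so each is at least 128 unsigned;
; their sum is at least 256 and always clamps to 255.)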
define i8 @test_scalar_uadd_neg_neg(i8 %a) {
; CHECK-LABEL: @test_scalar_uadd_neg_neg(
; CHECK-NEXT:    ret i8 -1
;
  %a_neg = or i8 %a, -128
  %r = call i8 @llvm.uadd.sat.i8(i8 %a_neg, i8 -10)
  ret i8 %r
}

define <2 x i8> @test_vector_uadd_neg_neg(<2 x i8> %a) {
; CHECK-LABEL: @test_vector_uadd_neg_neg(
; CHECK-NEXT:    ret <2 x i8> <i8 -1, i8 -1>
;
  %a_neg = or <2 x i8> %a, <i8 -128, i8 -128>
  %r = call <2 x i8> @llvm.uadd.sat.v2i8(<2 x i8> %a_neg, <2 x i8> <i8 -10, i8 -20>)
  ret <2 x i8> %r
}

; nneg uadd nneg never overflows.
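; (Both operands are at most 127, so the sum is at most 254 and fits in i8;
; the saturating add becomes a plain add nuw.)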
define i8 @test_scalar_uadd_nneg_nneg(i8 %a) {
; CHECK-LABEL: @test_scalar_uadd_nneg_nneg(
; CHECK-NEXT:    [[A_NNEG:%.*]] = and i8 [[A:%.*]], 127
; CHECK-NEXT:    [[R:%.*]] = add nuw i8 [[A_NNEG]], 10
; CHECK-NEXT:    ret i8 [[R]]
;
  %a_nneg = and i8 %a, 127
  %r = call i8 @llvm.uadd.sat.i8(i8 %a_nneg, i8 10)
  ret i8 %r
}

define <2 x i8> @test_vector_uadd_nneg_nneg(<2 x i8> %a) {
; CHECK-LABEL: @test_vector_uadd_nneg_nneg(
; CHECK-NEXT:    [[A_NNEG:%.*]] = and <2 x i8> [[A:%.*]], <i8 127, i8 127>
; CHECK-NEXT:    [[R:%.*]] = add nuw <2 x i8> [[A_NNEG]], <i8 10, i8 20>
; CHECK-NEXT:    ret <2 x i8> [[R]]
;
  %a_nneg = and <2 x i8> %a, <i8 127, i8 127>
  %r = call <2 x i8> @llvm.uadd.sat.v2i8(<2 x i8> %a_nneg, <2 x i8> <i8 10, i8 20>)
  ret <2 x i8> %r
}

; neg uadd nneg might overflow.
define i8 @test_scalar_uadd_neg_nneg(i8 %a) {
; CHECK-LABEL: @test_scalar_uadd_neg_nneg(
; CHECK-NEXT:    [[A_NEG:%.*]] = or i8 [[A:%.*]], -128
; CHECK-NEXT:    [[R:%.*]] = call i8 @llvm.uadd.sat.i8(i8 [[A_NEG]], i8 10)
; CHECK-NEXT:    ret i8 [[R]]
;
  %a_neg = or i8 %a, -128
  %r = call i8 @llvm.uadd.sat.i8(i8 %a_neg, i8 10)
  ret i8 %r
}

define <2 x i8> @test_vector_uadd_neg_nneg(<2 x i8> %a) {
; CHECK-LABEL: @test_vector_uadd_neg_nneg(
; CHECK-NEXT:    [[A_NEG:%.*]] = or <2 x i8> [[A:%.*]], <i8 -128, i8 -128>
; CHECK-NEXT:    [[R:%.*]] = call <2 x i8> @llvm.uadd.sat.v2i8(<2 x i8> [[A_NEG]], <2 x i8> <i8 10, i8 20>)
; CHECK-NEXT:    ret <2 x i8> [[R]]
;
  %a_neg = or <2 x i8> %a, <i8 -128, i8 -128>
  %r = call <2 x i8> @llvm.uadd.sat.v2i8(<2 x i8> %a_neg, <2 x i8> <i8 10, i8 20>)
  ret <2 x i8> %r
}

define i8 @test_scalar_uadd_never_overflows(i8 %a) {
; CHECK-LABEL: @test_scalar_uadd_never_overflows(
; CHECK-NEXT:    [[A_MASKED:%.*]] = and i8 [[A:%.*]], -127
; CHECK-NEXT:    [[R:%.*]] = add nuw nsw i8 [[A_MASKED]], 1
; CHECK-NEXT:    ret i8 [[R]]
;
  %a_masked = and i8 %a, 129
  %r = call i8 @llvm.uadd.sat.i8(i8 %a_masked, i8 1)
  ret i8 %r
}

define <2 x i8> @test_vector_uadd_never_overflows(<2 x i8> %a) {
; CHECK-LABEL: @test_vector_uadd_never_overflows(
; CHECK-NEXT:    [[A_MASKED:%.*]] = and <2 x i8> [[A:%.*]], <i8 -127, i8 -127>
; CHECK-NEXT:    [[R:%.*]] = add nuw nsw <2 x i8> [[A_MASKED]], <i8 1, i8 1>
; CHECK-NEXT:    ret <2 x i8> [[R]]
;
  %a_masked = and <2 x i8> %a, <i8 129, i8 129>
  %r = call <2 x i8> @llvm.uadd.sat.v2i8(<2 x i8> %a_masked, <2 x i8> <i8 1, i8 1>)
  ret <2 x i8> %r
}

define i8 @test_scalar_uadd_always_overflows(i8 %a) {
; CHECK-LABEL: @test_scalar_uadd_always_overflows(
; CHECK-NEXT:    ret i8 -1
;
  %a_masked = or i8 %a, 192
  %r = call i8 @llvm.uadd.sat.i8(i8 %a_masked, i8 64)
  ret i8 %r
}

define <2 x i8> @test_vector_uadd_always_overflows(<2 x i8> %a) {
; CHECK-LABEL: @test_vector_uadd_always_overflows(
; CHECK-NEXT:    ret <2 x i8> <i8 -1, i8 -1>
;
  %a_masked = or <2 x i8> %a, <i8 192, i8 192>
  %r = call <2 x i8> @llvm.uadd.sat.v2i8(<2 x i8> %a_masked, <2 x i8> <i8 64, i8 64>)
  ret <2 x i8> %r
}

; neg sadd nneg never overflows.
define i8 @test_scalar_sadd_neg_nneg(i8 %a) {
; CHECK-LABEL: @test_scalar_sadd_neg_nneg(
; CHECK-NEXT:    [[A_NEG:%.*]] = or i8 [[A:%.*]], -128
; CHECK-NEXT:    [[R:%.*]] = add nsw i8 [[A_NEG]], 10
; CHECK-NEXT:    ret i8 [[R]]
;
  %a_neg = or i8 %a, -128
  %r = call i8 @llvm.sadd.sat.i8(i8 %a_neg, i8 10)
  ret i8 %r
}

define <2 x i8> @test_vector_sadd_neg_nneg(<2 x i8> %a) {
; CHECK-LABEL: @test_vector_sadd_neg_nneg(
; CHECK-NEXT:    [[A_NEG:%.*]] = or <2 x i8> [[A:%.*]], <i8 -128, i8 -128>
; CHECK-NEXT:    [[R:%.*]] = add nsw <2 x i8> [[A_NEG]], <i8 10, i8 20>
; CHECK-NEXT:    ret <2 x i8> [[R]]
;
  %a_neg = or <2 x i8> %a, <i8 -128, i8 -128>
  %r = call <2 x i8> @llvm.sadd.sat.v2i8(<2 x i8> %a_neg, <2 x i8> <i8 10, i8 20>)
  ret <2 x i8> %r
}

; nneg sadd neg never overflows.
define i8 @test_scalar_sadd_nneg_neg(i8 %a) {
; CHECK-LABEL: @test_scalar_sadd_nneg_neg(
; CHECK-NEXT:    [[A_NNEG:%.*]] = and i8 [[A:%.*]], 127
; CHECK-NEXT:    [[R:%.*]] = add nsw i8 [[A_NNEG]], -10
; CHECK-NEXT:    ret i8 [[R]]
;
  %a_nneg = and i8 %a, 127
  %r = call i8 @llvm.sadd.sat.i8(i8 %a_nneg, i8 -10)
  ret i8 %r
}

define <2 x i8> @test_vector_sadd_nneg_neg(<2 x i8> %a) {
; CHECK-LABEL: @test_vector_sadd_nneg_neg(
; CHECK-NEXT:    [[A_NNEG:%.*]] = and <2 x i8> [[A:%.*]], <i8 127, i8 127>
; CHECK-NEXT:    [[R:%.*]] = add nsw <2 x i8> [[A_NNEG]], <i8 -10, i8 -20>
; CHECK-NEXT:    ret <2 x i8> [[R]]
;
  %a_nneg = and <2 x i8> %a, <i8 127, i8 127>
  %r = call <2 x i8> @llvm.sadd.sat.v2i8(<2 x i8> %a_nneg, <2 x i8> <i8 -10, i8 -20>)
  ret <2 x i8> %r
}

; neg sadd neg might overflow.
define i8 @test_scalar_sadd_neg_neg(i8 %a) {
; CHECK-LABEL: @test_scalar_sadd_neg_neg(
; CHECK-NEXT:    [[A_NEG:%.*]] = or i8 [[A:%.*]], -128
; CHECK-NEXT:    [[R:%.*]] = call i8 @llvm.sadd.sat.i8(i8 [[A_NEG]], i8 -10)
; CHECK-NEXT:    ret i8 [[R]]
;
  %a_neg = or i8 %a, -128
  %r = call i8 @llvm.sadd.sat.i8(i8 %a_neg, i8 -10)
  ret i8 %r
}

define <2 x i8> @test_vector_sadd_neg_neg(<2 x i8> %a) {
; CHECK-LABEL: @test_vector_sadd_neg_neg(
; CHECK-NEXT:    [[A_NEG:%.*]] = or <2 x i8> [[A:%.*]], <i8 -128, i8 -128>
; CHECK-NEXT:    [[R:%.*]] = call <2 x i8> @llvm.sadd.sat.v2i8(<2 x i8> [[A_NEG]], <2 x i8> <i8 -10, i8 -20>)
; CHECK-NEXT:    ret <2 x i8> [[R]]
;
  %a_neg = or <2 x i8> %a, <i8 -128, i8 -128>
  %r = call <2 x i8> @llvm.sadd.sat.v2i8(<2 x i8> %a_neg, <2 x i8> <i8 -10, i8 -20>)
  ret <2 x i8> %r
}

define i8 @test_scalar_sadd_always_overflows_low(i8 %a) {
; CHECK-LABEL: @test_scalar_sadd_always_overflows_low(
; CHECK-NEXT:    ret i8 -128
;
  %cmp = icmp slt i8 %a, -120
  %min = select i1 %cmp, i8 %a, i8 -120
  %r = call i8 @llvm.sadd.sat.i8(i8 %min, i8 -10)
  ret i8 %r
}

define i8 @test_scalar_sadd_always_overflows_high(i8 %a) {
; CHECK-LABEL: @test_scalar_sadd_always_overflows_high(
; CHECK-NEXT:    ret i8 127
;
  %cmp = icmp sgt i8 %a, 120
  %max = select i1 %cmp, i8 %a, i8 120
  %r = call i8 @llvm.sadd.sat.i8(i8 %max, i8 10)
  ret i8 %r
}

; While this is a no-overflow condition, the nuw flag gets lost due to
; canonicalization and we can no longer determine this.
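; (sub nuw i8 %a, 10 bounds %b by 245, so %b + 9 cannot wrap; the canonical
; form add i8 %a, -10 no longer carries that bound.)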
define i8 @test_scalar_uadd_sub_nuw_lost_no_ov(i8 %a) {
; CHECK-LABEL: @test_scalar_uadd_sub_nuw_lost_no_ov(
; CHECK-NEXT:    [[B:%.*]] = add i8 [[A:%.*]], -10
; CHECK-NEXT:    [[R:%.*]] = call i8 @llvm.uadd.sat.i8(i8 [[B]], i8 9)
; CHECK-NEXT:    ret i8 [[R]]
;
  %b = sub nuw i8 %a, 10
  %r = call i8 @llvm.uadd.sat.i8(i8 %b, i8 9)
  ret i8 %r
}

define i8 @test_scalar_uadd_urem_no_ov(i8 %a) {
; CHECK-LABEL: @test_scalar_uadd_urem_no_ov(
; CHECK-NEXT:    [[B:%.*]] = urem i8 [[A:%.*]], 100
; CHECK-NEXT:    [[R:%.*]] = add nuw nsw i8 [[B]], -100
; CHECK-NEXT:    ret i8 [[R]]
;
  %b = urem i8 %a, 100
  %r = call i8 @llvm.uadd.sat.i8(i8 %b, i8 156)
  ret i8 %r
}

define i8 @test_scalar_uadd_urem_may_ov(i8 %a) {
; CHECK-LABEL: @test_scalar_uadd_urem_may_ov(
; CHECK-NEXT:    [[B:%.*]] = urem i8 [[A:%.*]], 100
; CHECK-NEXT:    [[R:%.*]] = call i8 @llvm.uadd.sat.i8(i8 [[B]], i8 -99)
; CHECK-NEXT:    ret i8 [[R]]
;
  %b = urem i8 %a, 100
  %r = call i8 @llvm.uadd.sat.i8(i8 %b, i8 157)
  ret i8 %r
}

; We have a constant range for the LHS, but only known bits for the RHS.
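; (udiv i8 -66, %a is at most 190 and the masked %b is at most 63;
; 190 + 63 = 253 never wraps, so the saturating add becomes add nuw.)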
define i8 @test_scalar_uadd_udiv_known_bits(i8 %a, i8 %b) {
; CHECK-LABEL: @test_scalar_uadd_udiv_known_bits(
; CHECK-NEXT:    [[AA:%.*]] = udiv i8 -66, [[A:%.*]]
; CHECK-NEXT:    [[BB:%.*]] = and i8 [[B:%.*]], 63
; CHECK-NEXT:    [[R:%.*]] = add nuw i8 [[AA]], [[BB]]
; CHECK-NEXT:    ret i8 [[R]]
;
  %aa = udiv i8 190, %a
  %bb = and i8 %b, 63
  %r = call i8 @llvm.uadd.sat.i8(i8 %aa, i8 %bb)
  ret i8 %r
}

define i8 @test_scalar_sadd_srem_no_ov(i8 %a) {
; CHECK-LABEL: @test_scalar_sadd_srem_no_ov(
; CHECK-NEXT:    [[B:%.*]] = srem i8 [[A:%.*]], 100
; CHECK-NEXT:    [[R:%.*]] = add nsw i8 [[B]], 28
; CHECK-NEXT:    ret i8 [[R]]
;
  %b = srem i8 %a, 100
  %r = call i8 @llvm.sadd.sat.i8(i8 %b, i8 28)
  ret i8 %r
}

define i8 @test_scalar_sadd_srem_may_ov(i8 %a) {
; CHECK-LABEL: @test_scalar_sadd_srem_may_ov(
; CHECK-NEXT:    [[B:%.*]] = srem i8 [[A:%.*]], 100
; CHECK-NEXT:    [[R:%.*]] = call i8 @llvm.sadd.sat.i8(i8 [[B]], i8 29)
; CHECK-NEXT:    ret i8 [[R]]
;
  %b = srem i8 %a, 100
  %r = call i8 @llvm.sadd.sat.i8(i8 %b, i8 29)
  ret i8 %r
}

define i8 @test_scalar_sadd_srem_and_no_ov(i8 %a, i8 %b) {
; CHECK-LABEL: @test_scalar_sadd_srem_and_no_ov(
; CHECK-NEXT:    [[AA:%.*]] = srem i8 [[A:%.*]], 100
; CHECK-NEXT:    [[BB:%.*]] = and i8 [[B:%.*]], 15
; CHECK-NEXT:    [[R:%.*]] = add nsw i8 [[AA]], [[BB]]
; CHECK-NEXT:    ret i8 [[R]]
;
  %aa = srem i8 %a, 100
  %bb = and i8 %b, 15
  %r = call i8 @llvm.sadd.sat.i8(i8 %aa, i8 %bb)
  ret i8 %r
}

;
; Saturating subtraction.
;

declare i8 @llvm.usub.sat.i8(i8, i8)
declare i8 @llvm.ssub.sat.i8(i8, i8)
declare <2 x i8> @llvm.usub.sat.v2i8(<2 x i8>, <2 x i8>)
declare <2 x i8> @llvm.ssub.sat.v2i8(<2 x i8>, <2 x i8>)

; Cannot canonicalize usub to uadd.
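; (usub.sat clamps at 0 while uadd.sat clamps at UMAX, so negating the
; constant would change which bound applies; there is no unsigned analogue
; of the ssub -> sadd rewrite below.)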
define i8 @test_scalar_usub_canonical(i8 %a) {
; CHECK-LABEL: @test_scalar_usub_canonical(
; CHECK-NEXT:    [[R:%.*]] = call i8 @llvm.usub.sat.i8(i8 [[A:%.*]], i8 10)
; CHECK-NEXT:    ret i8 [[R]]
;
  %r = call i8 @llvm.usub.sat.i8(i8 %a, i8 10)
  ret i8 %r
}

; Canonicalize ssub to sadd.
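; (ssub.sat(a, c) == sadd.sat(a, -c) whenever c is not INT_MIN, since -c must
; be representable.)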
define i8 @test_scalar_ssub_canonical(i8 %a) {
; CHECK-LABEL: @test_scalar_ssub_canonical(
; CHECK-NEXT:    [[TMP1:%.*]] = call i8 @llvm.sadd.sat.i8(i8 [[A:%.*]], i8 -10)
; CHECK-NEXT:    ret i8 [[TMP1]]
;
  %r = call i8 @llvm.ssub.sat.i8(i8 %a, i8 10)
  ret i8 %r
}

define <2 x i8> @test_vector_ssub_canonical(<2 x i8> %a) {
; CHECK-LABEL: @test_vector_ssub_canonical(
; CHECK-NEXT:    [[TMP1:%.*]] = call <2 x i8> @llvm.sadd.sat.v2i8(<2 x i8> [[A:%.*]], <2 x i8> <i8 -10, i8 -10>)
; CHECK-NEXT:    ret <2 x i8> [[TMP1]]
;
  %r = call <2 x i8> @llvm.ssub.sat.v2i8(<2 x i8> %a, <2 x i8> <i8 10, i8 10>)
  ret <2 x i8> %r
}

define <2 x i8> @test_vector_ssub_canonical_min_non_splat(<2 x i8> %a) {
; CHECK-LABEL: @test_vector_ssub_canonical_min_non_splat(
; CHECK-NEXT:    [[TMP1:%.*]] = call <2 x i8> @llvm.sadd.sat.v2i8(<2 x i8> [[A:%.*]], <2 x i8> <i8 -10, i8 10>)
; CHECK-NEXT:    ret <2 x i8> [[TMP1]]
;
  %r = call <2 x i8> @llvm.ssub.sat.v2i8(<2 x i8> %a, <2 x i8> <i8 10, i8 -10>)
  ret <2 x i8> %r
}

; Cannot canonicalize signed min.
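; (-(-128) is not representable in i8, so the constant cannot be negated for
; the sadd form.)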
define i8 @test_scalar_ssub_canonical_min(i8 %a) {
; CHECK-LABEL: @test_scalar_ssub_canonical_min(
; CHECK-NEXT:    [[R:%.*]] = call i8 @llvm.ssub.sat.i8(i8 [[A:%.*]], i8 -128)
; CHECK-NEXT:    ret i8 [[R]]
;
  %r = call i8 @llvm.ssub.sat.i8(i8 %a, i8 -128)
  ret i8 %r
}

define <2 x i8> @test_vector_ssub_canonical_min(<2 x i8> %a) {
; CHECK-LABEL: @test_vector_ssub_canonical_min(
; CHECK-NEXT:    [[R:%.*]] = call <2 x i8> @llvm.ssub.sat.v2i8(<2 x i8> [[A:%.*]], <2 x i8> <i8 -128, i8 -10>)
; CHECK-NEXT:    ret <2 x i8> [[R]]
;
  %r = call <2 x i8> @llvm.ssub.sat.v2i8(<2 x i8> %a, <2 x i8> <i8 -128, i8 -10>)
  ret <2 x i8> %r
}

; Can combine usubs with constant operands.
define i8 @test_scalar_usub_combine(i8 %a) {
; CHECK-LABEL: @test_scalar_usub_combine(
; CHECK-NEXT:    [[TMP1:%.*]] = call i8 @llvm.usub.sat.i8(i8 [[A:%.*]], i8 30)
; CHECK-NEXT:    ret i8 [[TMP1]]
;
  %x1 = call i8 @llvm.usub.sat.i8(i8 %a, i8 10)
  %x2 = call i8 @llvm.usub.sat.i8(i8 %x1, i8 20)
  ret i8 %x2
}

define <2 x i8> @test_vector_usub_combine(<2 x i8> %a) {
; CHECK-LABEL: @test_vector_usub_combine(
; CHECK-NEXT:    [[TMP1:%.*]] = call <2 x i8> @llvm.usub.sat.v2i8(<2 x i8> [[A:%.*]], <2 x i8> <i8 30, i8 30>)
; CHECK-NEXT:    ret <2 x i8> [[TMP1]]
;
  %x1 = call <2 x i8> @llvm.usub.sat.v2i8(<2 x i8> %a, <2 x i8> <i8 10, i8 10>)
  %x2 = call <2 x i8> @llvm.usub.sat.v2i8(<2 x i8> %x1, <2 x i8> <i8 20, i8 20>)
  ret <2 x i8> %x2
}

; This could simplify, but currently doesn't.
define <2 x i8> @test_vector_usub_combine_non_splat(<2 x i8> %a) {
; CHECK-LABEL: @test_vector_usub_combine_non_splat(
; CHECK-NEXT:    [[X1:%.*]] = call <2 x i8> @llvm.usub.sat.v2i8(<2 x i8> [[A:%.*]], <2 x i8> <i8 10, i8 20>)
; CHECK-NEXT:    [[X2:%.*]] = call <2 x i8> @llvm.usub.sat.v2i8(<2 x i8> [[X1]], <2 x i8> <i8 30, i8 40>)
; CHECK-NEXT:    ret <2 x i8> [[X2]]
;
  %x1 = call <2 x i8> @llvm.usub.sat.v2i8(<2 x i8> %a, <2 x i8> <i8 10, i8 20>)
  %x2 = call <2 x i8> @llvm.usub.sat.v2i8(<2 x i8> %x1, <2 x i8> <i8 30, i8 40>)
  ret <2 x i8> %x2
}

; Can combine usubs even if they overflow.
define i8 @test_scalar_usub_overflow(i8 %a) {
; CHECK-LABEL: @test_scalar_usub_overflow(
; CHECK-NEXT:    ret i8 0
;
  %y1 = call i8 @llvm.usub.sat.i8(i8 %a, i8 100)
  %y2 = call i8 @llvm.usub.sat.i8(i8 %y1, i8 200)
  ret i8 %y2
}

define <2 x i8> @test_vector_usub_overflow(<2 x i8> %a) {
; CHECK-LABEL: @test_vector_usub_overflow(
; CHECK-NEXT:    ret <2 x i8> zeroinitializer
;
  %y1 = call <2 x i8> @llvm.usub.sat.v2i8(<2 x i8> %a, <2 x i8> <i8 100, i8 100>)
  %y2 = call <2 x i8> @llvm.usub.sat.v2i8(<2 x i8> %y1, <2 x i8> <i8 200, i8 200>)
  ret <2 x i8> %y2
}

; Can combine ssubs if sign matches.
define i8 @test_scalar_ssub_both_positive(i8 %a) {
; CHECK-LABEL: @test_scalar_ssub_both_positive(
; CHECK-NEXT:    [[TMP1:%.*]] = call i8 @llvm.sadd.sat.i8(i8 [[A:%.*]], i8 -30)
; CHECK-NEXT:    ret i8 [[TMP1]]
;
  %z1 = call i8 @llvm.ssub.sat.i8(i8 %a, i8 10)
  %z2 = call i8 @llvm.ssub.sat.i8(i8 %z1, i8 20)
  ret i8 %z2
}

define <2 x i8> @test_vector_ssub_both_positive(<2 x i8> %a) {
; CHECK-LABEL: @test_vector_ssub_both_positive(
; CHECK-NEXT:    [[TMP1:%.*]] = call <2 x i8> @llvm.sadd.sat.v2i8(<2 x i8> [[A:%.*]], <2 x i8> <i8 -30, i8 -30>)
; CHECK-NEXT:    ret <2 x i8> [[TMP1]]
;
  %z1 = call <2 x i8> @llvm.ssub.sat.v2i8(<2 x i8> %a, <2 x i8> <i8 10, i8 10>)
  %z2 = call <2 x i8> @llvm.ssub.sat.v2i8(<2 x i8> %z1, <2 x i8> <i8 20, i8 20>)
  ret <2 x i8> %z2
}

define i8 @test_scalar_ssub_both_negative(i8 %a) {
; CHECK-LABEL: @test_scalar_ssub_both_negative(
; CHECK-NEXT:    [[TMP1:%.*]] = call i8 @llvm.sadd.sat.i8(i8 [[A:%.*]], i8 30)
; CHECK-NEXT:    ret i8 [[TMP1]]
;
  %u1 = call i8 @llvm.ssub.sat.i8(i8 %a, i8 -10)
  %u2 = call i8 @llvm.ssub.sat.i8(i8 %u1, i8 -20)
  ret i8 %u2
}

define <2 x i8> @test_vector_ssub_both_negative(<2 x i8> %a) {
; CHECK-LABEL: @test_vector_ssub_both_negative(
; CHECK-NEXT:    [[TMP1:%.*]] = call <2 x i8> @llvm.sadd.sat.v2i8(<2 x i8> [[A:%.*]], <2 x i8> <i8 30, i8 30>)
; CHECK-NEXT:    ret <2 x i8> [[TMP1]]
;
  %u1 = call <2 x i8> @llvm.ssub.sat.v2i8(<2 x i8> %a, <2 x i8> <i8 -10, i8 -10>)
  %u2 = call <2 x i8> @llvm.ssub.sat.v2i8(<2 x i8> %u1, <2 x i8> <i8 -20, i8 -20>)
  ret <2 x i8> %u2
}

; Can't combine ssubs if constants have different sign.
define i8 @test_scalar_ssub_different_sign(i8 %a) {
; CHECK-LABEL: @test_scalar_ssub_different_sign(
; CHECK-NEXT:    [[TMP1:%.*]] = call i8 @llvm.sadd.sat.i8(i8 [[A:%.*]], i8 -10)
; CHECK-NEXT:    [[TMP2:%.*]] = call i8 @llvm.sadd.sat.i8(i8 [[TMP1]], i8 20)
; CHECK-NEXT:    ret i8 [[TMP2]]
;
  %v1 = call i8 @llvm.ssub.sat.i8(i8 %a, i8 10)
  %v2 = call i8 @llvm.ssub.sat.i8(i8 %v1, i8 -20)
  ret i8 %v2
}

; Can combine sadd and ssub with appropriate signs.
define i8 @test_scalar_sadd_ssub(i8 %a) {
; CHECK-LABEL: @test_scalar_sadd_ssub(
; CHECK-NEXT:    [[TMP1:%.*]] = call i8 @llvm.sadd.sat.i8(i8 [[A:%.*]], i8 30)
; CHECK-NEXT:    ret i8 [[TMP1]]
;
  %v1 = call i8 @llvm.sadd.sat.i8(i8 10, i8 %a)
  %v2 = call i8 @llvm.ssub.sat.i8(i8 %v1, i8 -20)
  ret i8 %v2
}

define <2 x i8> @test_vector_sadd_ssub(<2 x i8> %a) {
; CHECK-LABEL: @test_vector_sadd_ssub(
; CHECK-NEXT:    [[TMP1:%.*]] = call <2 x i8> @llvm.sadd.sat.v2i8(<2 x i8> [[A:%.*]], <2 x i8> <i8 -30, i8 -30>)
; CHECK-NEXT:    ret <2 x i8> [[TMP1]]
;
  %v1 = call <2 x i8> @llvm.sadd.sat.v2i8(<2 x i8> <i8 -10, i8 -10>, <2 x i8> %a)
  %v2 = call <2 x i8> @llvm.ssub.sat.v2i8(<2 x i8> %v1, <2 x i8> <i8 20, i8 20>)
  ret <2 x i8> %v2
}

; Can't combine ssubs if they overflow.
define i8 @test_scalar_ssub_overflow(i8 %a) {
; CHECK-LABEL: @test_scalar_ssub_overflow(
; CHECK-NEXT:    [[TMP1:%.*]] = call i8 @llvm.sadd.sat.i8(i8 [[A:%.*]], i8 -100)
; CHECK-NEXT:    [[TMP2:%.*]] = call i8 @llvm.sadd.sat.i8(i8 [[TMP1]], i8 -100)
; CHECK-NEXT:    ret i8 [[TMP2]]
;
  %w1 = call i8 @llvm.ssub.sat.i8(i8 %a, i8 100)
  %w2 = call i8 @llvm.ssub.sat.i8(i8 %w1, i8 100)
  ret i8 %w2
}

; nneg usub neg always overflows.
define i8 @test_scalar_usub_nneg_neg(i8 %a) {
; CHECK-LABEL: @test_scalar_usub_nneg_neg(
; CHECK-NEXT:    ret i8 0
;
  %a_nneg = and i8 %a, 127
  %r = call i8 @llvm.usub.sat.i8(i8 %a_nneg, i8 -10)
  ret i8 %r
}

define <2 x i8> @test_vector_usub_nneg_neg(<2 x i8> %a) {
; CHECK-LABEL: @test_vector_usub_nneg_neg(
; CHECK-NEXT:    ret <2 x i8> zeroinitializer
;
  %a_nneg = and <2 x i8> %a, <i8 127, i8 127>
  %r = call <2 x i8> @llvm.usub.sat.v2i8(<2 x i8> %a_nneg, <2 x i8> <i8 -10, i8 -20>)
  ret <2 x i8> %r
}

; neg usub nneg never overflows.
define i8 @test_scalar_usub_neg_nneg(i8 %a) {
; CHECK-LABEL: @test_scalar_usub_neg_nneg(
; CHECK-NEXT:    [[A_NEG:%.*]] = or i8 [[A:%.*]], -128
; CHECK-NEXT:    [[R:%.*]] = add i8 [[A_NEG]], -10
; CHECK-NEXT:    ret i8 [[R]]
;
  %a_neg = or i8 %a, -128
  %r = call i8 @llvm.usub.sat.i8(i8 %a_neg, i8 10)
  ret i8 %r
}

define <2 x i8> @test_vector_usub_neg_nneg(<2 x i8> %a) {
; CHECK-LABEL: @test_vector_usub_neg_nneg(
; CHECK-NEXT:    [[A_NEG:%.*]] = or <2 x i8> [[A:%.*]], <i8 -128, i8 -128>
; CHECK-NEXT:    [[R:%.*]] = add <2 x i8> [[A_NEG]], <i8 -10, i8 -20>
; CHECK-NEXT:    ret <2 x i8> [[R]]
;
  %a_neg = or <2 x i8> %a, <i8 -128, i8 -128>
  %r = call <2 x i8> @llvm.usub.sat.v2i8(<2 x i8> %a_neg, <2 x i8> <i8 10, i8 20>)
  ret <2 x i8> %r
}

; nneg usub nneg may overflow.
define i8 @test_scalar_usub_nneg_nneg(i8 %a) {
; CHECK-LABEL: @test_scalar_usub_nneg_nneg(
; CHECK-NEXT:    [[A_NNEG:%.*]] = and i8 [[A:%.*]], 127
; CHECK-NEXT:    [[R:%.*]] = call i8 @llvm.usub.sat.i8(i8 [[A_NNEG]], i8 10)
; CHECK-NEXT:    ret i8 [[R]]
;
  %a_nneg = and i8 %a, 127
  %r = call i8 @llvm.usub.sat.i8(i8 %a_nneg, i8 10)
  ret i8 %r
}

define <2 x i8> @test_vector_usub_nneg_nneg(<2 x i8> %a) {
; CHECK-LABEL: @test_vector_usub_nneg_nneg(
; CHECK-NEXT:    [[A_NNEG:%.*]] = and <2 x i8> [[A:%.*]], <i8 127, i8 127>
; CHECK-NEXT:    [[R:%.*]] = call <2 x i8> @llvm.usub.sat.v2i8(<2 x i8> [[A_NNEG]], <2 x i8> <i8 10, i8 20>)
; CHECK-NEXT:    ret <2 x i8> [[R]]
;
  %a_nneg = and <2 x i8> %a, <i8 127, i8 127>
  %r = call <2 x i8> @llvm.usub.sat.v2i8(<2 x i8> %a_nneg, <2 x i8> <i8 10, i8 20>)
  ret <2 x i8> %r
}

define i8 @test_scalar_usub_never_overflows(i8 %a) {
; CHECK-LABEL: @test_scalar_usub_never_overflows(
; CHECK-NEXT:    [[A_MASKED:%.*]] = or i8 [[A:%.*]], 64
; CHECK-NEXT:    [[R:%.*]] = add nsw i8 [[A_MASKED]], -10
; CHECK-NEXT:    ret i8 [[R]]
;
  %a_masked = or i8 %a, 64
  %r = call i8 @llvm.usub.sat.i8(i8 %a_masked, i8 10)
  ret i8 %r
}

define <2 x i8> @test_vector_usub_never_overflows(<2 x i8> %a) {
; CHECK-LABEL: @test_vector_usub_never_overflows(
; CHECK-NEXT:    [[A_MASKED:%.*]] = or <2 x i8> [[A:%.*]], <i8 64, i8 64>
; CHECK-NEXT:    [[R:%.*]] = add nsw <2 x i8> [[A_MASKED]], <i8 -10, i8 -10>
; CHECK-NEXT:    ret <2 x i8> [[R]]
;
  %a_masked = or <2 x i8> %a, <i8 64, i8 64>
  %r = call <2 x i8> @llvm.usub.sat.v2i8(<2 x i8> %a_masked, <2 x i8> <i8 10, i8 10>)
  ret <2 x i8> %r
}

define i8 @test_scalar_usub_always_overflows(i8 %a) {
; CHECK-LABEL: @test_scalar_usub_always_overflows(
; CHECK-NEXT:    ret i8 0
;
  %a_masked = and i8 %a, 64
  %r = call i8 @llvm.usub.sat.i8(i8 %a_masked, i8 100)
  ret i8 %r
}

define <2 x i8> @test_vector_usub_always_overflows(<2 x i8> %a) {
; CHECK-LABEL: @test_vector_usub_always_overflows(
; CHECK-NEXT:    ret <2 x i8> zeroinitializer
;
  %a_masked = and <2 x i8> %a, <i8 64, i8 64>
  %r = call <2 x i8> @llvm.usub.sat.v2i8(<2 x i8> %a_masked, <2 x i8> <i8 100, i8 100>)
  ret <2 x i8> %r
}

; neg ssub neg never overflows.
define i8 @test_scalar_ssub_neg_neg(i8 %a) {
; CHECK-LABEL: @test_scalar_ssub_neg_neg(
; CHECK-NEXT:    [[A_NEG:%.*]] = or i8 [[A:%.*]], -128
; CHECK-NEXT:    [[R:%.*]] = add nsw i8 [[A_NEG]], 10
; CHECK-NEXT:    ret i8 [[R]]
;
  %a_neg = or i8 %a, -128
  %r = call i8 @llvm.ssub.sat.i8(i8 %a_neg, i8 -10)
  ret i8 %r
}

define <2 x i8> @test_vector_ssub_neg_neg(<2 x i8> %a) {
; CHECK-LABEL: @test_vector_ssub_neg_neg(
; CHECK-NEXT:    [[A_NEG:%.*]] = or <2 x i8> [[A:%.*]], <i8 -128, i8 -128>
; CHECK-NEXT:    [[R:%.*]] = add nsw <2 x i8> [[A_NEG]], <i8 10, i8 20>
; CHECK-NEXT:    ret <2 x i8> [[R]]
;
  %a_neg = or <2 x i8> %a, <i8 -128, i8 -128>
  %r = call <2 x i8> @llvm.ssub.sat.v2i8(<2 x i8> %a_neg, <2 x i8> <i8 -10, i8 -20>)
  ret <2 x i8> %r
}

; nneg ssub nneg never overflows.
define i8 @test_scalar_ssub_nneg_nneg(i8 %a) {
; CHECK-LABEL: @test_scalar_ssub_nneg_nneg(
; CHECK-NEXT:    [[A_NNEG:%.*]] = and i8 [[A:%.*]], 127
; CHECK-NEXT:    [[R:%.*]] = add nsw i8 [[A_NNEG]], -10
; CHECK-NEXT:    ret i8 [[R]]
;
  %a_nneg = and i8 %a, 127
  %r = call i8 @llvm.ssub.sat.i8(i8 %a_nneg, i8 10)
  ret i8 %r
}

define <2 x i8> @test_vector_ssub_nneg_nneg(<2 x i8> %a) {
; CHECK-LABEL: @test_vector_ssub_nneg_nneg(
; CHECK-NEXT:    [[A_NNEG:%.*]] = and <2 x i8> [[A:%.*]], <i8 127, i8 127>
; CHECK-NEXT:    [[R:%.*]] = add nsw <2 x i8> [[A_NNEG]], <i8 -10, i8 -20>
; CHECK-NEXT:    ret <2 x i8> [[R]]
;
  %a_nneg = and <2 x i8> %a, <i8 127, i8 127>
  %r = call <2 x i8> @llvm.ssub.sat.v2i8(<2 x i8> %a_nneg, <2 x i8> <i8 10, i8 20>)
  ret <2 x i8> %r
}

; neg ssub nneg may overflow.
define i8 @test_scalar_ssub_neg_nneg(i8 %a) {
; CHECK-LABEL: @test_scalar_ssub_neg_nneg(
; CHECK-NEXT:    [[A_NEG:%.*]] = or i8 [[A:%.*]], -128
; CHECK-NEXT:    [[TMP1:%.*]] = call i8 @llvm.sadd.sat.i8(i8 [[A_NEG]], i8 -10)
; CHECK-NEXT:    ret i8 [[TMP1]]
;
  %a_neg = or i8 %a, -128
  %r = call i8 @llvm.ssub.sat.i8(i8 %a_neg, i8 10)
  ret i8 %r
}

define <2 x i8> @test_vector_ssub_neg_nneg(<2 x i8> %a) {
; CHECK-LABEL: @test_vector_ssub_neg_nneg(
; CHECK-NEXT:    [[A_NEG:%.*]] = or <2 x i8> [[A:%.*]], <i8 -128, i8 -128>
; CHECK-NEXT:    [[TMP1:%.*]] = call <2 x i8> @llvm.sadd.sat.v2i8(<2 x i8> [[A_NEG]], <2 x i8> <i8 -10, i8 -20>)
; CHECK-NEXT:    ret <2 x i8> [[TMP1]]
;
  %a_neg = or <2 x i8> %a, <i8 -128, i8 -128>
  %r = call <2 x i8> @llvm.ssub.sat.v2i8(<2 x i8> %a_neg, <2 x i8> <i8 10, i8 20>)
  ret <2 x i8> %r
}

define i8 @test_scalar_ssub_always_overflows_low(i8 %a) {
; CHECK-LABEL: @test_scalar_ssub_always_overflows_low(
; CHECK-NEXT:    ret i8 -128
;
  %cmp = icmp sgt i8 %a, 120
  %max = select i1 %cmp, i8 %a, i8 120
  %r = call i8 @llvm.ssub.sat.i8(i8 -10, i8 %max)
  ret i8 %r
}

define i8 @test_scalar_ssub_always_overflows_high(i8 %a) {
; CHECK-LABEL: @test_scalar_ssub_always_overflows_high(
; CHECK-NEXT:    ret i8 127
;
  %cmp = icmp slt i8 %a, -120
  %min = select i1 %cmp, i8 %a, i8 -120
  %r = call i8 @llvm.ssub.sat.i8(i8 10, i8 %min)
  ret i8 %r
}

define i8 @test_scalar_usub_add_nuw_no_ov(i8 %a) {
; CHECK-LABEL: @test_scalar_usub_add_nuw_no_ov(
; CHECK-NEXT:    [[R:%.*]] = add i8 [[A:%.*]], 1
; CHECK-NEXT:    ret i8 [[R]]
;
  %b = add nuw i8 %a, 10
  %r = call i8 @llvm.usub.sat.i8(i8 %b, i8 9)
  ret i8 %r
}

define i8 @test_scalar_usub_add_nuw_eq(i8 %a) {
; CHECK-LABEL: @test_scalar_usub_add_nuw_eq(
; CHECK-NEXT:    ret i8 [[A:%.*]]
;
  %b = add nuw i8 %a, 10
  %r = call i8 @llvm.usub.sat.i8(i8 %b, i8 10)
  ret i8 %r
}

define i8 @test_scalar_usub_add_nuw_may_ov(i8 %a) {
; CHECK-LABEL: @test_scalar_usub_add_nuw_may_ov(
; CHECK-NEXT:    [[B:%.*]] = add nuw i8 [[A:%.*]], 10
; CHECK-NEXT:    [[R:%.*]] = call i8 @llvm.usub.sat.i8(i8 [[B]], i8 11)
; CHECK-NEXT:    ret i8 [[R]]
;
  %b = add nuw i8 %a, 10
  %r = call i8 @llvm.usub.sat.i8(i8 %b, i8 11)
  ret i8 %r
}

define i8 @test_scalar_usub_urem_must_ov(i8 %a) {
; CHECK-LABEL: @test_scalar_usub_urem_must_ov(
; CHECK-NEXT:    ret i8 0
;
  %b = urem i8 %a, 10
  %r = call i8 @llvm.usub.sat.i8(i8 %b, i8 10)
  ret i8 %r
}

; Like the previous case, the result is always zero here. However, as there's
; no actual overflow, we won't know about it.
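; (%b is at most 9, so %b - 9 is never positive and the result is always 0;
; but when %b == 9 the subtraction does not saturate, so the must-overflow
; fold cannot fire.)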
define i8 @test_scalar_usub_urem_must_zero(i8 %a) {
; CHECK-LABEL: @test_scalar_usub_urem_must_zero(
; CHECK-NEXT:    [[B:%.*]] = urem i8 [[A:%.*]], 10
; CHECK-NEXT:    [[R:%.*]] = call i8 @llvm.usub.sat.i8(i8 [[B]], i8 9)
; CHECK-NEXT:    ret i8 [[R]]
;
  %b = urem i8 %a, 10
  %r = call i8 @llvm.usub.sat.i8(i8 %b, i8 9)
  ret i8 %r
}

; We have a constant range for the LHS, but only known bits for the RHS.
define i8 @test_scalar_usub_add_nuw_known_bits(i8 %a, i8 %b) {
; CHECK-LABEL: @test_scalar_usub_add_nuw_known_bits(
; CHECK-NEXT:    [[AA:%.*]] = add nuw i8 [[A:%.*]], 10
; CHECK-NEXT:    [[BB:%.*]] = and i8 [[B:%.*]], 7
; CHECK-NEXT:    [[R:%.*]] = sub nuw i8 [[AA]], [[BB]]
; CHECK-NEXT:    ret i8 [[R]]
;
  %aa = add nuw i8 %a, 10
  %bb = and i8 %b, 7
  %r = call i8 @llvm.usub.sat.i8(i8 %aa, i8 %bb)
  ret i8 %r
}

define i8 @test_scalar_usub_add_nuw_inferred(i8 %a) {
; CHECK-LABEL: @test_scalar_usub_add_nuw_inferred(
; CHECK-NEXT:    [[B:%.*]] = call i8 @llvm.usub.sat.i8(i8 [[A:%.*]], i8 10)
; CHECK-NEXT:    [[R:%.*]] = add nuw i8 [[B]], 9
; CHECK-NEXT:    ret i8 [[R]]
;
  %b = call i8 @llvm.usub.sat.i8(i8 %a, i8 10)
  %r = add i8 %b, 9
  ret i8 %r
}

define <2 x i8> @test_vector_usub_add_nuw_no_ov(<2 x i8> %a) {
; CHECK-LABEL: @test_vector_usub_add_nuw_no_ov(
; CHECK-NEXT:    [[R:%.*]] = add <2 x i8> [[A:%.*]], <i8 1, i8 1>
; CHECK-NEXT:    ret <2 x i8> [[R]]
;
  %b = add nuw <2 x i8> %a, <i8 10, i8 10>
  %r = call <2 x i8> @llvm.usub.sat.v2i8(<2 x i8> %b, <2 x i8> <i8 9, i8 9>)
  ret <2 x i8> %r
}

; Can be optimized if the usub.sat RHS constant range handles non-splat vectors.
define <2 x i8> @test_vector_usub_add_nuw_no_ov_nonsplat1(<2 x i8> %a) {
; CHECK-LABEL: @test_vector_usub_add_nuw_no_ov_nonsplat1(
; CHECK-NEXT:    [[B:%.*]] = add nuw <2 x i8> [[A:%.*]], <i8 10, i8 10>
; CHECK-NEXT:    [[R:%.*]] = call <2 x i8> @llvm.usub.sat.v2i8(<2 x i8> [[B]], <2 x i8> <i8 10, i8 9>)
; CHECK-NEXT:    ret <2 x i8> [[R]]
;
  %b = add nuw <2 x i8> %a, <i8 10, i8 10>
  %r = call <2 x i8> @llvm.usub.sat.v2i8(<2 x i8> %b, <2 x i8> <i8 10, i8 9>)
  ret <2 x i8> %r
}

; Can be optimized if the add nuw RHS constant range handles non-splat vectors.
define <2 x i8> @test_vector_usub_add_nuw_no_ov_nonsplat2(<2 x i8> %a) {
; CHECK-LABEL: @test_vector_usub_add_nuw_no_ov_nonsplat2(
; CHECK-NEXT:    [[B:%.*]] = add nuw <2 x i8> [[A:%.*]], <i8 10, i8 9>
; CHECK-NEXT:    [[R:%.*]] = call <2 x i8> @llvm.usub.sat.v2i8(<2 x i8> [[B]], <2 x i8> <i8 9, i8 9>)
; CHECK-NEXT:    ret <2 x i8> [[R]]
;
  %b = add nuw <2 x i8> %a, <i8 10, i8 9>
  %r = call <2 x i8> @llvm.usub.sat.v2i8(<2 x i8> %b, <2 x i8> <i8 9, i8 9>)
  ret <2 x i8> %r
}

; Can be optimized if constant range is tracked per-element.
define <2 x i8> @test_vector_usub_add_nuw_no_ov_nonsplat3(<2 x i8> %a) {
; CHECK-LABEL: @test_vector_usub_add_nuw_no_ov_nonsplat3(
; CHECK-NEXT:    [[B:%.*]] = add nuw <2 x i8> [[A:%.*]], <i8 10, i8 9>
; CHECK-NEXT:    [[R:%.*]] = call <2 x i8> @llvm.usub.sat.v2i8(<2 x i8> [[B]], <2 x i8> <i8 10, i8 9>)
; CHECK-NEXT:    ret <2 x i8> [[R]]
;
  %b = add nuw <2 x i8> %a, <i8 10, i8 9>
  %r = call <2 x i8> @llvm.usub.sat.v2i8(<2 x i8> %b, <2 x i8> <i8 10, i8 9>)
  ret <2 x i8> %r
}

define i8 @test_scalar_ssub_add_nsw_no_ov(i8 %a, i8 %b) {
; CHECK-LABEL: @test_scalar_ssub_add_nsw_no_ov(
; CHECK-NEXT:    [[AA:%.*]] = add nsw i8 [[A:%.*]], 7
; CHECK-NEXT:    [[BB:%.*]] = and i8 [[B:%.*]], 7
; CHECK-NEXT:    [[R:%.*]] = sub nsw i8 [[AA]], [[BB]]
; CHECK-NEXT:    ret i8 [[R]]
;
  %aa = add nsw i8 %a, 7
  %bb = and i8 %b, 7
  %r = call i8 @llvm.ssub.sat.i8(i8 %aa, i8 %bb)
  ret i8 %r
}

define i8 @test_scalar_ssub_add_nsw_may_ov(i8 %a, i8 %b) {
; CHECK-LABEL: @test_scalar_ssub_add_nsw_may_ov(
; CHECK-NEXT:    [[AA:%.*]] = add nsw i8 [[A:%.*]], 6
; CHECK-NEXT:    [[BB:%.*]] = and i8 [[B:%.*]], 7
; CHECK-NEXT:    [[R:%.*]] = call i8 @llvm.ssub.sat.i8(i8 [[AA]], i8 [[BB]])
; CHECK-NEXT:    ret i8 [[R]]
;
  %aa = add nsw i8 %a, 6
  %bb = and i8 %b, 7
  %r = call i8 @llvm.ssub.sat.i8(i8 %aa, i8 %bb)
  ret i8 %r
}

define <2 x i8> @test_vector_ssub_add_nsw_no_ov_splat(<2 x i8> %a, <2 x i8> %b) {
; CHECK-LABEL: @test_vector_ssub_add_nsw_no_ov_splat(
; CHECK-NEXT:    [[AA:%.*]] = add nsw <2 x i8> [[A:%.*]], <i8 7, i8 7>
; CHECK-NEXT:    [[BB:%.*]] = and <2 x i8> [[B:%.*]], <i8 7, i8 7>
; CHECK-NEXT:    [[R:%.*]] = sub nsw <2 x i8> [[AA]], [[BB]]
; CHECK-NEXT:    ret <2 x i8> [[R]]
;
  %aa = add nsw <2 x i8> %a, <i8 7, i8 7>
  %bb = and <2 x i8> %b, <i8 7, i8 7>
  %r = call <2 x i8> @llvm.ssub.sat.v2i8(<2 x i8> %aa, <2 x i8> %bb)
  ret <2 x i8> %r
}

define <2 x i8> @test_vector_ssub_add_nsw_no_ov_nonsplat1(<2 x i8> %a, <2 x i8> %b) {
; CHECK-LABEL: @test_vector_ssub_add_nsw_no_ov_nonsplat1(
; CHECK-NEXT:    [[AA:%.*]] = add nsw <2 x i8> [[A:%.*]], <i8 7, i8 7>
; CHECK-NEXT:    [[BB:%.*]] = and <2 x i8> [[B:%.*]], <i8 7, i8 6>
; CHECK-NEXT:    [[R:%.*]] = sub nsw <2 x i8> [[AA]], [[BB]]
; CHECK-NEXT:    ret <2 x i8> [[R]]
;
  %aa = add nsw <2 x i8> %a, <i8 7, i8 7>
  %bb = and <2 x i8> %b, <i8 7, i8 6>
  %r = call <2 x i8> @llvm.ssub.sat.v2i8(<2 x i8> %aa, <2 x i8> %bb)
  ret <2 x i8> %r
}

define <2 x i8> @test_vector_ssub_add_nsw_no_ov_nonsplat2(<2 x i8> %a, <2 x i8> %b) {
; CHECK-LABEL: @test_vector_ssub_add_nsw_no_ov_nonsplat2(
; CHECK-NEXT:    [[AA:%.*]] = add nsw <2 x i8> [[A:%.*]], <i8 7, i8 8>
; CHECK-NEXT:    [[BB:%.*]] = and <2 x i8> [[B:%.*]], <i8 7, i8 7>
; CHECK-NEXT:    [[R:%.*]] = call <2 x i8> @llvm.ssub.sat.v2i8(<2 x i8> [[AA]], <2 x i8> [[BB]])
; CHECK-NEXT:    ret <2 x i8> [[R]]
;
  %aa = add nsw <2 x i8> %a, <i8 7, i8 8>
  %bb = and <2 x i8> %b, <i8 7, i8 7>
  %r = call <2 x i8> @llvm.ssub.sat.v2i8(<2 x i8> %aa, <2 x i8> %bb)
  ret <2 x i8> %r
}

define <2 x i8> @test_vector_ssub_add_nsw_no_ov_nonsplat3(<2 x i8> %a, <2 x i8> %b) {
; CHECK-LABEL: @test_vector_ssub_add_nsw_no_ov_nonsplat3(
; CHECK-NEXT:    [[AA:%.*]] = add nsw <2 x i8> [[A:%.*]], <i8 7, i8 6>
; CHECK-NEXT:    [[BB:%.*]] = and <2 x i8> [[B:%.*]], <i8 7, i8 6>
; CHECK-NEXT:    [[R:%.*]] = call <2 x i8> @llvm.ssub.sat.v2i8(<2 x i8> [[AA]], <2 x i8> [[BB]])
; CHECK-NEXT:    ret <2 x i8> [[R]]
;
  %aa = add nsw <2 x i8> %a, <i8 7, i8 6>
  %bb = and <2 x i8> %b, <i8 7, i8 6>
  %r = call <2 x i8> @llvm.ssub.sat.v2i8(<2 x i8> %aa, <2 x i8> %bb)
  ret <2 x i8> %r
}

define i8 @test_scalar_usub_add(i8 %a, i8 %b) {
; CHECK-LABEL: @test_scalar_usub_add(
; CHECK-NEXT:    [[SAT:%.*]] = call i8 @llvm.usub.sat.i8(i8 [[A:%.*]], i8 [[B:%.*]])
; CHECK-NEXT:    [[RES:%.*]] = add i8 [[SAT]], [[B]]
; CHECK-NEXT:    ret i8 [[RES]]
;
  %sat = call i8 @llvm.usub.sat.i8(i8 %a, i8 %b)
  %res = add i8 %sat, %b
  ret i8 %res
}

define i8 @test_scalar_usub_add_extra_use(i8 %a, i8 %b, i8* %p) {
; CHECK-LABEL: @test_scalar_usub_add_extra_use(
; CHECK-NEXT:    [[SAT:%.*]] = call i8 @llvm.usub.sat.i8(i8 [[A:%.*]], i8 [[B:%.*]])
; CHECK-NEXT:    store i8 [[SAT]], i8* [[P:%.*]], align 1
; CHECK-NEXT:    [[RES:%.*]] = add i8 [[SAT]], [[B]]
; CHECK-NEXT:    ret i8 [[RES]]
;
  %sat = call i8 @llvm.usub.sat.i8(i8 %a, i8 %b)
  store i8 %sat, i8* %p
  %res = add i8 %sat, %b
  ret i8 %res
}

define i8 @test_scalar_usub_add_commuted(i8 %a, i8 %b) {
; CHECK-LABEL: @test_scalar_usub_add_commuted(
; CHECK-NEXT:    [[SAT:%.*]] = call i8 @llvm.usub.sat.i8(i8 [[A:%.*]], i8 [[B:%.*]])
; CHECK-NEXT:    [[RES:%.*]] = add i8 [[SAT]], [[B]]
; CHECK-NEXT:    ret i8 [[RES]]
;
  %sat = call i8 @llvm.usub.sat.i8(i8 %a, i8 %b)
  %res = add i8 %b, %sat
  ret i8 %res
}

define i8 @test_scalar_usub_add_commuted_wrong(i8 %a, i8 %b) {
; CHECK-LABEL: @test_scalar_usub_add_commuted_wrong(
; CHECK-NEXT:    [[SAT:%.*]] = call i8 @llvm.usub.sat.i8(i8 [[B:%.*]], i8 [[A:%.*]])
; CHECK-NEXT:    [[RES:%.*]] = add i8 [[SAT]], [[B]]
; CHECK-NEXT:    ret i8 [[RES]]
;
  %sat = call i8 @llvm.usub.sat.i8(i8 %b, i8 %a)
  %res = add i8 %sat, %b
  ret i8 %res
}

define i8 @test_scalar_usub_add_const(i8 %a) {
; CHECK-LABEL: @test_scalar_usub_add_const(
; CHECK-NEXT:    [[SAT:%.*]] = call i8 @llvm.usub.sat.i8(i8 [[A:%.*]], i8 42)
; CHECK-NEXT:    [[RES:%.*]] = add nuw i8 [[SAT]], 42
; CHECK-NEXT:    ret i8 [[RES]]
;
  %sat = call i8 @llvm.usub.sat.i8(i8 %a, i8 42)
  %res = add i8 %sat, 42
  ret i8 %res
}

define i8 @test_scalar_uadd_sub(i8 %a, i8 %b) {
; CHECK-LABEL: @test_scalar_uadd_sub(
; CHECK-NEXT:    [[SAT:%.*]] = call i8 @llvm.uadd.sat.i8(i8 [[A:%.*]], i8 [[B:%.*]])
; CHECK-NEXT:    [[RES:%.*]] = sub i8 [[SAT]], [[B]]
; CHECK-NEXT:    ret i8 [[RES]]
;
  %sat = call i8 @llvm.uadd.sat.i8(i8 %a, i8 %b)
  %res = sub i8 %sat, %b
  ret i8 %res
}

define i8 @test_scalar_uadd_sub_extra_use(i8 %a, i8 %b, i8* %p) {
; CHECK-LABEL: @test_scalar_uadd_sub_extra_use(
; CHECK-NEXT:    [[SAT:%.*]] = call i8 @llvm.uadd.sat.i8(i8 [[A:%.*]], i8 [[B:%.*]])
; CHECK-NEXT:    store i8 [[SAT]], i8* [[P:%.*]], align 1
; CHECK-NEXT:    [[RES:%.*]] = sub i8 [[SAT]], [[B]]
; CHECK-NEXT:    ret i8 [[RES]]
;
  %sat = call i8 @llvm.uadd.sat.i8(i8 %a, i8 %b)
  store i8 %sat, i8* %p
  %res = sub i8 %sat, %b
  ret i8 %res
}

define i8 @test_scalar_uadd_sub_commuted(i8 %a, i8 %b) {
; CHECK-LABEL: @test_scalar_uadd_sub_commuted(
; CHECK-NEXT:    [[SAT:%.*]] = call i8 @llvm.uadd.sat.i8(i8 [[B:%.*]], i8 [[A:%.*]])
; CHECK-NEXT:    [[RES:%.*]] = sub i8 [[SAT]], [[B]]
; CHECK-NEXT:    ret i8 [[RES]]
;
  %sat = call i8 @llvm.uadd.sat.i8(i8 %b, i8 %a)
  %res = sub i8 %sat, %b
  ret i8 %res
}

define i8 @test_scalar_uadd_sub_commuted_wrong(i8 %a, i8 %b) {
; CHECK-LABEL: @test_scalar_uadd_sub_commuted_wrong(
; CHECK-NEXT:    [[SAT:%.*]] = call i8 @llvm.uadd.sat.i8(i8 [[A:%.*]], i8 [[B:%.*]])
; CHECK-NEXT:    [[RES:%.*]] = sub i8 [[B]], [[SAT]]
; CHECK-NEXT:    ret i8 [[RES]]
;
  %sat = call i8 @llvm.uadd.sat.i8(i8 %a, i8 %b)
  %res = sub i8 %b, %sat
  ret i8 %res
}

define i8 @test_scalar_uadd_sub_const(i8 %a) {
; CHECK-LABEL: @test_scalar_uadd_sub_const(
; CHECK-NEXT:    [[SAT:%.*]] = call i8 @llvm.uadd.sat.i8(i8 [[A:%.*]], i8 42)
; CHECK-NEXT:    [[RES:%.*]] = add i8 [[SAT]], -42
; CHECK-NEXT:    ret i8 [[RES]]
;
  %sat = call i8 @llvm.uadd.sat.i8(i8 %a, i8 42)
  %res = sub i8 %sat, 42
  ret i8 %res
}

define i1 @scalar_uadd_eq_zero(i8 %a, i8 %b) {
; CHECK-LABEL: @scalar_uadd_eq_zero(
; CHECK-NEXT:    [[TMP1:%.*]] = or i8 [[A:%.*]], [[B:%.*]]
; CHECK-NEXT:    [[TMP2:%.*]] = icmp eq i8 [[TMP1]], 0
; CHECK-NEXT:    ret i1 [[TMP2]]
;
  %sat = call i8 @llvm.uadd.sat.i8(i8 %a, i8 %b)
  %cmp = icmp eq i8 %sat, 0
  ret i1 %cmp
}

define i1 @scalar_uadd_ne_zero(i8 %a, i8 %b) {
; CHECK-LABEL: @scalar_uadd_ne_zero(
; CHECK-NEXT:    [[TMP1:%.*]] = or i8 [[A:%.*]], [[B:%.*]]
; CHECK-NEXT:    [[TMP2:%.*]] = icmp ne i8 [[TMP1]], 0
; CHECK-NEXT:    ret i1 [[TMP2]]
;
  %sat = call i8 @llvm.uadd.sat.i8(i8 %a, i8 %b)
  %cmp = icmp ne i8 %sat, 0
  ret i1 %cmp
}

define i1 @scalar_usub_eq_zero(i8 %a, i8 %b) {
; CHECK-LABEL: @scalar_usub_eq_zero(
; CHECK-NEXT:    [[CMP:%.*]] = icmp ule i8 [[A:%.*]], [[B:%.*]]
; CHECK-NEXT:    ret i1 [[CMP]]
;
  %sat = call i8 @llvm.usub.sat.i8(i8 %a, i8 %b)
  %cmp = icmp eq i8 %sat, 0
  ret i1 %cmp
}

define i1 @scalar_usub_ne_zero(i8 %a, i8 %b) {
; CHECK-LABEL: @scalar_usub_ne_zero(
; CHECK-NEXT:    [[CMP:%.*]] = icmp ugt i8 [[A:%.*]], [[B:%.*]]
; CHECK-NEXT:    ret i1 [[CMP]]
;
  %sat = call i8 @llvm.usub.sat.i8(i8 %a, i8 %b)
  %cmp = icmp ne i8 %sat, 0
  ret i1 %cmp
}

define i32 @uadd_sat(i32 %x, i32 %y) {
; CHECK-LABEL: @uadd_sat(
; CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.uadd.sat.i32(i32 [[X:%.*]], i32 [[Y:%.*]])
; CHECK-NEXT:    ret i32 [[TMP1]]
;
  %notx = xor i32 %x, -1
  %a = add i32 %x, %y
  %c = icmp ult i32 %notx, %y
  %r = select i1 %c, i32 -1, i32 %a
  ret i32 %r
}

define i32 @uadd_sat_commute_add(i32 %xp, i32 %y) {
; CHECK-LABEL: @uadd_sat_commute_add(
; CHECK-NEXT:    [[X:%.*]] = urem i32 42, [[XP:%.*]]
; CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.uadd.sat.i32(i32 [[X]], i32 [[Y:%.*]])
; CHECK-NEXT:    ret i32 [[TMP1]]
;
  %x = urem i32 42, %xp ; thwart complexity-based-canonicalization
  %notx = xor i32 %x, -1
  %a = add i32 %x, %y
  %c = icmp ult i32 %notx, %y
  %r = select i1 %c, i32 -1, i32 %a
  ret i32 %r
}

define i32 @uadd_sat_ugt(i32 %x, i32 %yp) {
; CHECK-LABEL: @uadd_sat_ugt(
; CHECK-NEXT:    [[Y:%.*]] = sdiv i32 [[YP:%.*]], 2442
; CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.uadd.sat.i32(i32 [[X:%.*]], i32 [[Y]])
; CHECK-NEXT:    ret i32 [[TMP1]]
;
  %y = sdiv i32 %yp, 2442 ; thwart complexity-based-canonicalization
  %notx = xor i32 %x, -1
  %a = add i32 %x, %y
  %c = icmp ugt i32 %y, %notx
  %r = select i1 %c, i32 -1, i32 %a
  ret i32 %r
}

define <2 x i32> @uadd_sat_ugt_commute_add(<2 x i32> %xp, <2 x i32> %yp) {
; CHECK-LABEL: @uadd_sat_ugt_commute_add(
; CHECK-NEXT:    [[Y:%.*]] = sdiv <2 x i32> [[YP:%.*]], <i32 2442, i32 4242>
; CHECK-NEXT:    [[X:%.*]] = srem <2 x i32> <i32 42, i32 43>, [[XP:%.*]]
; CHECK-NEXT:    [[TMP1:%.*]] = call <2 x i32> @llvm.uadd.sat.v2i32(<2 x i32> [[X]], <2 x i32> [[Y]])
; CHECK-NEXT:    ret <2 x i32> [[TMP1]]
;
  %y = sdiv <2 x i32> %yp, <i32 2442, i32 4242> ; thwart complexity-based-canonicalization
  %x = srem <2 x i32> <i32 42, i32 43>, %xp ; thwart complexity-based-canonicalization
  %notx = xor <2 x i32> %x, <i32 -1, i32 -1>
  %a = add <2 x i32> %x, %y
  %c = icmp ugt <2 x i32> %y, %notx
  %r = select <2 x i1> %c, <2 x i32> <i32 -1, i32 -1>, <2 x i32> %a
  ret <2 x i32> %r
}

define i32 @uadd_sat_commute_select(i32 %x, i32 %yp) {
; CHECK-LABEL: @uadd_sat_commute_select(
; CHECK-NEXT:    [[Y:%.*]] = sdiv i32 [[YP:%.*]], 2442
; CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.uadd.sat.i32(i32 [[X:%.*]], i32 [[Y]])
; CHECK-NEXT:    ret i32 [[TMP1]]
;
  %y = sdiv i32 %yp, 2442 ; thwart complexity-based-canonicalization
  %notx = xor i32 %x, -1
  %a = add i32 %x, %y
  %c = icmp ult i32 %y, %notx
  %r = select i1 %c, i32 %a, i32 -1
  ret i32 %r
}

define i32 @uadd_sat_commute_select_commute_add(i32 %xp, i32 %yp) {
; CHECK-LABEL: @uadd_sat_commute_select_commute_add(
; CHECK-NEXT:    [[X:%.*]] = urem i32 42, [[XP:%.*]]
; CHECK-NEXT:    [[Y:%.*]] = sdiv i32 [[YP:%.*]], 2442
; CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.uadd.sat.i32(i32 [[X]], i32 [[Y]])
; CHECK-NEXT:    ret i32 [[TMP1]]
;
  %x = urem i32 42, %xp ; thwart complexity-based-canonicalization
  %y = sdiv i32 %yp, 2442 ; thwart complexity-based-canonicalization
  %notx = xor i32 %x, -1
  %a = add i32 %x, %y
  %c = icmp ult i32 %y, %notx
  %r = select i1 %c, i32 %a, i32 -1
  ret i32 %r
}

define <2 x i32> @uadd_sat_commute_select_ugt(<2 x i32> %x, <2 x i32> %y) {
; CHECK-LABEL: @uadd_sat_commute_select_ugt(
; CHECK-NEXT:    [[TMP1:%.*]] = call <2 x i32> @llvm.uadd.sat.v2i32(<2 x i32> [[X:%.*]], <2 x i32> [[Y:%.*]])
; CHECK-NEXT:    ret <2 x i32> [[TMP1]]
;
  %notx = xor <2 x i32> %x, <i32 -1, i32 -1>
  %a = add <2 x i32> %y, %x
  %c = icmp ugt <2 x i32> %notx, %y
  %r = select <2 x i1> %c, <2 x i32> %a, <2 x i32> <i32 -1, i32 -1>
  ret <2 x i32> %r
}

define i32 @uadd_sat_commute_select_ugt_commute_add(i32 %xp, i32 %y) {
; CHECK-LABEL: @uadd_sat_commute_select_ugt_commute_add(
; CHECK-NEXT:    [[X:%.*]] = srem i32 42, [[XP:%.*]]
; CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.uadd.sat.i32(i32 [[X]], i32 [[Y:%.*]])
; CHECK-NEXT:    ret i32 [[TMP1]]
;
  %x = srem i32 42, %xp ; thwart complexity-based-canonicalization
  %notx = xor i32 %x, -1
  %a = add i32 %x, %y
  %c = icmp ugt i32 %notx, %y
  %r = select i1 %c, i32 %a, i32 -1
  ret i32 %r
}

; Negative test - make sure we have a -1 in the select.

define i32 @not_uadd_sat(i32 %x, i32 %y) {
; CHECK-LABEL: @not_uadd_sat(
; CHECK-NEXT:    [[A:%.*]] = add i32 [[X:%.*]], -2
; CHECK-NEXT:    [[C:%.*]] = icmp ugt i32 [[X]], 1
; CHECK-NEXT:    [[R:%.*]] = select i1 [[C]], i32 [[A]], i32 [[Y:%.*]]
; CHECK-NEXT:    ret i32 [[R]]
;
  %a = add i32 %x, -2
  %c = icmp ugt i32 %x, 1
  %r = select i1 %c, i32 %a, i32 %y
  ret i32 %r
}

; Negative test - make sure the predicate is 'ult'.

define i32 @not_uadd_sat2(i32 %x, i32 %y) {
; CHECK-LABEL: @not_uadd_sat2(
; CHECK-NEXT:    [[A:%.*]] = add i32 [[X:%.*]], -2
; CHECK-NEXT:    [[C:%.*]] = icmp ugt i32 [[X]], 1
; CHECK-NEXT:    [[R:%.*]] = select i1 [[C]], i32 [[A]], i32 -1
; CHECK-NEXT:    ret i32 [[R]]
;
  %a = add i32 %x, -2
  %c = icmp ugt i32 %x, 1
  %r = select i1 %c, i32 %a, i32 -1
  ret i32 %r
}

; The add may include a 'not' op rather than the cmp.
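; (~x == UMAX - x, so uadd.sat(~x, y) saturates exactly when y u> x; the
; compare can therefore use x directly instead of the 'not'.)
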
define i32 @uadd_sat_not(i32 %x, i32 %y) {
; CHECK-LABEL: @uadd_sat_not(
; CHECK-NEXT:    [[NOTX:%.*]] = xor i32 [[X:%.*]], -1
; CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.uadd.sat.i32(i32 [[NOTX]], i32 [[Y:%.*]])
; CHECK-NEXT:    ret i32 [[TMP1]]
;
  %notx = xor i32 %x, -1
  %a = add i32 %notx, %y
  %c = icmp ult i32 %x, %y
  %r = select i1 %c, i32 -1, i32 %a
  ret i32 %r
}

define i32 @uadd_sat_not_commute_add(i32 %xp, i32 %yp) {
; CHECK-LABEL: @uadd_sat_not_commute_add(
; CHECK-NEXT:    [[X:%.*]] = srem i32 42, [[XP:%.*]]
; CHECK-NEXT:    [[Y:%.*]] = urem i32 42, [[YP:%.*]]
; CHECK-NEXT:    [[NOTX:%.*]] = xor i32 [[X]], -1
; CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.uadd.sat.i32(i32 [[Y]], i32 [[NOTX]])
; CHECK-NEXT:    ret i32 [[TMP1]]
;
  %x = srem i32 42, %xp ; thwart complexity-based-canonicalization
  %y = urem i32 42, %yp ; thwart complexity-based-canonicalization
  %notx = xor i32 %x, -1
  %a = add i32 %y, %notx
  %c = icmp ult i32 %x, %y
  %r = select i1 %c, i32 -1, i32 %a
  ret i32 %r
}

define i32 @uadd_sat_not_ugt(i32 %x, i32 %y) {
; CHECK-LABEL: @uadd_sat_not_ugt(
; CHECK-NEXT:    [[NOTX:%.*]] = xor i32 [[X:%.*]], -1
; CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.uadd.sat.i32(i32 [[NOTX]], i32 [[Y:%.*]])
; CHECK-NEXT:    ret i32 [[TMP1]]
;
  %notx = xor i32 %x, -1
  %a = add i32 %notx, %y
  %c = icmp ugt i32 %y, %x
  %r = select i1 %c, i32 -1, i32 %a
  ret i32 %r
}

define <2 x i32> @uadd_sat_not_ugt_commute_add(<2 x i32> %x, <2 x i32> %yp) {
; CHECK-LABEL: @uadd_sat_not_ugt_commute_add(
; CHECK-NEXT:    [[Y:%.*]] = sdiv <2 x i32> [[YP:%.*]], <i32 2442, i32 4242>
; CHECK-NEXT:    [[NOTX:%.*]] = xor <2 x i32> [[X:%.*]], <i32 -1, i32 -1>
; CHECK-NEXT:    [[TMP1:%.*]] = call <2 x i32> @llvm.uadd.sat.v2i32(<2 x i32> [[Y]], <2 x i32> [[NOTX]])
; CHECK-NEXT:    ret <2 x i32> [[TMP1]]
;
  %y = sdiv <2 x i32> %yp, <i32 2442, i32 4242> ; thwart complexity-based-canonicalization
  %notx = xor <2 x i32> %x, <i32 -1, i32 -1>
  %a = add <2 x i32> %y, %notx
  %c = icmp ugt <2 x i32> %y, %x
  %r = select <2 x i1> %c, <2 x i32> <i32 -1, i32 -1>, <2 x i32> %a
  ret <2 x i32> %r
}

define i32 @uadd_sat_not_commute_select(i32 %x, i32 %y) {
; CHECK-LABEL: @uadd_sat_not_commute_select(
; CHECK-NEXT:    [[NOTX:%.*]] = xor i32 [[X:%.*]], -1
; CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.uadd.sat.i32(i32 [[NOTX]], i32 [[Y:%.*]])
; CHECK-NEXT:    ret i32 [[TMP1]]
;
  %notx = xor i32 %x, -1
  %a = add i32 %notx, %y
  %c = icmp ult i32 %y, %x
  %r = select i1 %c, i32 %a, i32 -1
  ret i32 %r
}

define i32 @uadd_sat_not_commute_select_commute_add(i32 %x, i32 %yp) {
; CHECK-LABEL: @uadd_sat_not_commute_select_commute_add(
; CHECK-NEXT:    [[Y:%.*]] = sdiv i32 42, [[YP:%.*]]
; CHECK-NEXT:    [[NOTX:%.*]] = xor i32 [[X:%.*]], -1
; CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.uadd.sat.i32(i32 [[Y]], i32 [[NOTX]])
; CHECK-NEXT:    ret i32 [[TMP1]]
;
  %y = sdiv i32 42, %yp ; thwart complexity-based-canonicalization
  %notx = xor i32 %x, -1
  %a = add i32 %y, %notx
  %c = icmp ult i32 %y, %x
  %r = select i1 %c, i32 %a, i32 -1
  ret i32 %r
}

define <2 x i32> @uadd_sat_not_commute_select_ugt(<2 x i32> %xp, <2 x i32> %yp) {
; CHECK-LABEL: @uadd_sat_not_commute_select_ugt(
; CHECK-NEXT:    [[X:%.*]] = urem <2 x i32> <i32 42, i32 -42>, [[XP:%.*]]
; CHECK-NEXT:    [[Y:%.*]] = srem <2 x i32> <i32 12, i32 412>, [[YP:%.*]]
; CHECK-NEXT:    [[NOTX:%.*]] = xor <2 x i32> [[X]], <i32 -1, i32 -1>
; CHECK-NEXT:    [[TMP1:%.*]] = call <2 x i32> @llvm.uadd.sat.v2i32(<2 x i32> [[Y]], <2 x i32> [[NOTX]])
; CHECK-NEXT:    ret <2 x i32> [[TMP1]]
;
  %x = urem <2 x i32> <i32 42, i32 -42>, %xp ; thwart complexity-based-canonicalization
  %y = srem <2 x i32> <i32 12, i32 412>, %yp ; thwart complexity-based-canonicalization
  %notx = xor <2 x i32> %x, <i32 -1, i32 -1>
  %a = add <2 x i32> %y, %notx
  %c = icmp ugt <2 x i32> %x, %y
  %r = select <2 x i1> %c, <2 x i32> %a, <2 x i32> <i32 -1, i32 -1>
  ret <2 x i32> %r
}

define i32 @uadd_sat_not_commute_select_ugt_commute_add(i32 %x, i32 %y) {
; CHECK-LABEL: @uadd_sat_not_commute_select_ugt_commute_add(
; CHECK-NEXT:    [[NOTX:%.*]] = xor i32 [[X:%.*]], -1
; CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.uadd.sat.i32(i32 [[NOTX]], i32 [[Y:%.*]])
; CHECK-NEXT:    ret i32 [[TMP1]]
;
  %notx = xor i32 %x, -1
  %a = add i32 %notx, %y
  %c = icmp ugt i32 %x, %y
  %r = select i1 %c, i32 %a, i32 -1
  ret i32 %r
}

define i32 @uadd_sat_constant(i32 %x) {
; CHECK-LABEL: @uadd_sat_constant(
; CHECK-NEXT:    [[A:%.*]] = add i32 [[X:%.*]], 42
; CHECK-NEXT:    [[C:%.*]] = icmp ugt i32 [[X]], -43
; CHECK-NEXT:    [[R:%.*]] = select i1 [[C]], i32 -1, i32 [[A]]
; CHECK-NEXT:    ret i32 [[R]]
;
  %a = add i32 %x, 42
  %c = icmp ugt i32 %x, -43
  %r = select i1 %c, i32 -1, i32 %a
  ret i32 %r
}

define i32 @uadd_sat_constant_commute(i32 %x) {
; CHECK-LABEL: @uadd_sat_constant_commute(
; CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.uadd.sat.i32(i32 [[X:%.*]], i32 42)
; CHECK-NEXT:    ret i32 [[TMP1]]
;
  %a = add i32 %x, 42
  %c = icmp ult i32 %x, -43
  %r = select i1 %c, i32 %a, i32 -1
  ret i32 %r
}

define <4 x i32> @uadd_sat_constant_vec(<4 x i32> %x) {
; CHECK-LABEL: @uadd_sat_constant_vec(
; CHECK-NEXT:    [[A:%.*]] = add <4 x i32> [[X:%.*]], <i32 42, i32 42, i32 42, i32 42>
; CHECK-NEXT:    [[C:%.*]] = icmp ugt <4 x i32> [[X]], <i32 -43, i32 -43, i32 -43, i32 -43>
; CHECK-NEXT:    [[R:%.*]] = select <4 x i1> [[C]], <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, <4 x i32> [[A]]
; CHECK-NEXT:    ret <4 x i32> [[R]]
;
  %a = add <4 x i32> %x, <i32 42, i32 42, i32 42, i32 42>
  %c = icmp ugt <4 x i32> %x, <i32 -43, i32 -43, i32 -43, i32 -43>
  %r = select <4 x i1> %c, <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, <4 x i32> %a
  ret <4 x i32> %r
}

define <4 x i32> @uadd_sat_constant_vec_commute(<4 x i32> %x) {
; CHECK-LABEL: @uadd_sat_constant_vec_commute(
; CHECK-NEXT:    [[TMP1:%.*]] = call <4 x i32> @llvm.uadd.sat.v4i32(<4 x i32> [[X:%.*]], <4 x i32> <i32 42, i32 42, i32 42, i32 42>)
; CHECK-NEXT:    ret <4 x i32> [[TMP1]]
;
  %a = add <4 x i32> %x, <i32 42, i32 42, i32 42, i32 42>
  %c = icmp ult <4 x i32> %x, <i32 -43, i32 -43, i32 -43, i32 -43>
  %r = select <4 x i1> %c, <4 x i32> %a, <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>
  ret <4 x i32> %r
}

define <4 x i32> @uadd_sat_constant_vec_commute_undefs(<4 x i32> %x) {
; CHECK-LABEL: @uadd_sat_constant_vec_commute_undefs(
; CHECK-NEXT:    [[A:%.*]] = add <4 x i32> [[X:%.*]], <i32 42, i32 42, i32 42, i32 undef>
; CHECK-NEXT:    [[C:%.*]] = icmp ult <4 x i32> [[X]], <i32 -43, i32 -43, i32 undef, i32 -43>
; CHECK-NEXT:    [[R:%.*]] = select <4 x i1> [[C]], <4 x i32> [[A]], <4 x i32> <i32 -1, i32 undef, i32 -1, i32 -1>
; CHECK-NEXT:    ret <4 x i32> [[R]]
;
  %a = add <4 x i32> %x, <i32 42, i32 42, i32 42, i32 undef>
  %c = icmp ult <4 x i32> %x, <i32 -43, i32 -43, i32 undef, i32 -43>
  %r = select <4 x i1> %c, <4 x i32> %a, <4 x i32> <i32 -1, i32 undef, i32 -1, i32 -1>
  ret <4 x i32> %r
}

declare i32 @get_i32()
declare <2 x i8> @get_v2i8()

define i32 @unsigned_sat_variable_using_min_add(i32 %x) {
; CHECK-LABEL: @unsigned_sat_variable_using_min_add(
; CHECK-NEXT:    [[Y:%.*]] = call i32 @get_i32()
; CHECK-NEXT:    [[R:%.*]] = call i32 @llvm.uadd.sat.i32(i32 [[X:%.*]], i32 [[Y]])
; CHECK-NEXT:    ret i32 [[R]]
;
  %y = call i32 @get_i32() ; thwart complexity-based canonicalization
  %noty = xor i32 %y, -1
  %c = icmp ult i32 %x, %noty
  %s = select i1 %c, i32 %x, i32 %noty
  %r = add i32 %s, %y
  ret i32 %r
}

define i32 @unsigned_sat_variable_using_min_commute_add(i32 %x) {
; CHECK-LABEL: @unsigned_sat_variable_using_min_commute_add(
; CHECK-NEXT:    [[Y:%.*]] = call i32 @get_i32()
; CHECK-NEXT:    [[R:%.*]] = call i32 @llvm.uadd.sat.i32(i32 [[X:%.*]], i32 [[Y]])
; CHECK-NEXT:    ret i32 [[R]]
;
  %y = call i32 @get_i32() ; thwart complexity-based canonicalization
  %noty = xor i32 %y, -1
  %c = icmp ult i32 %x, %noty
  %s = select i1 %c, i32 %x, i32 %noty
  %r = add i32 %y, %s
  ret i32 %r
}

define <2 x i8> @unsigned_sat_variable_using_min_commute_select(<2 x i8> %x) {
; CHECK-LABEL: @unsigned_sat_variable_using_min_commute_select(
; CHECK-NEXT:    [[Y:%.*]] = call <2 x i8> @get_v2i8()
; CHECK-NEXT:    [[R:%.*]] = call <2 x i8> @llvm.uadd.sat.v2i8(<2 x i8> [[X:%.*]], <2 x i8> [[Y]])
; CHECK-NEXT:    ret <2 x i8> [[R]]
;
  %y = call <2 x i8> @get_v2i8() ; thwart complexity-based canonicalization
  %noty = xor <2 x i8> %y, <i8 -1, i8 -1>
  %c = icmp ult <2 x i8> %noty, %x
  %s = select <2 x i1> %c, <2 x i8> %noty, <2 x i8> %x
  %r = add <2 x i8> %s, %y
  ret <2 x i8> %r
}

define <2 x i8> @unsigned_sat_variable_using_min_commute_add_select(<2 x i8> %x) {
; CHECK-LABEL: @unsigned_sat_variable_using_min_commute_add_select(
; CHECK-NEXT:    [[Y:%.*]] = call <2 x i8> @get_v2i8()
; CHECK-NEXT:    [[R:%.*]] = call <2 x i8> @llvm.uadd.sat.v2i8(<2 x i8> [[X:%.*]], <2 x i8> [[Y]])
; CHECK-NEXT:    ret <2 x i8> [[R]]
;
  %y = call <2 x i8> @get_v2i8() ; thwart complexity-based canonicalization
  %noty = xor <2 x i8> %y, <i8 -1, i8 -1>
  %c = icmp ult <2 x i8> %noty, %x
  %s = select <2 x i1> %c, <2 x i8> %noty, <2 x i8> %x
  %r = add <2 x i8> %y, %s
  ret <2 x i8> %r
}

define i32 @unsigned_sat_variable_using_wrong_min(i32 %x) {
; CHECK-LABEL: @unsigned_sat_variable_using_wrong_min(
; CHECK-NEXT:    [[Y:%.*]] = call i32 @get_i32()
; CHECK-NEXT:    [[NOTY:%.*]] = xor i32 [[Y]], -1
; CHECK-NEXT:    [[C:%.*]] = icmp sgt i32 [[NOTY]], [[X:%.*]]
; CHECK-NEXT:    [[S:%.*]] = select i1 [[C]], i32 [[X]], i32 [[NOTY]]
; CHECK-NEXT:    [[R:%.*]] = add i32 [[Y]], [[S]]
; CHECK-NEXT:    ret i32 [[R]]
;
  %y = call i32 @get_i32() ; thwart complexity-based canonicalization
  %noty = xor i32 %y, -1
  %c = icmp slt i32 %x, %noty
  %s = select i1 %c, i32 %x, i32 %noty
  %r = add i32 %y, %s
  ret i32 %r
}

define i32 @unsigned_sat_variable_using_wrong_value(i32 %x, i32 %z) {
; CHECK-LABEL: @unsigned_sat_variable_using_wrong_value(
; CHECK-NEXT:    [[Y:%.*]] = call i32 @get_i32()
; CHECK-NEXT:    [[NOTY:%.*]] = xor i32 [[Y]], -1
; CHECK-NEXT:    [[C:%.*]] = icmp ugt i32 [[NOTY]], [[X:%.*]]
; CHECK-NEXT:    [[S:%.*]] = select i1 [[C]], i32 [[X]], i32 [[NOTY]]
; CHECK-NEXT:    [[R:%.*]] = add i32 [[S]], [[Z:%.*]]
; CHECK-NEXT:    ret i32 [[R]]
;
  %y = call i32 @get_i32() ; thwart complexity-based canonicalization
  %noty = xor i32 %y, -1
  %c = icmp ult i32 %x, %noty
  %s = select i1 %c, i32 %x, i32 %noty
  %r = add i32 %s, %z
  ret i32 %r
}

; If we have a constant operand, there's no commutativity variation.
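; (umin(x, C) + C2 matches uadd.sat(x, C2) exactly when C == ~C2, i.e.
; C + C2 == UMAX; below, 42 == ~(-43).)
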
define i32 @unsigned_sat_constant_using_min(i32 %x) {
; CHECK-LABEL: @unsigned_sat_constant_using_min(
; CHECK-NEXT:    [[R:%.*]] = call i32 @llvm.uadd.sat.i32(i32 [[X:%.*]], i32 -43)
; CHECK-NEXT:    ret i32 [[R]]
;
  %c = icmp ult i32 %x, 42
  %s = select i1 %c, i32 %x, i32 42
  %r = add i32 %s, -43
  ret i32 %r
}

define <2 x i32> @unsigned_sat_constant_using_min_splat(<2 x i32> %x) {
; CHECK-LABEL: @unsigned_sat_constant_using_min_splat(
; CHECK-NEXT:    [[R:%.*]] = call <2 x i32> @llvm.uadd.sat.v2i32(<2 x i32> [[X:%.*]], <2 x i32> <i32 -15, i32 -15>)
; CHECK-NEXT:    ret <2 x i32> [[R]]
;
  %c = icmp ult <2 x i32> %x, <i32 14, i32 14>
  %s = select <2 x i1> %c, <2 x i32> %x, <2 x i32> <i32 14, i32 14>
  %r = add <2 x i32> %s, <i32 -15, i32 -15>
  ret <2 x i32> %r
}

define i32 @unsigned_sat_constant_using_min_wrong_constant(i32 %x) {
; CHECK-LABEL: @unsigned_sat_constant_using_min_wrong_constant(
; CHECK-NEXT:    [[C:%.*]] = icmp ult i32 [[X:%.*]], 42
; CHECK-NEXT:    [[S:%.*]] = select i1 [[C]], i32 [[X]], i32 42
; CHECK-NEXT:    [[R:%.*]] = add nsw i32 [[S]], -42
; CHECK-NEXT:    ret i32 [[R]]
;
  %c = icmp ult i32 %x, 42
  %s = select i1 %c, i32 %x, i32 42
  %r = add i32 %s, -42
  ret i32 %r
}