1 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
2 ; RUN: opt -passes=instcombine -S < %s | FileCheck %s
4 declare { i8, i1 } @llvm.uadd.with.overflow.i8(i8, i8) nounwind readnone
5 declare { i8, i1 } @llvm.sadd.with.overflow.i8(i8, i8) nounwind readnone
6 declare { i8, i1 } @llvm.usub.with.overflow.i8(i8, i8) nounwind readnone
7 declare { i8, i1 } @llvm.ssub.with.overflow.i8(i8, i8) nounwind readnone
8 declare { i8, i1 } @llvm.umul.with.overflow.i8(i8, i8) nounwind readnone
9 declare { i8, i1 } @llvm.smul.with.overflow.i8(i8, i8) nounwind readnone
10 declare { i32, i1 } @llvm.sadd.with.overflow.i32(i32, i32) nounwind readnone
11 declare { i32, i1 } @llvm.uadd.with.overflow.i32(i32, i32) nounwind readnone
12 declare { i32, i1 } @llvm.ssub.with.overflow.i32(i32, i32) nounwind readnone
13 declare { i32, i1 } @llvm.usub.with.overflow.i32(i32, i32) nounwind readnone
14 declare { i32, i1 } @llvm.smul.with.overflow.i32(i32, i32) nounwind readnone
15 declare { i32, i1 } @llvm.umul.with.overflow.i32(i32, i32) nounwind readnone
; When only the result (element 0) is used, uadd.with.overflow folds to a plain add.
define i8 @uaddtest1(i8 %A, i8 %B) {
; CHECK-LABEL: @uaddtest1(
; CHECK-NEXT:    [[Y:%.*]] = add i8 [[A:%.*]], [[B:%.*]]
; CHECK-NEXT:    ret i8 [[Y]]
;
  %x = call { i8, i1 } @llvm.uadd.with.overflow.i8(i8 %A, i8 %B)
  %y = extractvalue { i8, i1 } %x, 0
  ret i8 %y
}
; Both operands have the sign bit cleared, so the add can never carry:
; the overflow bit folds to false and the add gains nuw.
define i8 @uaddtest2(i8 %A, i8 %B, ptr %overflowPtr) {
; CHECK-LABEL: @uaddtest2(
; CHECK-NEXT:    [[AND_A:%.*]] = and i8 [[A:%.*]], 127
; CHECK-NEXT:    [[AND_B:%.*]] = and i8 [[B:%.*]], 127
; CHECK-NEXT:    [[X:%.*]] = add nuw i8 [[AND_A]], [[AND_B]]
; CHECK-NEXT:    store i1 false, ptr [[OVERFLOWPTR:%.*]], align 1
; CHECK-NEXT:    ret i8 [[X]]
;
  %and.A = and i8 %A, 127
  %and.B = and i8 %B, 127
  %x = call { i8, i1 } @llvm.uadd.with.overflow.i8(i8 %and.A, i8 %and.B)
  %y = extractvalue { i8, i1 } %x, 0
  %z = extractvalue { i8, i1 } %x, 1
  store i1 %z, ptr %overflowPtr
  ret i8 %y
}
; Both operands have the top bit set, so the unsigned add always carries:
; the overflow bit folds to true.
define i8 @uaddtest3(i8 %A, i8 %B, ptr %overflowPtr) {
; CHECK-LABEL: @uaddtest3(
; CHECK-NEXT:    [[OR_A:%.*]] = or i8 [[A:%.*]], -128
; CHECK-NEXT:    [[OR_B:%.*]] = or i8 [[B:%.*]], -128
; CHECK-NEXT:    [[X:%.*]] = add i8 [[OR_A]], [[OR_B]]
; CHECK-NEXT:    store i1 true, ptr [[OVERFLOWPTR:%.*]], align 1
; CHECK-NEXT:    ret i8 [[X]]
;
  %or.A = or i8 %A, -128
  %or.B = or i8 %B, -128
  %x = call { i8, i1 } @llvm.uadd.with.overflow.i8(i8 %or.A, i8 %or.B)
  %y = extractvalue { i8, i1 } %x, 0
  %z = extractvalue { i8, i1 } %x, 1
  store i1 %z, ptr %overflowPtr
  ret i8 %y
}
; An undef operand lets the whole call constant-fold (result -1, no overflow).
define i8 @uaddtest4(i8 %A, ptr %overflowPtr) {
; CHECK-LABEL: @uaddtest4(
; CHECK-NEXT:    store i1 false, ptr [[OVERFLOWPTR:%.*]], align 1
; CHECK-NEXT:    ret i8 -1
;
  %x = call { i8, i1 } @llvm.uadd.with.overflow.i8(i8 undef, i8 %A)
  %y = extractvalue { i8, i1 } %x, 0
  %z = extractvalue { i8, i1 } %x, 1
  store i1 %z, ptr %overflowPtr
  ret i8 %y
}
; Adding zero can never overflow: identity fold on both struct elements.
define i8 @uaddtest5(i8 %A, ptr %overflowPtr) {
; CHECK-LABEL: @uaddtest5(
; CHECK-NEXT:    store i1 false, ptr [[OVERFLOWPTR:%.*]], align 1
; CHECK-NEXT:    ret i8 [[A:%.*]]
;
  %x = call { i8, i1 } @llvm.uadd.with.overflow.i8(i8 0, i8 %A)
  %y = extractvalue { i8, i1 } %x, 0
  %z = extractvalue { i8, i1 } %x, 1
  store i1 %z, ptr %overflowPtr
  ret i8 %y
}
; Only the overflow bit of A + (-4) is used; it folds to a compare A > 3.
define i1 @uaddtest6(i8 %A, i8 %B) {
; CHECK-LABEL: @uaddtest6(
; CHECK-NEXT:    [[Z:%.*]] = icmp ugt i8 [[A:%.*]], 3
; CHECK-NEXT:    ret i1 [[Z]]
;
  %x = call { i8, i1 } @llvm.uadd.with.overflow.i8(i8 %A, i8 -4)
  %z = extractvalue { i8, i1 } %x, 1
  ret i1 %z
}
; Result-only use again: the intrinsic is replaced by a plain add.
define i8 @uaddtest7(i8 %A, i8 %B) {
; CHECK-LABEL: @uaddtest7(
; CHECK-NEXT:    [[Z:%.*]] = add i8 [[A:%.*]], [[B:%.*]]
; CHECK-NEXT:    ret i8 [[Z]]
;
  %x = call { i8, i1 } @llvm.uadd.with.overflow.i8(i8 %A, i8 %B)
  %z = extractvalue { i8, i1 } %x, 0
  ret i8 %z
}
; sext'd i8 operands cannot overflow an i32 signed add: the intrinsic becomes
; add nsw plus an insertvalue with a constant-false overflow bit.
define { i32, i1 } @saddtest_nsw(i8 %a, i8 %b) {
; CHECK-LABEL: @saddtest_nsw(
; CHECK-NEXT:    [[AA:%.*]] = sext i8 [[A:%.*]] to i32
; CHECK-NEXT:    [[BB:%.*]] = sext i8 [[B:%.*]] to i32
; CHECK-NEXT:    [[X:%.*]] = add nsw i32 [[AA]], [[BB]]
; CHECK-NEXT:    [[TMP1:%.*]] = insertvalue { i32, i1 } { i32 poison, i1 false }, i32 [[X]], 0
; CHECK-NEXT:    ret { i32, i1 } [[TMP1]]
;
  %aa = sext i8 %a to i32
  %bb = sext i8 %b to i32
  %x = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %aa, i32 %bb)
  ret { i32, i1 } %x
}
; Masked-to-31-bit operands cannot carry in i32: fold to add nuw + false.
define { i32, i1 } @uaddtest_nuw(i32 %a, i32 %b) {
; CHECK-LABEL: @uaddtest_nuw(
; CHECK-NEXT:    [[AA:%.*]] = and i32 [[A:%.*]], 2147483647
; CHECK-NEXT:    [[BB:%.*]] = and i32 [[B:%.*]], 2147483647
; CHECK-NEXT:    [[X:%.*]] = add nuw i32 [[AA]], [[BB]]
; CHECK-NEXT:    [[TMP1:%.*]] = insertvalue { i32, i1 } { i32 poison, i1 false }, i32 [[X]], 0
; CHECK-NEXT:    ret { i32, i1 } [[TMP1]]
;
  %aa = and i32 %a, 2147483647
  %bb = and i32 %b, 2147483647
  %x = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %aa, i32 %bb)
  ret { i32, i1 } %x
}
; sext'd i8 operands cannot overflow an i32 signed sub: fold to sub nsw + false.
define { i32, i1 } @ssubtest_nsw(i8 %a, i8 %b) {
; CHECK-LABEL: @ssubtest_nsw(
; CHECK-NEXT:    [[AA:%.*]] = sext i8 [[A:%.*]] to i32
; CHECK-NEXT:    [[BB:%.*]] = sext i8 [[B:%.*]] to i32
; CHECK-NEXT:    [[X:%.*]] = sub nsw i32 [[AA]], [[BB]]
; CHECK-NEXT:    [[TMP1:%.*]] = insertvalue { i32, i1 } { i32 poison, i1 false }, i32 [[X]], 0
; CHECK-NEXT:    ret { i32, i1 } [[TMP1]]
;
  %aa = sext i8 %a to i32
  %bb = sext i8 %b to i32
  %x = call { i32, i1 } @llvm.ssub.with.overflow.i32(i32 %aa, i32 %bb)
  ret { i32, i1 } %x
}
; LHS has the top bit set and RHS has it clear, so LHS >= RHS always:
; the unsigned sub cannot borrow and folds to sub nuw + false.
define { i32, i1 } @usubtest_nuw(i32 %a, i32 %b) {
; CHECK-LABEL: @usubtest_nuw(
; CHECK-NEXT:    [[AA:%.*]] = or i32 [[A:%.*]], -2147483648
; CHECK-NEXT:    [[BB:%.*]] = and i32 [[B:%.*]], 2147483647
; CHECK-NEXT:    [[X:%.*]] = sub nuw i32 [[AA]], [[BB]]
; CHECK-NEXT:    [[TMP1:%.*]] = insertvalue { i32, i1 } { i32 poison, i1 false }, i32 [[X]], 0
; CHECK-NEXT:    ret { i32, i1 } [[TMP1]]
;
  %aa = or i32 %a, 2147483648
  %bb = and i32 %b, 2147483647
  %x = call { i32, i1 } @llvm.usub.with.overflow.i32(i32 %aa, i32 %bb)
  ret { i32, i1 } %x
}
; 12-bit x 19-bit operands fit in 31 bits, so the signed multiply cannot
; overflow: fold to mul nuw nsw + false.
define { i32, i1 } @smultest1_nsw(i32 %a, i32 %b) {
; CHECK-LABEL: @smultest1_nsw(
; CHECK-NEXT:    [[AA:%.*]] = and i32 [[A:%.*]], 4095
; CHECK-NEXT:    [[BB:%.*]] = and i32 [[B:%.*]], 524287
; CHECK-NEXT:    [[X:%.*]] = mul nuw nsw i32 [[AA]], [[BB]]
; CHECK-NEXT:    [[TMP1:%.*]] = insertvalue { i32, i1 } { i32 poison, i1 false }, i32 [[X]], 0
; CHECK-NEXT:    ret { i32, i1 } [[TMP1]]
;
  %aa = and i32 %a, 4095 ; 0xfff
  %bb = and i32 %b, 524287; 0x7ffff
  %x = call { i32, i1 } @llvm.smul.with.overflow.i32(i32 %aa, i32 %bb)
  ret { i32, i1 } %x
}
; Each ashr-by-16 operand has 17 known sign bits; the product fits i32,
; so the signed multiply cannot overflow: fold to mul nsw + false.
define { i32, i1 } @smultest2_nsw(i32 %a, i32 %b) {
; CHECK-LABEL: @smultest2_nsw(
; CHECK-NEXT:    [[AA:%.*]] = ashr i32 [[A:%.*]], 16
; CHECK-NEXT:    [[BB:%.*]] = ashr i32 [[B:%.*]], 16
; CHECK-NEXT:    [[X:%.*]] = mul nsw i32 [[AA]], [[BB]]
; CHECK-NEXT:    [[TMP1:%.*]] = insertvalue { i32, i1 } { i32 poison, i1 false }, i32 [[X]], 0
; CHECK-NEXT:    ret { i32, i1 } [[TMP1]]
;
  %aa = ashr i32 %a, 16
  %bb = ashr i32 %b, 16
  %x = call { i32, i1 } @llvm.smul.with.overflow.i32(i32 %aa, i32 %bb)
  ret { i32, i1 } %x
}
; Negative test: ashr 16 + ashr 15 leaves one bit too many, so the multiply
; may overflow and the intrinsic must be kept.
define { i32, i1 } @smultest3_sw(i32 %a, i32 %b) {
; CHECK-LABEL: @smultest3_sw(
; CHECK-NEXT:    [[AA:%.*]] = ashr i32 [[A:%.*]], 16
; CHECK-NEXT:    [[BB:%.*]] = ashr i32 [[B:%.*]], 15
; CHECK-NEXT:    [[X:%.*]] = call { i32, i1 } @llvm.smul.with.overflow.i32(i32 [[AA]], i32 [[BB]])
; CHECK-NEXT:    ret { i32, i1 } [[X]]
;
  %aa = ashr i32 %a, 16
  %bb = ashr i32 %b, 15
  %x = call { i32, i1 } @llvm.smul.with.overflow.i32(i32 %aa, i32 %bb)
  ret { i32, i1 } %x
}
; 16-bit x 16-bit operands fit in 32 bits unsigned: fold to mul nuw + false.
define { i32, i1 } @umultest_nuw(i32 %a, i32 %b) {
; CHECK-LABEL: @umultest_nuw(
; CHECK-NEXT:    [[AA:%.*]] = and i32 [[A:%.*]], 65535
; CHECK-NEXT:    [[BB:%.*]] = and i32 [[B:%.*]], 65535
; CHECK-NEXT:    [[X:%.*]] = mul nuw i32 [[AA]], [[BB]]
; CHECK-NEXT:    [[TMP1:%.*]] = insertvalue { i32, i1 } { i32 poison, i1 false }, i32 [[X]], 0
; CHECK-NEXT:    ret { i32, i1 } [[TMP1]]
;
  %aa = and i32 %a, 65535 ; 0xffff
  %bb = and i32 %b, 65535 ; 0xffff
  %x = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 %aa, i32 %bb)
  ret { i32, i1 } %x
}
; Multiplying by zero constant-folds both struct elements.
define i8 @umultest1(i8 %A, ptr %overflowPtr) {
; CHECK-LABEL: @umultest1(
; CHECK-NEXT:    store i1 false, ptr [[OVERFLOWPTR:%.*]], align 1
; CHECK-NEXT:    ret i8 0
;
  %x = call { i8, i1 } @llvm.umul.with.overflow.i8(i8 0, i8 %A)
  %y = extractvalue { i8, i1 } %x, 0
  %z = extractvalue { i8, i1 } %x, 1
  store i1 %z, ptr %overflowPtr
  ret i8 %y
}
; Multiplying by one is an identity: result is A, overflow is false.
define i8 @umultest2(i8 %A, ptr %overflowPtr) {
; CHECK-LABEL: @umultest2(
; CHECK-NEXT:    store i1 false, ptr [[OVERFLOWPTR:%.*]], align 1
; CHECK-NEXT:    ret i8 [[A:%.*]]
;
  %x = call { i8, i1 } @llvm.umul.with.overflow.i8(i8 1, i8 %A)
  %y = extractvalue { i8, i1 } %x, 0
  %z = extractvalue { i8, i1 } %x, 1
  store i1 %z, ptr %overflowPtr
  ret i8 %y
}
; (n >> 2) * 3 cannot overflow, so the overflow bit and the select fold away.
define i32 @umultest3(i32 %n) nounwind {
; CHECK-LABEL: @umultest3(
; CHECK-NEXT:    [[SHR:%.*]] = lshr i32 [[N:%.*]], 2
; CHECK-NEXT:    [[MUL:%.*]] = mul nuw i32 [[SHR]], 3
; CHECK-NEXT:    ret i32 [[MUL]]
;
  %shr = lshr i32 %n, 2
  %mul = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 %shr, i32 3)
  %ov = extractvalue { i32, i1 } %mul, 1
  %res = extractvalue { i32, i1 } %mul, 0
  %ret = select i1 %ov, i32 -1, i32 %res
  ret i32 %ret
}
; (n >> 1) * 4 overflows exactly when n's sign bit is set; instcombine turns
; the intrinsic into shift/mask arithmetic plus a sign test for the select.
define i32 @umultest4(i32 %n) nounwind {
; CHECK-LABEL: @umultest4(
; CHECK-NEXT:    [[TMP1:%.*]] = shl i32 [[N:%.*]], 1
; CHECK-NEXT:    [[RES:%.*]] = and i32 [[TMP1]], -4
; CHECK-NEXT:    [[OV_INV:%.*]] = icmp sgt i32 [[N]], -1
; CHECK-NEXT:    [[RET:%.*]] = select i1 [[OV_INV]], i32 [[RES]], i32 -1
; CHECK-NEXT:    ret i32 [[RET]]
;
  %shr = lshr i32 %n, 1
  %mul = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 %shr, i32 4)
  %ov = extractvalue { i32, i1 } %mul, 1
  %res = extractvalue { i32, i1 } %mul, 0
  %ret = select i1 %ov, i32 -1, i32 %res
  ret i32 %ret
}
; Both operands have the top bit set, so the unsigned multiply always
; overflows: the overflow bit folds to constant true.
define { i32, i1 } @umultest5(i32 %x, i32 %y) nounwind {
; CHECK-LABEL: @umultest5(
; CHECK-NEXT:    [[OR_X:%.*]] = or i32 [[X:%.*]], -2147483648
; CHECK-NEXT:    [[OR_Y:%.*]] = or i32 [[Y:%.*]], -2147483648
; CHECK-NEXT:    [[MUL:%.*]] = mul i32 [[OR_X]], [[OR_Y]]
; CHECK-NEXT:    [[TMP1:%.*]] = insertvalue { i32, i1 } { i32 poison, i1 true }, i32 [[MUL]], 0
; CHECK-NEXT:    ret { i32, i1 } [[TMP1]]
;
  %or_x = or i32 %x, 2147483648
  %or_y = or i32 %y, 2147483648
  %mul = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 %or_x, i32 %or_y)
  ret { i32, i1 } %mul
}
; sdiv by 2 bounds the value so that +1 cannot overflow: folds to false.
define i1 @overflow_div_add(i32 %v1, i32 %v2) nounwind {
; CHECK-LABEL: @overflow_div_add(
; CHECK-NEXT:    ret i1 false
;
  %div = sdiv i32 %v1, 2
  %t = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %div, i32 1)
  %obit = extractvalue { i32, i1 } %t, 1
  ret i1 %obit
}
define i1 @overflow_div_sub(i32 %v1, i32 %v2) nounwind {
; Check cases where the known sign bits are larger than the word size.
; CHECK-LABEL: @overflow_div_sub(
; CHECK-NEXT:    ret i1 false
;
  %a = ashr i32 %v1, 18
  %div = sdiv i32 %a, 65536
  %t = call { i32, i1 } @llvm.ssub.with.overflow.i32(i32 %div, i32 1)
  %obit = extractvalue { i32, i1 } %t, 1
  ret i1 %obit
}
; |v1 % 1000| < 1000, so the square fits in i32: overflow folds to false.
define i1 @overflow_mod_mul(i32 %v1, i32 %v2) nounwind {
; CHECK-LABEL: @overflow_mod_mul(
; CHECK-NEXT:    ret i1 false
;
  %rem = srem i32 %v1, 1000
  %t = call { i32, i1 } @llvm.smul.with.overflow.i32(i32 %rem, i32 %rem)
  %obit = extractvalue { i32, i1 } %t, 1
  ret i1 %obit
}
; Negative test: 65537 leaves operands just wide enough that the square can
; exceed 32 bits, so the intrinsic must be kept.
define i1 @overflow_mod_overflow_mul(i32 %v1, i32 %v2) nounwind {
; CHECK-LABEL: @overflow_mod_overflow_mul(
; CHECK-NEXT:    [[REM:%.*]] = srem i32 [[V1:%.*]], 65537
; CHECK-NEXT:    [[T:%.*]] = call { i32, i1 } @llvm.smul.with.overflow.i32(i32 [[REM]], i32 [[REM]])
; CHECK-NEXT:    [[OBIT:%.*]] = extractvalue { i32, i1 } [[T]], 1
; CHECK-NEXT:    ret i1 [[OBIT]]
;
  %rem = srem i32 %v1, 65537
  ; This may overflow because the result of the mul operands may be greater than 16bits
  ; and the result greater than 32.
  %t = call { i32, i1 } @llvm.smul.with.overflow.i32(i32 %rem, i32 %rem)
  %obit = extractvalue { i32, i1 } %t, 1
  ret i1 %obit
}
; srem of a sext'd i16 keeps at least 16 sign bits, so the square fits i32.
define i1 @overflow_mod_mul2(i16 %v1, i32 %v2) nounwind {
; CHECK-LABEL: @overflow_mod_mul2(
; CHECK-NEXT:    ret i1 false
;
  %a = sext i16 %v1 to i32
  %rem = srem i32 %a, %v2
  %t = call { i32, i1 } @llvm.smul.with.overflow.i32(i32 %rem, i32 %rem)
  %obit = extractvalue { i32, i1 } %t, 1
  ret i1 %obit
}
; 0 - sext(i8) cannot overflow i32: folds to sub nsw + false.
define { i32, i1 } @ssubtest_reorder(i8 %a) {
; CHECK-LABEL: @ssubtest_reorder(
; CHECK-NEXT:    [[AA:%.*]] = sext i8 [[A:%.*]] to i32
; CHECK-NEXT:    [[X:%.*]] = sub nsw i32 0, [[AA]]
; CHECK-NEXT:    [[TMP1:%.*]] = insertvalue { i32, i1 } { i32 poison, i1 false }, i32 [[X]], 0
; CHECK-NEXT:    ret { i32, i1 } [[TMP1]]
;
  %aa = sext i8 %a to i32
  %x = call { i32, i1 } @llvm.ssub.with.overflow.i32(i32 0, i32 %aa)
  ret { i32, i1 } %x
}
; Subtracting zero is an identity: no sub instruction is needed at all.
define { i32, i1 } @never_overflows_ssub_test0(i32 %a) {
; CHECK-LABEL: @never_overflows_ssub_test0(
; CHECK-NEXT:    [[X:%.*]] = insertvalue { i32, i1 } { i32 poison, i1 false }, i32 [[A:%.*]], 0
; CHECK-NEXT:    ret { i32, i1 } [[X]]
;
  %x = call { i32, i1 } @llvm.ssub.with.overflow.i32(i32 %a, i32 0)
  ret { i32, i1 } %x
}
; (x + y) u< x is exactly the carry of x + y: the icmp is replaced by a
; second use of the intrinsic's overflow bit.
define i1 @uadd_res_ult_x(i32 %x, i32 %y, ptr %p) nounwind {
; CHECK-LABEL: @uadd_res_ult_x(
; CHECK-NEXT:    [[A:%.*]] = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 [[X:%.*]], i32 [[Y:%.*]])
; CHECK-NEXT:    [[B:%.*]] = extractvalue { i32, i1 } [[A]], 1
; CHECK-NEXT:    store i1 [[B]], ptr [[P:%.*]], align 1
; CHECK-NEXT:    [[D:%.*]] = extractvalue { i32, i1 } [[A]], 1
; CHECK-NEXT:    ret i1 [[D]]
;
  %a = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %x, i32 %y)
  %b = extractvalue { i32, i1 } %a, 1
  store i1 %b, ptr %p
  %c = extractvalue { i32, i1 } %a, 0
  %d = icmp ult i32 %c, %x
  ret i1 %d
}
; Same as uadd_res_ult_x but comparing against the second addend.
define i1 @uadd_res_ult_y(i32 %x, i32 %y, ptr %p) nounwind {
; CHECK-LABEL: @uadd_res_ult_y(
; CHECK-NEXT:    [[A:%.*]] = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 [[X:%.*]], i32 [[Y:%.*]])
; CHECK-NEXT:    [[B:%.*]] = extractvalue { i32, i1 } [[A]], 1
; CHECK-NEXT:    store i1 [[B]], ptr [[P:%.*]], align 1
; CHECK-NEXT:    [[D:%.*]] = extractvalue { i32, i1 } [[A]], 1
; CHECK-NEXT:    ret i1 [[D]]
;
  %a = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %x, i32 %y)
  %b = extractvalue { i32, i1 } %a, 1
  store i1 %b, ptr %p
  %c = extractvalue { i32, i1 } %a, 0
  %d = icmp ult i32 %c, %y
  ret i1 %d
}
; x u> (x + y) is also the carry of x + y; the urem only defeats
; complexity-based operand canonicalization.
define i1 @uadd_res_ugt_x(i32 %xx, i32 %y, ptr %p) nounwind {
; CHECK-LABEL: @uadd_res_ugt_x(
; CHECK-NEXT:    [[X:%.*]] = urem i32 42, [[XX:%.*]]
; CHECK-NEXT:    [[A:%.*]] = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 [[X]], i32 [[Y:%.*]])
; CHECK-NEXT:    [[B:%.*]] = extractvalue { i32, i1 } [[A]], 1
; CHECK-NEXT:    store i1 [[B]], ptr [[P:%.*]], align 1
; CHECK-NEXT:    [[D:%.*]] = extractvalue { i32, i1 } [[A]], 1
; CHECK-NEXT:    ret i1 [[D]]
;
  %x = urem i32 42, %xx ; Thwart complexity-based canonicalization
  %a = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %x, i32 %y)
  %b = extractvalue { i32, i1 } %a, 1
  store i1 %b, ptr %p
  %c = extractvalue { i32, i1 } %a, 0
  %d = icmp ugt i32 %x, %c
  ret i1 %d
}
; y u> (x + y) is the carry of x + y; urem thwarts operand canonicalization.
define i1 @uadd_res_ugt_y(i32 %x, i32 %yy, ptr %p) nounwind {
; CHECK-LABEL: @uadd_res_ugt_y(
; CHECK-NEXT:    [[Y:%.*]] = urem i32 42, [[YY:%.*]]
; CHECK-NEXT:    [[A:%.*]] = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 [[X:%.*]], i32 [[Y]])
; CHECK-NEXT:    [[B:%.*]] = extractvalue { i32, i1 } [[A]], 1
; CHECK-NEXT:    store i1 [[B]], ptr [[P:%.*]], align 1
; CHECK-NEXT:    [[D:%.*]] = extractvalue { i32, i1 } [[A]], 1
; CHECK-NEXT:    ret i1 [[D]]
;
  %y = urem i32 42, %yy ; Thwart complexity-based canonicalization
  %a = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %x, i32 %y)
  %b = extractvalue { i32, i1 } %a, 1
  store i1 %b, ptr %p
  %c = extractvalue { i32, i1 } %a, 0
  %d = icmp ugt i32 %y, %c
  ret i1 %d
}
; (x + 42) u< 42 is the carry of x + 42: replaced by the overflow bit.
define i1 @uadd_res_ult_const(i32 %x, ptr %p) nounwind {
; CHECK-LABEL: @uadd_res_ult_const(
; CHECK-NEXT:    [[A:%.*]] = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 [[X:%.*]], i32 42)
; CHECK-NEXT:    [[B:%.*]] = extractvalue { i32, i1 } [[A]], 1
; CHECK-NEXT:    store i1 [[B]], ptr [[P:%.*]], align 1
; CHECK-NEXT:    [[D:%.*]] = extractvalue { i32, i1 } [[A]], 1
; CHECK-NEXT:    ret i1 [[D]]
;
  %a = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %x, i32 42)
  %b = extractvalue { i32, i1 } %a, 1
  store i1 %b, ptr %p
  %c = extractvalue { i32, i1 } %a, 0
  %d = icmp ult i32 %c, 42
  ret i1 %d
}
; Boundary case of the previous fold with the constant 1.
define i1 @uadd_res_ult_const_one(i32 %x, ptr %p) nounwind {
; CHECK-LABEL: @uadd_res_ult_const_one(
; CHECK-NEXT:    [[A:%.*]] = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 [[X:%.*]], i32 1)
; CHECK-NEXT:    [[B:%.*]] = extractvalue { i32, i1 } [[A]], 1
; CHECK-NEXT:    store i1 [[B]], ptr [[P:%.*]], align 1
; CHECK-NEXT:    [[D:%.*]] = extractvalue { i32, i1 } [[A]], 1
; CHECK-NEXT:    ret i1 [[D]]
;
  %a = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %x, i32 1)
  %b = extractvalue { i32, i1 } %a, 1
  store i1 %b, ptr %p
  %c = extractvalue { i32, i1 } %a, 0
  %d = icmp ult i32 %c, 1
  ret i1 %d
}
; Boundary case of the previous fold with the constant -1 (UINT_MAX).
define i1 @uadd_res_ult_const_minus_one(i32 %x, ptr %p) nounwind {
; CHECK-LABEL: @uadd_res_ult_const_minus_one(
; CHECK-NEXT:    [[A:%.*]] = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 [[X:%.*]], i32 -1)
; CHECK-NEXT:    [[B:%.*]] = extractvalue { i32, i1 } [[A]], 1
; CHECK-NEXT:    store i1 [[B]], ptr [[P:%.*]], align 1
; CHECK-NEXT:    [[D:%.*]] = extractvalue { i32, i1 } [[A]], 1
; CHECK-NEXT:    ret i1 [[D]]
;
  %a = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %x, i32 -1)
  %b = extractvalue { i32, i1 } %a, 1
  store i1 %b, ptr %p
  %c = extractvalue { i32, i1 } %a, 0
  %d = icmp ult i32 %c, -1
  ret i1 %d
}
; Commutative intrinsics get their constant operand canonicalized to the RHS.
define { i32, i1 } @sadd_canonicalize_constant_arg0(i32 %x) nounwind {
; CHECK-LABEL: @sadd_canonicalize_constant_arg0(
; CHECK-NEXT:    [[A:%.*]] = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 [[X:%.*]], i32 42)
; CHECK-NEXT:    ret { i32, i1 } [[A]]
;
  %a = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 42, i32 %x)
  ret { i32, i1 } %a
}
; uadd is commutative: the constant moves to the RHS.
define { i32, i1 } @uadd_canonicalize_constant_arg0(i32 %x) nounwind {
; CHECK-LABEL: @uadd_canonicalize_constant_arg0(
; CHECK-NEXT:    [[A:%.*]] = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 [[X:%.*]], i32 42)
; CHECK-NEXT:    ret { i32, i1 } [[A]]
;
  %a = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 42, i32 %x)
  ret { i32, i1 } %a
}
; Subtraction is not commutative: the constant LHS must stay put.
define { i32, i1 } @ssub_no_canonicalize_constant_arg0(i32 %x) nounwind {
; CHECK-LABEL: @ssub_no_canonicalize_constant_arg0(
; CHECK-NEXT:    [[A:%.*]] = call { i32, i1 } @llvm.ssub.with.overflow.i32(i32 42, i32 [[X:%.*]])
; CHECK-NEXT:    ret { i32, i1 } [[A]]
;
  %a = call { i32, i1 } @llvm.ssub.with.overflow.i32(i32 42, i32 %x)
  ret { i32, i1 } %a
}
; usub is not commutative either: no operand swap.
define { i32, i1 } @usub_no_canonicalize_constant_arg0(i32 %x) nounwind {
; CHECK-LABEL: @usub_no_canonicalize_constant_arg0(
; CHECK-NEXT:    [[A:%.*]] = call { i32, i1 } @llvm.usub.with.overflow.i32(i32 42, i32 [[X:%.*]])
; CHECK-NEXT:    ret { i32, i1 } [[A]]
;
  %a = call { i32, i1 } @llvm.usub.with.overflow.i32(i32 42, i32 %x)
  ret { i32, i1 } %a
}
; smul is commutative: the constant moves to the RHS.
define { i32, i1 } @smul_canonicalize_constant_arg0(i32 %x) nounwind {
; CHECK-LABEL: @smul_canonicalize_constant_arg0(
; CHECK-NEXT:    [[A:%.*]] = call { i32, i1 } @llvm.smul.with.overflow.i32(i32 [[X:%.*]], i32 42)
; CHECK-NEXT:    ret { i32, i1 } [[A]]
;
  %a = call { i32, i1 } @llvm.smul.with.overflow.i32(i32 42, i32 %x)
  ret { i32, i1 } %a
}
; umul is commutative: the constant moves to the RHS.
define { i32, i1 } @umul_canonicalize_constant_arg0(i32 %x) nounwind {
; CHECK-LABEL: @umul_canonicalize_constant_arg0(
; CHECK-NEXT:    [[A:%.*]] = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 [[X:%.*]], i32 42)
; CHECK-NEXT:    ret { i32, i1 } [[A]]
;
  %a = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 42, i32 %x)
  ret { i32, i1 } %a
}
534 ; Always overflow tests
; %y has its top two bits known set, so %y + 64 always carries out of i8:
; overflow folds to true and the wrapped sum to x & 63.
; NOTE(review): the %y definition was reconstructed from the CHECK lines
; (truncated in this copy) — verify against the upstream test.
define { i8, i1 } @uadd_always_overflow(i8 %x) nounwind {
; CHECK-LABEL: @uadd_always_overflow(
; CHECK-NEXT:    [[A:%.*]] = and i8 [[X:%.*]], 63
; CHECK-NEXT:    [[TMP1:%.*]] = insertvalue { i8, i1 } { i8 poison, i1 true }, i8 [[A]], 0
; CHECK-NEXT:    ret { i8, i1 } [[TMP1]]
;
  %y = or i8 %x, 192
  %a = call { i8, i1 } @llvm.uadd.with.overflow.i8(i8 %y, i8 64)
  ret { i8, i1 } %a
}
; %y >= 64, so 63 - %y always borrows: overflow folds to true.
; NOTE(review): the %y definition was reconstructed from the CHECK lines
; (truncated in this copy) — verify against the upstream test.
define { i8, i1 } @usub_always_overflow(i8 %x) nounwind {
; CHECK-LABEL: @usub_always_overflow(
; CHECK-NEXT:    [[Y:%.*]] = or i8 [[X:%.*]], 64
; CHECK-NEXT:    [[A:%.*]] = sub nsw i8 63, [[Y]]
; CHECK-NEXT:    [[TMP1:%.*]] = insertvalue { i8, i1 } { i8 poison, i1 true }, i8 [[A]], 0
; CHECK-NEXT:    ret { i8, i1 } [[TMP1]]
;
  %y = or i8 %x, 64
  %a = call { i8, i1 } @llvm.usub.with.overflow.i8(i8 63, i8 %y)
  ret { i8, i1 } %a
}
; %y has the top bit set, so %y * 2 always exceeds i8: overflow folds to
; true and the wrapped product to x << 1.
; NOTE(review): the %y definition was reconstructed from the CHECK lines
; (truncated in this copy) — verify against the upstream test.
define { i8, i1 } @umul_always_overflow(i8 %x) nounwind {
; CHECK-LABEL: @umul_always_overflow(
; CHECK-NEXT:    [[A:%.*]] = shl i8 [[X:%.*]], 1
; CHECK-NEXT:    [[TMP1:%.*]] = insertvalue { i8, i1 } { i8 poison, i1 true }, i8 [[A]], 0
; CHECK-NEXT:    ret { i8, i1 } [[TMP1]]
;
  %y = or i8 %x, 128
  %a = call { i8, i1 } @llvm.umul.with.overflow.i8(i8 %y, i8 2)
  ret { i8, i1 } %a
}
; smax(x, 100) + 28 always exceeds 127, so signed-add overflow folds to true.
define { i8, i1 } @sadd_always_overflow(i8 %x) nounwind {
; CHECK-LABEL: @sadd_always_overflow(
; CHECK-NEXT:    [[Y:%.*]] = call i8 @llvm.smax.i8(i8 [[X:%.*]], i8 100)
; CHECK-NEXT:    [[A:%.*]] = add nuw i8 [[Y]], 28
; CHECK-NEXT:    [[TMP1:%.*]] = insertvalue { i8, i1 } { i8 poison, i1 true }, i8 [[A]], 0
; CHECK-NEXT:    ret { i8, i1 } [[TMP1]]
;
  %c = icmp sgt i8 %x, 100
  %y = select i1 %c, i8 %x, i8 100
  %a = call { i8, i1 } @llvm.sadd.with.overflow.i8(i8 %y, i8 28)
  ret { i8, i1 } %a
}
; -100 - smax(x, 29) always drops below -128: signed-sub overflow folds to true.
define { i8, i1 } @ssub_always_overflow(i8 %x) nounwind {
; CHECK-LABEL: @ssub_always_overflow(
; CHECK-NEXT:    [[Y:%.*]] = call i8 @llvm.smax.i8(i8 [[X:%.*]], i8 29)
; CHECK-NEXT:    [[A:%.*]] = sub nuw i8 -100, [[Y]]
; CHECK-NEXT:    [[TMP1:%.*]] = insertvalue { i8, i1 } { i8 poison, i1 true }, i8 [[A]], 0
; CHECK-NEXT:    ret { i8, i1 } [[TMP1]]
;
  %c = icmp sgt i8 %x, 29
  %y = select i1 %c, i8 %x, i8 29
  %a = call { i8, i1 } @llvm.ssub.with.overflow.i8(i8 -100, i8 %y)
  ret { i8, i1 } %a
}
; Negative test: smax(x, 100) * 2 always overflows, but instcombine does not
; (yet) prove it, so the intrinsic is kept.
define { i8, i1 } @smul_always_overflow(i8 %x) nounwind {
; CHECK-LABEL: @smul_always_overflow(
; CHECK-NEXT:    [[Y:%.*]] = call i8 @llvm.smax.i8(i8 [[X:%.*]], i8 100)
; CHECK-NEXT:    [[A:%.*]] = call { i8, i1 } @llvm.smul.with.overflow.i8(i8 [[Y]], i8 2)
; CHECK-NEXT:    ret { i8, i1 } [[A]]
;
  %c = icmp sgt i8 %x, 100
  %y = select i1 %c, i8 %x, i8 100
  %a = call { i8, i1 } @llvm.smul.with.overflow.i8(i8 %y, i8 2)
  ret { i8, i1 } %a
}
608 declare { <4 x i8>, <4 x i1> } @llvm.sadd.with.overflow.v4i8(<4 x i8>, <4 x i8>)
609 declare { <4 x i8>, <4 x i1> } @llvm.uadd.with.overflow.v4i8(<4 x i8>, <4 x i8>)
610 declare { <4 x i8>, <4 x i1> } @llvm.ssub.with.overflow.v4i8(<4 x i8>, <4 x i8>)
611 declare { <4 x i8>, <4 x i1> } @llvm.usub.with.overflow.v4i8(<4 x i8>, <4 x i8>)
612 declare { <4 x i8>, <4 x i1> } @llvm.smul.with.overflow.v4i8(<4 x i8>, <4 x i8>)
613 declare { <4 x i8>, <4 x i1> } @llvm.umul.with.overflow.v4i8(<4 x i8>, <4 x i8>)
; 127 + 1 wraps to -128 in every lane: constant-folds with all-true overflow.
define { <4 x i8>, <4 x i1> } @always_sadd_const_vector() nounwind {
; CHECK-LABEL: @always_sadd_const_vector(
; CHECK-NEXT:    ret { <4 x i8>, <4 x i1> } { <4 x i8> <i8 -128, i8 -128, i8 -128, i8 -128>, <4 x i1> <i1 true, i1 true, i1 true, i1 true> }
;
  %x = call { <4 x i8>, <4 x i1> } @llvm.sadd.with.overflow.v4i8(<4 x i8> <i8 127, i8 127, i8 127, i8 127>, <4 x i8> <i8 1, i8 1, i8 1, i8 1>)
  ret { <4 x i8>, <4 x i1> } %x
}
; 255 + 1 wraps to 0 in every lane: constant-folds with all-true overflow.
define { <4 x i8>, <4 x i1> } @always_uadd_const_vector() nounwind {
; CHECK-LABEL: @always_uadd_const_vector(
; CHECK-NEXT:    ret { <4 x i8>, <4 x i1> } { <4 x i8> zeroinitializer, <4 x i1> <i1 true, i1 true, i1 true, i1 true> }
;
  %x = call { <4 x i8>, <4 x i1> } @llvm.uadd.with.overflow.v4i8(<4 x i8> <i8 255, i8 255, i8 255, i8 255>, <4 x i8> <i8 1, i8 1, i8 1, i8 1>)
  ret { <4 x i8>, <4 x i1> } %x
}
; -128 - 1 wraps to 127 in every lane: constant-folds with all-true overflow.
define { <4 x i8>, <4 x i1> } @always_ssub_const_vector() nounwind {
; CHECK-LABEL: @always_ssub_const_vector(
; CHECK-NEXT:    ret { <4 x i8>, <4 x i1> } { <4 x i8> <i8 127, i8 127, i8 127, i8 127>, <4 x i1> <i1 true, i1 true, i1 true, i1 true> }
;
  %x = call { <4 x i8>, <4 x i1> } @llvm.ssub.with.overflow.v4i8(<4 x i8> <i8 -128, i8 -128, i8 -128, i8 -128>, <4 x i8> <i8 1, i8 1, i8 1, i8 1>)
  ret { <4 x i8>, <4 x i1> } %x
}
; 0 - 1 wraps to 255 (-1) in every lane: constant-folds with all-true overflow.
define { <4 x i8>, <4 x i1> } @always_usub_const_vector() nounwind {
; CHECK-LABEL: @always_usub_const_vector(
; CHECK-NEXT:    ret { <4 x i8>, <4 x i1> } { <4 x i8> <i8 -1, i8 -1, i8 -1, i8 -1>, <4 x i1> <i1 true, i1 true, i1 true, i1 true> }
;
  %x = call { <4 x i8>, <4 x i1> } @llvm.usub.with.overflow.v4i8(<4 x i8> <i8 0, i8 0, i8 0, i8 0>, <4 x i8> <i8 1, i8 1, i8 1, i8 1>)
  ret { <4 x i8>, <4 x i1> } %x
}
; NOTE: LLVM doesn't (yet) detect the multiplication always results in a overflow
define { <4 x i8>, <4 x i1> } @always_smul_const_vector() nounwind {
; CHECK-LABEL: @always_smul_const_vector(
; CHECK-NEXT:    [[X:%.*]] = call { <4 x i8>, <4 x i1> } @llvm.smul.with.overflow.v4i8(<4 x i8> <i8 127, i8 127, i8 127, i8 127>, <4 x i8> <i8 3, i8 3, i8 3, i8 3>)
; CHECK-NEXT:    ret { <4 x i8>, <4 x i1> } [[X]]
;
  %x = call { <4 x i8>, <4 x i1> } @llvm.smul.with.overflow.v4i8(<4 x i8> <i8 127, i8 127, i8 127, i8 127>, <4 x i8> <i8 3, i8 3, i8 3, i8 3>)
  ret { <4 x i8>, <4 x i1> } %x
}
; 255 * 3 wraps to 253 (-3) in every lane: constant-folds with all-true overflow.
define { <4 x i8>, <4 x i1> } @always_umul_const_vector() nounwind {
; CHECK-LABEL: @always_umul_const_vector(
; CHECK-NEXT:    ret { <4 x i8>, <4 x i1> } { <4 x i8> <i8 -3, i8 -3, i8 -3, i8 -3>, <4 x i1> <i1 true, i1 true, i1 true, i1 true> }
;
  %x = call { <4 x i8>, <4 x i1> } @llvm.umul.with.overflow.v4i8(<4 x i8> <i8 255, i8 255, i8 255, i8 255>, <4 x i8> <i8 3, i8 3, i8 3, i8 3>)
  ret { <4 x i8>, <4 x i1> } %x
}
; Per-lane sums all stay within i8: constant-folds with all-false overflow.
define { <4 x i8>, <4 x i1> } @never_sadd_const_vector() nounwind {
; CHECK-LABEL: @never_sadd_const_vector(
; CHECK-NEXT:    ret { <4 x i8>, <4 x i1> } { <4 x i8> <i8 -50, i8 -10, i8 0, i8 60>, <4 x i1> zeroinitializer }
;
  %x = call { <4 x i8>, <4 x i1> } @llvm.sadd.with.overflow.v4i8(<4 x i8> <i8 -10, i8 -20, i8 30, i8 40>, <4 x i8> <i8 -40, i8 10, i8 -30, i8 20>)
  ret { <4 x i8>, <4 x i1> } %x
}
; Per-lane unsigned sums all fit in i8: constant-folds with all-false overflow.
define { <4 x i8>, <4 x i1> } @never_uadd_const_vector() nounwind {
; CHECK-LABEL: @never_uadd_const_vector(
; CHECK-NEXT:    ret { <4 x i8>, <4 x i1> } { <4 x i8> <i8 32, i8 64, i8 96, i8 48>, <4 x i1> zeroinitializer }
;
  %x = call { <4 x i8>, <4 x i1> } @llvm.uadd.with.overflow.v4i8(<4 x i8> <i8 0, i8 32, i8 64, i8 16>, <4 x i8> <i8 32, i8 32, i8 32, i8 32>)
  ret { <4 x i8>, <4 x i1> } %x
}
; Per-lane signed differences all fit in i8: all-false overflow.
define { <4 x i8>, <4 x i1> } @never_ssub_const_vector() nounwind {
; CHECK-LABEL: @never_ssub_const_vector(
; CHECK-NEXT:    ret { <4 x i8>, <4 x i1> } { <4 x i8> <i8 0, i8 10, i8 20, i8 30>, <4 x i1> zeroinitializer }
;
  %x = call { <4 x i8>, <4 x i1> } @llvm.ssub.with.overflow.v4i8(<4 x i8> <i8 -10, i8 -10, i8 -10, i8 -10>, <4 x i8> <i8 -10, i8 -20, i8 -30, i8 -40>)
  ret { <4 x i8>, <4 x i1> } %x
}
; 255 minus each lane never borrows: all-false overflow.
define { <4 x i8>, <4 x i1> } @never_usub_const_vector() nounwind {
; CHECK-LABEL: @never_usub_const_vector(
; CHECK-NEXT:    ret { <4 x i8>, <4 x i1> } { <4 x i8> <i8 127, i8 -1, i8 0, i8 -2>, <4 x i1> zeroinitializer }
;
  %x = call { <4 x i8>, <4 x i1> } @llvm.usub.with.overflow.v4i8(<4 x i8> <i8 255, i8 255, i8 255, i8 255>, <4 x i8> <i8 128, i8 0, i8 255, i8 1>)
  ret { <4 x i8>, <4 x i1> } %x
}
; Per-lane signed products all fit in i8: all-false overflow.
define { <4 x i8>, <4 x i1> } @never_smul_const_vector() nounwind {
; CHECK-LABEL: @never_smul_const_vector(
; CHECK-NEXT:    ret { <4 x i8>, <4 x i1> } { <4 x i8> <i8 -54, i8 -18, i8 -60, i8 -90>, <4 x i1> zeroinitializer }
;
  %x = call { <4 x i8>, <4 x i1> } @llvm.smul.with.overflow.v4i8(<4 x i8> <i8 -6, i8 -6, i8 -6, i8 -6>, <4 x i8> <i8 9, i8 3, i8 10, i8 15>)
  ret { <4 x i8>, <4 x i1> } %x
}
; Per-lane unsigned products all fit in i8 (max 225): all-false overflow.
define { <4 x i8>, <4 x i1> } @never_umul_const_vector() nounwind {
; CHECK-LABEL: @never_umul_const_vector(
; CHECK-NEXT:    ret { <4 x i8>, <4 x i1> } { <4 x i8> <i8 -31, i8 120, i8 60, i8 30>, <4 x i1> zeroinitializer }
;
  %x = call { <4 x i8>, <4 x i1> } @llvm.umul.with.overflow.v4i8(<4 x i8> <i8 15, i8 15, i8 15, i8 15>, <4 x i8> <i8 15, i8 8, i8 4, i8 2>)
  ret { <4 x i8>, <4 x i1> } %x
}
; Adding the neutral element (0) constant-folds to the other operand.
define { <4 x i8>, <4 x i1> } @neutral_sadd_const_vector() nounwind {
; CHECK-LABEL: @neutral_sadd_const_vector(
; CHECK-NEXT:    ret { <4 x i8>, <4 x i1> } { <4 x i8> <i8 1, i8 2, i8 3, i8 4>, <4 x i1> zeroinitializer }
;
  %x = call { <4 x i8>, <4 x i1> } @llvm.sadd.with.overflow.v4i8(<4 x i8> <i8 1, i8 2, i8 3, i8 4>, <4 x i8> <i8 0, i8 0, i8 0, i8 0>)
  ret { <4 x i8>, <4 x i1> } %x
}
; Unsigned add of 0 is the identity: folds with all-false overflow.
define { <4 x i8>, <4 x i1> } @neutral_uadd_const_vector() nounwind {
; CHECK-LABEL: @neutral_uadd_const_vector(
; CHECK-NEXT:    ret { <4 x i8>, <4 x i1> } { <4 x i8> <i8 1, i8 2, i8 3, i8 4>, <4 x i1> zeroinitializer }
;
  %x = call { <4 x i8>, <4 x i1> } @llvm.uadd.with.overflow.v4i8(<4 x i8> <i8 1, i8 2, i8 3, i8 4>, <4 x i8> <i8 0, i8 0, i8 0, i8 0>)
  ret { <4 x i8>, <4 x i1> } %x
}
; Signed sub of 0 is the identity: folds with all-false overflow.
define { <4 x i8>, <4 x i1> } @neutral_ssub_const_vector() nounwind {
; CHECK-LABEL: @neutral_ssub_const_vector(
; CHECK-NEXT:    ret { <4 x i8>, <4 x i1> } { <4 x i8> <i8 1, i8 2, i8 3, i8 4>, <4 x i1> zeroinitializer }
;
  %x = call { <4 x i8>, <4 x i1> } @llvm.ssub.with.overflow.v4i8(<4 x i8> <i8 1, i8 2, i8 3, i8 4>, <4 x i8> <i8 0, i8 0, i8 0, i8 0>)
  ret { <4 x i8>, <4 x i1> } %x
}
; Unsigned sub of 0 is the identity: folds with all-false overflow.
define { <4 x i8>, <4 x i1> } @neutral_usub_const_vector() nounwind {
; CHECK-LABEL: @neutral_usub_const_vector(
; CHECK-NEXT:    ret { <4 x i8>, <4 x i1> } { <4 x i8> <i8 1, i8 2, i8 3, i8 4>, <4 x i1> zeroinitializer }
;
  %x = call { <4 x i8>, <4 x i1> } @llvm.usub.with.overflow.v4i8(<4 x i8> <i8 1, i8 2, i8 3, i8 4>, <4 x i8> <i8 0, i8 0, i8 0, i8 0>)
  ret { <4 x i8>, <4 x i1> } %x
}
; Signed mul by 1 is the identity: folds with all-false overflow.
define { <4 x i8>, <4 x i1> } @neutral_smul_const_vector() nounwind {
; CHECK-LABEL: @neutral_smul_const_vector(
; CHECK-NEXT:    ret { <4 x i8>, <4 x i1> } { <4 x i8> <i8 1, i8 2, i8 3, i8 4>, <4 x i1> zeroinitializer }
;
  %x = call { <4 x i8>, <4 x i1> } @llvm.smul.with.overflow.v4i8(<4 x i8> <i8 1, i8 2, i8 3, i8 4>, <4 x i8> <i8 1, i8 1, i8 1, i8 1>)
  ret { <4 x i8>, <4 x i1> } %x
}
; Unsigned mul by 1 is the identity: folds with all-false overflow.
define { <4 x i8>, <4 x i1> } @neutral_umul_const_vector() nounwind {
; CHECK-LABEL: @neutral_umul_const_vector(
; CHECK-NEXT:    ret { <4 x i8>, <4 x i1> } { <4 x i8> <i8 1, i8 2, i8 3, i8 4>, <4 x i1> zeroinitializer }
;
  %x = call { <4 x i8>, <4 x i1> } @llvm.umul.with.overflow.v4i8(<4 x i8> <i8 1, i8 2, i8 3, i8 4>, <4 x i8> <i8 1, i8 1, i8 1, i8 1>)
  ret { <4 x i8>, <4 x i1> } %x
}
; Signed mul by -1 becomes a negation; it overflows only for INT8_MIN (-128).
define i8 @smul_neg1(i8 %x, ptr %p) {
; CHECK-LABEL: @smul_neg1(
; CHECK-NEXT:    [[R:%.*]] = sub i8 0, [[X:%.*]]
; CHECK-NEXT:    [[OV:%.*]] = icmp eq i8 [[X]], -128
; CHECK-NEXT:    store i1 [[OV]], ptr [[P:%.*]], align 1
; CHECK-NEXT:    ret i8 [[R]]
;
  %m = tail call { i8, i1 } @llvm.smul.with.overflow.i8(i8 %x, i8 -1)
  %r = extractvalue { i8, i1 } %m, 0
  %ov = extractvalue { i8, i1 } %m, 1
  store i1 %ov, ptr %p
  ret i8 %r
}
; Vector form of smul_neg1: negation plus a per-lane compare against -128.
define <4 x i8> @smul_neg1_vec(<4 x i8> %x, ptr %p) {
; CHECK-LABEL: @smul_neg1_vec(
; CHECK-NEXT:    [[R:%.*]] = sub <4 x i8> zeroinitializer, [[X:%.*]]
; CHECK-NEXT:    [[OV:%.*]] = icmp eq <4 x i8> [[X]], <i8 -128, i8 -128, i8 -128, i8 -128>
; CHECK-NEXT:    store <4 x i1> [[OV]], ptr [[P:%.*]], align 1
; CHECK-NEXT:    ret <4 x i8> [[R]]
;
  %m = tail call { <4 x i8>, <4 x i1> } @llvm.smul.with.overflow.v4i8(<4 x i8> %x, <4 x i8> <i8 -1, i8 -1, i8 -1, i8 -1>)
  %r = extractvalue { <4 x i8>, <4 x i1> } %m, 0
  %ov = extractvalue { <4 x i8>, <4 x i1> } %m, 1
  store <4 x i1> %ov, ptr %p
  ret <4 x i8> %r
}
; Same fold with a poison lane in the -1 splat: still treated as a splat of -1.
define <4 x i8> @smul_neg1_vec_poison(<4 x i8> %x, ptr %p) {
; CHECK-LABEL: @smul_neg1_vec_poison(
; CHECK-NEXT:    [[R:%.*]] = sub <4 x i8> zeroinitializer, [[X:%.*]]
; CHECK-NEXT:    [[OV:%.*]] = icmp eq <4 x i8> [[X]], <i8 -128, i8 -128, i8 -128, i8 -128>
; CHECK-NEXT:    store <4 x i1> [[OV]], ptr [[P:%.*]], align 1
; CHECK-NEXT:    ret <4 x i8> [[R]]
;
  %m = tail call { <4 x i8>, <4 x i1> } @llvm.smul.with.overflow.v4i8(<4 x i8> %x, <4 x i8> <i8 -1, i8 -1, i8 poison, i8 -1>)
  %r = extractvalue { <4 x i8>, <4 x i1> } %m, 0
  %ov = extractvalue { <4 x i8>, <4 x i1> } %m, 1
  store <4 x i1> %ov, ptr %p
  ret <4 x i8> %r
}
; Negative test: multiplying by -2 is not a simple negation, so the
; intrinsic is kept as-is.
define i8 @smul_neg2(i8 %x, ptr %p) {
; CHECK-LABEL: @smul_neg2(
; CHECK-NEXT:    [[M:%.*]] = tail call { i8, i1 } @llvm.smul.with.overflow.i8(i8 [[X:%.*]], i8 -2)
; CHECK-NEXT:    [[R:%.*]] = extractvalue { i8, i1 } [[M]], 0
; CHECK-NEXT:    [[OV:%.*]] = extractvalue { i8, i1 } [[M]], 1
; CHECK-NEXT:    store i1 [[OV]], ptr [[P:%.*]], align 1
; CHECK-NEXT:    ret i8 [[R]]
;
  %m = tail call { i8, i1 } @llvm.smul.with.overflow.i8(i8 %x, i8 -2)
  %r = extractvalue { i8, i1 } %m, 0
  %ov = extractvalue { i8, i1 } %m, 1
  store i1 %ov, ptr %p
  ret i8 %r
}
; umul.with.overflow(x, -1) — i.e. x * 255 — folds to sub 0, x; unsigned
; overflow happens for every x except 0 and 1, hence the (x ugt 1) compare.
824 define i8 @umul_neg1(i8 %x, ptr %p) {
825 ; CHECK-LABEL: @umul_neg1(
826 ; CHECK-NEXT: [[R:%.*]] = sub i8 0, [[X:%.*]]
827 ; CHECK-NEXT: [[OV:%.*]] = icmp ugt i8 [[X]], 1
828 ; CHECK-NEXT: store i1 [[OV]], ptr [[P:%.*]], align 1
829 ; CHECK-NEXT: ret i8 [[R]]
831 %m = tail call { i8, i1 } @llvm.umul.with.overflow.i8(i8 %x, i8 -1)
832 %r = extractvalue { i8, i1 } %m, 0
833 %ov = extractvalue { i8, i1 } %m, 1
; Vector form of umul_neg1: splat -1 multiplier folds per lane to negation,
; with the overflow lane set iff the element is unsigned-greater-than 1.
838 define <4 x i8> @umul_neg1_vec(<4 x i8> %x, ptr %p) {
839 ; CHECK-LABEL: @umul_neg1_vec(
840 ; CHECK-NEXT: [[R:%.*]] = sub <4 x i8> zeroinitializer, [[X:%.*]]
841 ; CHECK-NEXT: [[OV:%.*]] = icmp ugt <4 x i8> [[X]], <i8 1, i8 1, i8 1, i8 1>
842 ; CHECK-NEXT: store <4 x i1> [[OV]], ptr [[P:%.*]], align 1
843 ; CHECK-NEXT: ret <4 x i8> [[R]]
845 %m = tail call { <4 x i8>, <4 x i1> } @llvm.umul.with.overflow.v4i8(<4 x i8> %x, <4 x i8> <i8 -1, i8 -1, i8 -1, i8 -1>)
846 %r = extractvalue { <4 x i8>, <4 x i1> } %m, 0
847 %ov = extractvalue { <4 x i8>, <4 x i1> } %m, 1
848 store <4 x i1> %ov, ptr %p
; Same as umul_neg1_vec with poison lanes in the constant; the fold still fires
; and poison lanes are treated as -1 in the replacement constants.
852 define <4 x i8> @umul_neg1_vec_poison(<4 x i8> %x, ptr %p) {
853 ; CHECK-LABEL: @umul_neg1_vec_poison(
854 ; CHECK-NEXT: [[R:%.*]] = sub <4 x i8> zeroinitializer, [[X:%.*]]
855 ; CHECK-NEXT: [[OV:%.*]] = icmp ugt <4 x i8> [[X]], <i8 1, i8 1, i8 1, i8 1>
856 ; CHECK-NEXT: store <4 x i1> [[OV]], ptr [[P:%.*]], align 1
857 ; CHECK-NEXT: ret <4 x i8> [[R]]
859 %m = tail call { <4 x i8>, <4 x i1> } @llvm.umul.with.overflow.v4i8(<4 x i8> %x, <4 x i8> <i8 poison, i8 -1, i8 -1, i8 poison>)
860 %r = extractvalue { <4 x i8>, <4 x i1> } %m, 0
861 %ov = extractvalue { <4 x i8>, <4 x i1> } %m, 1
862 store <4 x i1> %ov, ptr %p
; Multiplier is -3 (not -1), but only the overflow bit is used, so the result
; value is dead; the overflow test becomes an add + unsigned range compare.
866 define <4 x i1> @smul_not_neg1_vec(<4 x i8> %x) {
867 ; CHECK-LABEL: @smul_not_neg1_vec(
868 ; CHECK-NEXT: [[TMP1:%.*]] = add <4 x i8> [[X:%.*]], <i8 -43, i8 -43, i8 -43, i8 -43>
869 ; CHECK-NEXT: [[OV:%.*]] = icmp ult <4 x i8> [[TMP1]], <i8 -85, i8 -85, i8 -85, i8 -85>
870 ; CHECK-NEXT: ret <4 x i1> [[OV]]
872 %m = call { <4 x i8>, <4 x i1> } @llvm.smul.with.overflow.v4i8(<4 x i8> %x, <4 x i8> <i8 -3, i8 -3, i8 poison, i8 -3>)
873 %ov = extractvalue { <4 x i8>, <4 x i1> } %m, 1
; A saturating-negate idiom: select(ov, -1, x * -1) collapses to
; sext(x != 0), i.e. 0 for x == 0 and -1 (255) for any non-zero x.
879 define i8 @umul_neg1_select(i8 %x) {
880 ; CHECK-LABEL: @umul_neg1_select(
881 ; CHECK-NEXT: [[TMP1:%.*]] = icmp ne i8 [[X:%.*]], 0
882 ; CHECK-NEXT: [[R:%.*]] = sext i1 [[TMP1]] to i8
883 ; CHECK-NEXT: ret i8 [[R]]
885 %m = tail call { i8, i1 } @llvm.umul.with.overflow.i8(i8 %x, i8 -1)
886 %m0 = extractvalue { i8, i1 } %m, 0
887 %m1 = extractvalue { i8, i1 } %m, 1
888 %r = select i1 %m1, i8 -1, i8 %m0
; umul by a power of 2 folds to a left shift; for *2 the overflow bit is
; exactly the top bit of x, expressed as a signed compare against 0.
892 define i8 @umul_2(i8 %x, ptr %p) {
893 ; CHECK-LABEL: @umul_2(
894 ; CHECK-NEXT: [[R:%.*]] = shl i8 [[X:%.*]], 1
895 ; CHECK-NEXT: [[OV:%.*]] = icmp slt i8 [[X]], 0
896 ; CHECK-NEXT: store i1 [[OV]], ptr [[P:%.*]], align 1
897 ; CHECK-NEXT: ret i8 [[R]]
899 %m = tail call { i8, i1 } @llvm.umul.with.overflow.i8(i8 %x, i8 2)
900 %r = extractvalue { i8, i1 } %m, 0
901 %ov = extractvalue { i8, i1 } %m, 1
; umul by 8 folds to shl 3; unsigned overflow iff any of the top 3 bits of x
; are set, i.e. x > 31.
906 define i8 @umul_8(i8 %x, ptr %p) {
907 ; CHECK-LABEL: @umul_8(
908 ; CHECK-NEXT: [[R:%.*]] = shl i8 [[X:%.*]], 3
909 ; CHECK-NEXT: [[OV:%.*]] = icmp ugt i8 [[X]], 31
910 ; CHECK-NEXT: store i1 [[OV]], ptr [[P:%.*]], align 1
911 ; CHECK-NEXT: ret i8 [[R]]
913 %m = tail call { i8, i1 } @llvm.umul.with.overflow.i8(i8 %x, i8 8)
914 %r = extractvalue { i8, i1 } %m, 0
915 %ov = extractvalue { i8, i1 } %m, 1
; umul by 64 folds to shl 6; unsigned overflow iff x > 3 (any of the top 6
; bits set).
920 define i8 @umul_64(i8 %x, ptr %p) {
921 ; CHECK-LABEL: @umul_64(
922 ; CHECK-NEXT: [[R:%.*]] = shl i8 [[X:%.*]], 6
923 ; CHECK-NEXT: [[OV:%.*]] = icmp ugt i8 [[X]], 3
924 ; CHECK-NEXT: store i1 [[OV]], ptr [[P:%.*]], align 1
925 ; CHECK-NEXT: ret i8 [[R]]
927 %m = tail call { i8, i1 } @llvm.umul.with.overflow.i8(i8 %x, i8 64)
928 %r = extractvalue { i8, i1 } %m, 0
929 %ov = extractvalue { i8, i1 } %m, 1
; The constant 256 wraps to 0 in i8, so the product constant-folds to 0 and
; the overflow bit to false; the whole call disappears.
934 define i8 @umul_256(i8 %x, ptr %p) {
935 ; CHECK-LABEL: @umul_256(
936 ; CHECK-NEXT: store i1 false, ptr [[P:%.*]], align 1
937 ; CHECK-NEXT: ret i8 0
939 %m = tail call { i8, i1 } @llvm.umul.with.overflow.i8(i8 %x, i8 256)
940 %r = extractvalue { i8, i1 } %m, 0
941 %ov = extractvalue { i8, i1 } %m, 1
; Vector umul by a splat power of 2 (with poison lanes) folds to shl 2; the
; overflow lane is x > 63, and poison lanes take the same replacement values.
946 define <4 x i8> @umul_4_vec_poison(<4 x i8> %x, ptr %p) {
947 ; CHECK-LABEL: @umul_4_vec_poison(
948 ; CHECK-NEXT: [[R:%.*]] = shl <4 x i8> [[X:%.*]], <i8 2, i8 2, i8 2, i8 2>
949 ; CHECK-NEXT: [[OV:%.*]] = icmp ugt <4 x i8> [[X]], <i8 63, i8 63, i8 63, i8 63>
950 ; CHECK-NEXT: store <4 x i1> [[OV]], ptr [[P:%.*]], align 1
951 ; CHECK-NEXT: ret <4 x i8> [[R]]
953 %m = tail call { <4 x i8>, <4 x i1> } @llvm.umul.with.overflow.v4i8(<4 x i8> %x, <4 x i8> <i8 poison, i8 4, i8 4, i8 poison>)
954 %r = extractvalue { <4 x i8>, <4 x i1> } %m, 0
955 %ov = extractvalue { <4 x i8>, <4 x i1> } %m, 1
956 store <4 x i1> %ov, ptr %p
960 ; Negative test: the multiplier (3) is not a power of 2, so there is no shift form.
; Multiplying by 3 cannot become a single shift, so the intrinsic call must
; remain untouched.
962 define i8 @umul_3(i8 %x, ptr %p) {
963 ; CHECK-LABEL: @umul_3(
964 ; CHECK-NEXT: [[M:%.*]] = tail call { i8, i1 } @llvm.umul.with.overflow.i8(i8 [[X:%.*]], i8 3)
965 ; CHECK-NEXT: [[R:%.*]] = extractvalue { i8, i1 } [[M]], 0
966 ; CHECK-NEXT: [[OV:%.*]] = extractvalue { i8, i1 } [[M]], 1
967 ; CHECK-NEXT: store i1 [[OV]], ptr [[P:%.*]], align 1
968 ; CHECK-NEXT: ret i8 [[R]]
970 %m = tail call { i8, i1 } @llvm.umul.with.overflow.i8(i8 %x, i8 3)
971 %r = extractvalue { i8, i1 } %m, 0
972 %ov = extractvalue { i8, i1 } %m, 1
; smul by a power of 2 folds to shl 2; the signed-overflow bit becomes an
; add + unsigned compare encoding the range check x outside [-32, 31].
977 define i8 @smul_4(i8 %x, ptr %p) {
978 ; CHECK-LABEL: @smul_4(
979 ; CHECK-NEXT: [[R:%.*]] = shl i8 [[X:%.*]], 2
980 ; CHECK-NEXT: [[TMP1:%.*]] = add i8 [[X]], -32
981 ; CHECK-NEXT: [[OV:%.*]] = icmp ult i8 [[TMP1]], -64
982 ; CHECK-NEXT: store i1 [[OV]], ptr [[P:%.*]], align 1
983 ; CHECK-NEXT: ret i8 [[R]]
985 %m = tail call { i8, i1 } @llvm.smul.with.overflow.i8(i8 %x, i8 4)
986 %r = extractvalue { i8, i1 } %m, 0
987 %ov = extractvalue { i8, i1 } %m, 1
; smul by 16 folds to shl 4; signed overflow becomes the range check
; x outside [-8, 7], encoded as add -8 + icmp ult -16.
992 define i8 @smul_16(i8 %x, ptr %p) {
993 ; CHECK-LABEL: @smul_16(
994 ; CHECK-NEXT: [[R:%.*]] = shl i8 [[X:%.*]], 4
995 ; CHECK-NEXT: [[TMP1:%.*]] = add i8 [[X]], -8
996 ; CHECK-NEXT: [[OV:%.*]] = icmp ult i8 [[TMP1]], -16
997 ; CHECK-NEXT: store i1 [[OV]], ptr [[P:%.*]], align 1
998 ; CHECK-NEXT: ret i8 [[R]]
1000 %m = tail call { i8, i1 } @llvm.smul.with.overflow.i8(i8 %x, i8 16)
1001 %r = extractvalue { i8, i1 } %m, 0
1002 %ov = extractvalue { i8, i1 } %m, 1
1003 store i1 %ov, ptr %p
; smul by 32 folds to shl 5; signed overflow becomes the range check
; x outside [-4, 3], encoded as add -4 + icmp ult -8.
1007 define i8 @smul_32(i8 %x, ptr %p) {
1008 ; CHECK-LABEL: @smul_32(
1009 ; CHECK-NEXT: [[R:%.*]] = shl i8 [[X:%.*]], 5
1010 ; CHECK-NEXT: [[TMP1:%.*]] = add i8 [[X]], -4
1011 ; CHECK-NEXT: [[OV:%.*]] = icmp ult i8 [[TMP1]], -8
1012 ; CHECK-NEXT: store i1 [[OV]], ptr [[P:%.*]], align 1
1013 ; CHECK-NEXT: ret i8 [[R]]
1015 %m = tail call { i8, i1 } @llvm.smul.with.overflow.i8(i8 %x, i8 32)
1016 %r = extractvalue { i8, i1 } %m, 0
1017 %ov = extractvalue { i8, i1 } %m, 1
1018 store i1 %ov, ptr %p
; The constant 128 is INT8_MIN (-128) in i8: smul by it folds to shl 7, and
; signed overflow occurs for every x except 0 and 1, hence (x ugt 1).
1022 define i8 @smul_128(i8 %x, ptr %p) {
1023 ; CHECK-LABEL: @smul_128(
1024 ; CHECK-NEXT: [[R:%.*]] = shl i8 [[X:%.*]], 7
1025 ; CHECK-NEXT: [[OV:%.*]] = icmp ugt i8 [[X]], 1
1026 ; CHECK-NEXT: store i1 [[OV]], ptr [[P:%.*]], align 1
1027 ; CHECK-NEXT: ret i8 [[R]]
1029 %m = tail call { i8, i1 } @llvm.smul.with.overflow.i8(i8 %x, i8 128)
1030 %r = extractvalue { i8, i1 } %m, 0
1031 %ov = extractvalue { i8, i1 } %m, 1
1032 store i1 %ov, ptr %p
; Vector smul by splat 2 (with poison lanes) folds to shl 1; per-lane signed
; overflow is the range check element outside [-64, 63] (add 64, then slt 0).
1036 define <4 x i8> @smul_2_vec_poison(<4 x i8> %x, ptr %p) {
1037 ; CHECK-LABEL: @smul_2_vec_poison(
1038 ; CHECK-NEXT: [[R:%.*]] = shl <4 x i8> [[X:%.*]], <i8 1, i8 1, i8 1, i8 1>
1039 ; CHECK-NEXT: [[TMP1:%.*]] = add <4 x i8> [[X]], <i8 64, i8 64, i8 64, i8 64>
1040 ; CHECK-NEXT: [[OV:%.*]] = icmp slt <4 x i8> [[TMP1]], zeroinitializer
1041 ; CHECK-NEXT: store <4 x i1> [[OV]], ptr [[P:%.*]], align 1
1042 ; CHECK-NEXT: ret <4 x i8> [[R]]
1044 %m = tail call { <4 x i8>, <4 x i1> } @llvm.smul.with.overflow.v4i8(<4 x i8> %x, <4 x i8> <i8 poison, i8 2, i8 2, i8 poison>)
1045 %r = extractvalue { <4 x i8>, <4 x i1> } %m, 0
1046 %ov = extractvalue { <4 x i8>, <4 x i1> } %m, 1
1047 store <4 x i1> %ov, ptr %p
1051 ; Negative test: the multiplier (7) is not a power of 2, so there is no shift form.
1053 define i8 @smul_7(i8 %x, ptr %p) {
1054 ; CHECK-LABEL: @smul_7(
1055 ; CHECK-NEXT: [[M:%.*]] = tail call { i8, i1 } @llvm.smul.with.overflow.i8(i8 [[X:%.*]], i8 7)
1056 ; CHECK-NEXT: [[R:%.*]] = extractvalue { i8, i1 } [[M]], 0
1057 ; CHECK-NEXT: [[OV:%.*]] = extractvalue { i8, i1 } [[M]], 1
1058 ; CHECK-NEXT: store i1 [[OV]], ptr [[P:%.*]], align 1
1059 ; CHECK-NEXT: ret i8 [[R]]
1061 %m = tail call { i8, i1 } @llvm.smul.with.overflow.i8(i8 %x, i8 7)
1062 %r = extractvalue { i8, i1 } %m, 0
1063 %ov = extractvalue { i8, i1 } %m, 1
1064 store i1 %ov, ptr %p