clang/test/CodeGen/builtins-overflow.c
// Test CodeGen for Security Check Overflow Builtins.

// RUN: %clang_cc1 -triple "i686-unknown-unknown" -emit-llvm -x c %s -o - | FileCheck -DLONG_TYPE=i32 -DLONG_MAX=2147483647 %s
// RUN: %clang_cc1 -triple "x86_64-unknown-unknown" -emit-llvm -x c %s -o - | FileCheck -DLONG_TYPE=i64 -DLONG_MAX=9223372036854775807 %s
// RUN: %clang_cc1 -triple "x86_64-mingw32" -emit-llvm -x c %s -o - | FileCheck -DLONG_TYPE=i32 -DLONG_MAX=2147483647 %s

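// The FileCheck -D defines above parameterize the checks on the target's
// `long`: [[LONG_TYPE]]/[[LONG_MAX]] are i32/2147483647 where long is 32 bits
// (i686 and x86_64-mingw32) and i64/9223372036854775807 on x86_64-unknown-unknown.
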
extern unsigned UnsignedErrorCode;
extern unsigned long UnsignedLongErrorCode;
extern unsigned long long UnsignedLongLongErrorCode;
extern int IntErrorCode;
extern long LongErrorCode;
extern long long LongLongErrorCode;
void overflowed(void);

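// Common pattern checked below: each generic __builtin_{add,sub,mul}_overflow
// call lowers to an @llvm.{s,u}{add,sub,mul}.with.overflow.iN intrinsic that
// returns { result, overflow-bit }; the result is stored through the third
// argument and the branch to overflowed() is taken on the overflow bit.
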
unsigned test_add_overflow_uint_uint_uint(unsigned x, unsigned y) {
  // CHECK-LABEL: define {{(dso_local )?}}i32 @test_add_overflow_uint_uint_uint
  // CHECK-NOT: ext
  // CHECK: [[S:%.+]] = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %{{.+}}, i32 %{{.+}})
  // CHECK-DAG: [[Q:%.+]] = extractvalue { i32, i1 } [[S]], 0
  // CHECK-DAG: [[C:%.+]] = extractvalue { i32, i1 } [[S]], 1
  // CHECK: store i32 [[Q]], ptr
  // CHECK: br i1 [[C]]
  unsigned r;
  if (__builtin_add_overflow(x, y, &r))
    overflowed();
  return r;
}

int test_add_overflow_int_int_int(int x, int y) {
  // CHECK-LABEL: define {{(dso_local )?}}i32 @test_add_overflow_int_int_int
  // CHECK-NOT: ext
  // CHECK: [[S:%.+]] = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %{{.+}}, i32 %{{.+}})
  // CHECK-DAG: [[C:%.+]] = extractvalue { i32, i1 } [[S]], 1
  // CHECK-DAG: [[Q:%.+]] = extractvalue { i32, i1 } [[S]], 0
  // CHECK: store i32 [[Q]], ptr
  // CHECK: br i1 [[C]]
  int r;
  if (__builtin_add_overflow(x, y, &r))
    overflowed();
  return r;
}

int test_add_overflow_xint31_xint31_xint31(_BitInt(31) x, _BitInt(31) y) {
  // CHECK-LABEL: define {{(dso_local )?}}i32 @test_add_overflow_xint31_xint31_xint31({{.+}})
  // CHECK-NOT: ext
  // CHECK: [[S:%.+]] = call { i31, i1 } @llvm.sadd.with.overflow.i31(i31 %{{.+}}, i31 %{{.+}})
  // CHECK-DAG: [[C:%.+]] = extractvalue { i31, i1 } [[S]], 1
  // CHECK-DAG: [[Q:%.+]] = extractvalue { i31, i1 } [[S]], 0
  // CHECK: store i31 [[Q]], ptr
  // CHECK: br i1 [[C]]
  _BitInt(31) r;
  if (__builtin_add_overflow(x, y, &r))
    overflowed();
  return r;
}

unsigned test_sub_overflow_uint_uint_uint(unsigned x, unsigned y) {
  // CHECK-LABEL: define {{(dso_local )?}}i32 @test_sub_overflow_uint_uint_uint
  // CHECK-NOT: ext
  // CHECK: [[S:%.+]] = call { i32, i1 } @llvm.usub.with.overflow.i32(i32 %{{.+}}, i32 %{{.+}})
  // CHECK-DAG: [[Q:%.+]] = extractvalue { i32, i1 } [[S]], 0
  // CHECK-DAG: [[C:%.+]] = extractvalue { i32, i1 } [[S]], 1
  // CHECK: store i32 [[Q]], ptr
  // CHECK: br i1 [[C]]
  unsigned r;
  if (__builtin_sub_overflow(x, y, &r))
    overflowed();
  return r;
}

int test_sub_overflow_int_int_int(int x, int y) {
  // CHECK-LABEL: define {{(dso_local )?}}i32 @test_sub_overflow_int_int_int
  // CHECK-NOT: ext
  // CHECK: [[S:%.+]] = call { i32, i1 } @llvm.ssub.with.overflow.i32(i32 %{{.+}}, i32 %{{.+}})
  // CHECK-DAG: [[C:%.+]] = extractvalue { i32, i1 } [[S]], 1
  // CHECK-DAG: [[Q:%.+]] = extractvalue { i32, i1 } [[S]], 0
  // CHECK: store i32 [[Q]], ptr
  // CHECK: br i1 [[C]]
  int r;
  if (__builtin_sub_overflow(x, y, &r))
    overflowed();
  return r;
}

int test_sub_overflow_xint31_xint31_xint31(_BitInt(31) x, _BitInt(31) y) {
  // CHECK-LABEL: define {{(dso_local )?}}i32 @test_sub_overflow_xint31_xint31_xint31({{.+}})
  // CHECK-NOT: ext
  // CHECK: [[S:%.+]] = call { i31, i1 } @llvm.ssub.with.overflow.i31(i31 %{{.+}}, i31 %{{.+}})
  // CHECK-DAG: [[C:%.+]] = extractvalue { i31, i1 } [[S]], 1
  // CHECK-DAG: [[Q:%.+]] = extractvalue { i31, i1 } [[S]], 0
  // CHECK: store i31 [[Q]], ptr
  // CHECK: br i1 [[C]]
  _BitInt(31) r;
  if (__builtin_sub_overflow(x, y, &r))
    overflowed();
  return r;
}

unsigned test_mul_overflow_uint_uint_uint(unsigned x, unsigned y) {
  // CHECK-LABEL: define {{(dso_local )?}}i32 @test_mul_overflow_uint_uint_uint
  // CHECK-NOT: ext
  // CHECK: [[S:%.+]] = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 %{{.+}}, i32 %{{.+}})
  // CHECK-DAG: [[Q:%.+]] = extractvalue { i32, i1 } [[S]], 0
  // CHECK-DAG: [[C:%.+]] = extractvalue { i32, i1 } [[S]], 1
  // CHECK: store i32 [[Q]], ptr
  // CHECK: br i1 [[C]]
  unsigned r;
  if (__builtin_mul_overflow(x, y, &r))
    overflowed();
  return r;
}

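// When unsigned operands feed a signed result, the intrinsic's overflow bit is
// ORed with an extra range check of the raw product against the destination's
// maximum (2147483647 for int, [[LONG_MAX]] for long) before the branch.
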
int test_mul_overflow_uint_uint_int(unsigned x, unsigned y) {
  // CHECK-LABEL: define {{(dso_local )?}}i32 @test_mul_overflow_uint_uint_int
  // CHECK: [[S:%.+]] = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 %{{.+}}, i32 %{{.+}})
  // CHECK-DAG: [[Q:%.+]] = extractvalue { i32, i1 } [[S]], 0
  // CHECK-DAG: [[C:%.+]] = extractvalue { i32, i1 } [[S]], 1
  // CHECK: [[C1:%.+]] = icmp ugt i32 [[Q]], 2147483647
  // CHECK: [[C2:%.+]] = or i1 [[C]], [[C1]]
  // CHECK: store i32 [[Q]], ptr
  // CHECK: br i1 [[C2]]
  int r;
  if (__builtin_mul_overflow(x, y, &r))
    overflowed();
  return r;
}

int test_mul_overflow_uint_uint_int_volatile(unsigned x, unsigned y) {
  // CHECK-LABEL: define {{(dso_local )?}}i32 @test_mul_overflow_uint_uint_int_volatile
  // CHECK: [[S:%.+]] = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 %{{.+}}, i32 %{{.+}})
  // CHECK-DAG: [[Q:%.+]] = extractvalue { i32, i1 } [[S]], 0
  // CHECK-DAG: [[C:%.+]] = extractvalue { i32, i1 } [[S]], 1
  // CHECK: [[C1:%.+]] = icmp ugt i32 [[Q]], 2147483647
  // CHECK: [[C2:%.+]] = or i1 [[C]], [[C1]]
  // CHECK: store volatile i32 [[Q]], ptr
  // CHECK: br i1 [[C2]]
  volatile int r;
  if (__builtin_mul_overflow(x, y, &r))
    overflowed();
  return r;
}

long test_mul_overflow_ulong_ulong_long(unsigned long x, unsigned long y) {
  // CHECK-LABEL: @test_mul_overflow_ulong_ulong_long
  // CHECK: [[S:%.+]] = call { [[LONG_TYPE]], i1 } @llvm.umul.with.overflow.[[LONG_TYPE]]([[LONG_TYPE]] %{{.+}}, [[LONG_TYPE]] %{{.+}})
  // CHECK-DAG: [[Q:%.+]] = extractvalue { [[LONG_TYPE]], i1 } [[S]], 0
  // CHECK-DAG: [[C:%.+]] = extractvalue { [[LONG_TYPE]], i1 } [[S]], 1
  // CHECK: [[C1:%.+]] = icmp ugt [[LONG_TYPE]] [[Q]], [[LONG_MAX]]
  // CHECK: [[C2:%.+]] = or i1 [[C]], [[C1]]
  // LONG64: store [[LONG_TYPE]] [[Q]], ptr
  // LONG64: br i1 [[C2]]
  long r;
  if (__builtin_mul_overflow(x, y, &r))
    overflowed();
  return r;
}

int test_mul_overflow_int_int_int(int x, int y) {
  // CHECK-LABEL: define {{(dso_local )?}}i32 @test_mul_overflow_int_int_int
  // CHECK-NOT: ext
  // CHECK: [[S:%.+]] = call { i32, i1 } @llvm.smul.with.overflow.i32(i32 %{{.+}}, i32 %{{.+}})
  // CHECK-DAG: [[C:%.+]] = extractvalue { i32, i1 } [[S]], 1
  // CHECK-DAG: [[Q:%.+]] = extractvalue { i32, i1 } [[S]], 0
  // CHECK: store i32 [[Q]], ptr
  // CHECK: br i1 [[C]]
  int r;
  if (__builtin_mul_overflow(x, y, &r))
    overflowed();
  return r;
}

int test_mul_overflow_xint31_xint31_xint31(_BitInt(31) x, _BitInt(31) y) {
  // CHECK-LABEL: define {{(dso_local )?}}i32 @test_mul_overflow_xint31_xint31_xint31({{.+}})
  // CHECK-NOT: ext
  // CHECK: [[S:%.+]] = call { i31, i1 } @llvm.smul.with.overflow.i31(i31 %{{.+}}, i31 %{{.+}})
  // CHECK-DAG: [[C:%.+]] = extractvalue { i31, i1 } [[S]], 1
  // CHECK-DAG: [[Q:%.+]] = extractvalue { i31, i1 } [[S]], 0
  // CHECK: store i31 [[Q]], ptr
  // CHECK: br i1 [[C]]
  _BitInt(31) r;
  if (__builtin_mul_overflow(x, y, &r))
    overflowed();
  return r;
}

int test_mul_overflow_xint127_xint127_xint127(_BitInt(127) x, _BitInt(127) y) {
  // CHECK-LABEL: define {{(dso_local )?}}i32 @test_mul_overflow_xint127_xint127_xint127({{.+}})
  // CHECK-NOT: ext
  // CHECK: [[S:%.+]] = call { i127, i1 } @llvm.smul.with.overflow.i127(i127 %{{.+}}, i127 %{{.+}})
  // CHECK-DAG: [[C:%.+]] = extractvalue { i127, i1 } [[S]], 1
  // CHECK-DAG: [[Q:%.+]] = extractvalue { i127, i1 } [[S]], 0
  // CHECK: store i127 [[Q]], ptr
  // CHECK: br i1 [[C]]
  _BitInt(127) r;
  if (__builtin_mul_overflow(x, y, &r))
    overflowed();
  return r;
}

int test_mul_overflow_xint128_xint128_xint128(_BitInt(128) x, _BitInt(128) y) {
  // CHECK-LABEL: define {{(dso_local )?}}i32 @test_mul_overflow_xint128_xint128_xint128({{.+}})
  // CHECK-NOT: ext
  // CHECK: [[S:%.+]] = call { i128, i1 } @llvm.smul.with.overflow.i128(i128 %{{.+}}, i128 %{{.+}})
  // CHECK-DAG: [[C:%.+]] = extractvalue { i128, i1 } [[S]], 1
  // CHECK-DAG: [[Q:%.+]] = extractvalue { i128, i1 } [[S]], 0
  // CHECK: store i128 [[Q]], ptr
  // CHECK: br i1 [[C]]
  _BitInt(128) r;
  if (__builtin_mul_overflow(x, y, &r))
    overflowed();
  return r;
}

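// When the operand signs differ, the operation is widened by one bit so both
// ranges fit (unsigned int + int -> i33 sadd below), and the narrowing store
// adds a trunc/sext round-trip comparison that is ORed into the overflow flag.
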
int test_add_overflow_uint_int_int(unsigned x, int y) {
  // CHECK-LABEL: define {{(dso_local )?}}i32 @test_add_overflow_uint_int_int
  // CHECK: [[XE:%.+]] = zext i32 %{{.+}} to i33
  // CHECK: [[YE:%.+]] = sext i32 %{{.+}} to i33
  // CHECK: [[S:%.+]] = call { i33, i1 } @llvm.sadd.with.overflow.i33(i33 [[XE]], i33 [[YE]])
  // CHECK-DAG: [[Q:%.+]] = extractvalue { i33, i1 } [[S]], 0
  // CHECK-DAG: [[C1:%.+]] = extractvalue { i33, i1 } [[S]], 1
  // CHECK: [[QT:%.+]] = trunc i33 [[Q]] to i32
  // CHECK: [[QTE:%.+]] = sext i32 [[QT]] to i33
  // CHECK: [[C2:%.+]] = icmp ne i33 [[Q]], [[QTE]]
  // CHECK: [[C3:%.+]] = or i1 [[C1]], [[C2]]
  // CHECK: store i32 [[QT]], ptr
  // CHECK: br i1 [[C3]]
  int r;
  if (__builtin_add_overflow(x, y, &r))
    overflowed();
  return r;
}

_Bool test_add_overflow_uint_uint_bool(unsigned x, unsigned y) {
  // CHECK-LABEL: define {{.*}} i1 @test_add_overflow_uint_uint_bool
  // CHECK-NOT: ext
  // CHECK: [[S:%.+]] = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %{{.+}}, i32 %{{.+}})
  // CHECK-DAG: [[Q:%.+]] = extractvalue { i32, i1 } [[S]], 0
  // CHECK-DAG: [[C1:%.+]] = extractvalue { i32, i1 } [[S]], 1
  // CHECK: [[QT:%.+]] = trunc i32 [[Q]] to i1
  // CHECK: [[QTE:%.+]] = zext i1 [[QT]] to i32
  // CHECK: [[C2:%.+]] = icmp ne i32 [[Q]], [[QTE]]
  // CHECK: [[C3:%.+]] = or i1 [[C1]], [[C2]]
  // CHECK: [[QT2:%.+]] = zext i1 [[QT]] to i8
  // CHECK: store i8 [[QT2]], ptr
  // CHECK: br i1 [[C3]]
  _Bool r;
  if (__builtin_add_overflow(x, y, &r))
    overflowed();
  return r;
}

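// A _Bool destination follows the same narrowing scheme: the i32 sum is
// truncated to i1, a zext/compare round trip detects loss, and the i1 value is
// widened to i8 for the store, matching _Bool's in-memory representation.
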
unsigned test_add_overflow_bool_bool_uint(_Bool x, _Bool y) {
  // CHECK-LABEL: define {{(dso_local )?}}i32 @test_add_overflow_bool_bool_uint
  // CHECK: [[XE:%.+]] = zext i1 %{{.+}} to i32
  // CHECK: [[YE:%.+]] = zext i1 %{{.+}} to i32
  // CHECK: [[S:%.+]] = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 [[XE]], i32 [[YE]])
  // CHECK-DAG: [[Q:%.+]] = extractvalue { i32, i1 } [[S]], 0
  // CHECK-DAG: [[C:%.+]] = extractvalue { i32, i1 } [[S]], 1
  // CHECK: store i32 [[Q]], ptr
  // CHECK: br i1 [[C]]
  unsigned r;
  if (__builtin_add_overflow(x, y, &r))
    overflowed();
  return r;
}

_Bool test_add_overflow_bool_bool_bool(_Bool x, _Bool y) {
  // CHECK-LABEL: define {{.*}} i1 @test_add_overflow_bool_bool_bool
  // CHECK: [[S:%.+]] = call { i1, i1 } @llvm.uadd.with.overflow.i1(i1 %{{.+}}, i1 %{{.+}})
  // CHECK-DAG: [[Q:%.+]] = extractvalue { i1, i1 } [[S]], 0
  // CHECK-DAG: [[C:%.+]] = extractvalue { i1, i1 } [[S]], 1
  // CHECK: [[QT2:%.+]] = zext i1 [[Q]] to i8
  // CHECK: store i8 [[QT2]], ptr
  // CHECK: br i1 [[C]]
  _Bool r;
  if (__builtin_add_overflow(x, y, &r))
    overflowed();
  return r;
}

int test_add_overflow_volatile(int x, int y) {
  // CHECK-LABEL: define {{(dso_local )?}}i32 @test_add_overflow_volatile
  // CHECK: [[S:%.+]] = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %{{.+}}, i32 %{{.+}})
  // CHECK-DAG: [[Q:%.+]] = extractvalue { i32, i1 } [[S]], 0
  // CHECK-DAG: [[C:%.+]] = extractvalue { i32, i1 } [[S]], 1
  // CHECK: store volatile i32 [[Q]], ptr
  // CHECK: br i1 [[C]]
  volatile int result;
  if (__builtin_add_overflow(x, y, &result))
    overflowed();
  return result;
}

unsigned test_uadd_overflow(unsigned x, unsigned y) {
  // CHECK: @test_uadd_overflow
  // CHECK: %{{.+}} = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %{{.+}}, i32 %{{.+}})
  unsigned result;
  if (__builtin_uadd_overflow(x, y, &result))
    return UnsignedErrorCode;
  return result;
}

unsigned long test_uaddl_overflow(unsigned long x, unsigned long y) {
  // CHECK: @test_uaddl_overflow([[UL:i32|i64]] noundef %x
  // CHECK: %{{.+}} = call { [[UL]], i1 } @llvm.uadd.with.overflow.[[UL]]([[UL]] %{{.+}}, [[UL]] %{{.+}})
  unsigned long result;
  if (__builtin_uaddl_overflow(x, y, &result))
    return UnsignedLongErrorCode;
  return result;
}

unsigned long long test_uaddll_overflow(unsigned long long x, unsigned long long y) {
  // CHECK: @test_uaddll_overflow
  // CHECK: %{{.+}} = call { i64, i1 } @llvm.uadd.with.overflow.i64(i64 %{{.+}}, i64 %{{.+}})
  unsigned long long result;
  if (__builtin_uaddll_overflow(x, y, &result))
    return UnsignedLongLongErrorCode;
  return result;
}

unsigned test_usub_overflow(unsigned x, unsigned y) {
  // CHECK: @test_usub_overflow
  // CHECK: %{{.+}} = call { i32, i1 } @llvm.usub.with.overflow.i32(i32 %{{.+}}, i32 %{{.+}})
  unsigned result;
  if (__builtin_usub_overflow(x, y, &result))
    return UnsignedErrorCode;
  return result;
}

unsigned long test_usubl_overflow(unsigned long x, unsigned long y) {
  // CHECK: @test_usubl_overflow([[UL:i32|i64]] noundef %x
  // CHECK: %{{.+}} = call { [[UL]], i1 } @llvm.usub.with.overflow.[[UL]]([[UL]] %{{.+}}, [[UL]] %{{.+}})
  unsigned long result;
  if (__builtin_usubl_overflow(x, y, &result))
    return UnsignedLongErrorCode;
  return result;
}

unsigned long long test_usubll_overflow(unsigned long long x, unsigned long long y) {
  // CHECK: @test_usubll_overflow
  // CHECK: %{{.+}} = call { i64, i1 } @llvm.usub.with.overflow.i64(i64 %{{.+}}, i64 %{{.+}})
  unsigned long long result;
  if (__builtin_usubll_overflow(x, y, &result))
    return UnsignedLongLongErrorCode;
  return result;
}

unsigned test_umul_overflow(unsigned x, unsigned y) {
  // CHECK: @test_umul_overflow
  // CHECK: %{{.+}} = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 %{{.+}}, i32 %{{.+}})
  unsigned result;
  if (__builtin_umul_overflow(x, y, &result))
    return UnsignedErrorCode;
  return result;
}

unsigned long test_umull_overflow(unsigned long x, unsigned long y) {
  // CHECK: @test_umull_overflow([[UL:i32|i64]] noundef %x
  // CHECK: %{{.+}} = call { [[UL]], i1 } @llvm.umul.with.overflow.[[UL]]([[UL]] %{{.+}}, [[UL]] %{{.+}})
  unsigned long result;
  if (__builtin_umull_overflow(x, y, &result))
    return UnsignedLongErrorCode;
  return result;
}

unsigned long long test_umulll_overflow(unsigned long long x, unsigned long long y) {
  // CHECK: @test_umulll_overflow
  // CHECK: %{{.+}} = call { i64, i1 } @llvm.umul.with.overflow.i64(i64 %{{.+}}, i64 %{{.+}})
  unsigned long long result;
  if (__builtin_umulll_overflow(x, y, &result))
    return UnsignedLongLongErrorCode;
  return result;
}

int test_sadd_overflow(int x, int y) {
  // CHECK: @test_sadd_overflow
  // CHECK: %{{.+}} = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %{{.+}}, i32 %{{.+}})
  int result;
  if (__builtin_sadd_overflow(x, y, &result))
    return IntErrorCode;
  return result;
}

long test_saddl_overflow(long x, long y) {
  // CHECK: @test_saddl_overflow([[UL:i32|i64]] noundef %x
  // CHECK: %{{.+}} = call { [[UL]], i1 } @llvm.sadd.with.overflow.[[UL]]([[UL]] %{{.+}}, [[UL]] %{{.+}})
  long result;
  if (__builtin_saddl_overflow(x, y, &result))
    return LongErrorCode;
  return result;
}

long long test_saddll_overflow(long long x, long long y) {
  // CHECK: @test_saddll_overflow
  // CHECK: %{{.+}} = call { i64, i1 } @llvm.sadd.with.overflow.i64(i64 %{{.+}}, i64 %{{.+}})
  long long result;
  if (__builtin_saddll_overflow(x, y, &result))
    return LongLongErrorCode;
  return result;
}

int test_ssub_overflow(int x, int y) {
  // CHECK: @test_ssub_overflow
  // CHECK: %{{.+}} = call { i32, i1 } @llvm.ssub.with.overflow.i32(i32 %{{.+}}, i32 %{{.+}})
  int result;
  if (__builtin_ssub_overflow(x, y, &result))
    return IntErrorCode;
  return result;
}

long test_ssubl_overflow(long x, long y) {
  // CHECK: @test_ssubl_overflow([[UL:i32|i64]] noundef %x
  // CHECK: %{{.+}} = call { [[UL]], i1 } @llvm.ssub.with.overflow.[[UL]]([[UL]] %{{.+}}, [[UL]] %{{.+}})
  long result;
  if (__builtin_ssubl_overflow(x, y, &result))
    return LongErrorCode;
  return result;
}

long long test_ssubll_overflow(long long x, long long y) {
  // CHECK: @test_ssubll_overflow
  // CHECK: %{{.+}} = call { i64, i1 } @llvm.ssub.with.overflow.i64(i64 %{{.+}}, i64 %{{.+}})
  long long result;
  if (__builtin_ssubll_overflow(x, y, &result))
    return LongLongErrorCode;
  return result;
}

int test_smul_overflow(int x, int y) {
  // CHECK: @test_smul_overflow
  // CHECK: %{{.+}} = call { i32, i1 } @llvm.smul.with.overflow.i32(i32 %{{.+}}, i32 %{{.+}})
  int result;
  if (__builtin_smul_overflow(x, y, &result))
    return IntErrorCode;
  return result;
}

long test_smull_overflow(long x, long y) {
  // CHECK: @test_smull_overflow([[UL:i32|i64]] noundef %x
  // CHECK: %{{.+}} = call { [[UL]], i1 } @llvm.smul.with.overflow.[[UL]]([[UL]] %{{.+}}, [[UL]] %{{.+}})
  long result;
  if (__builtin_smull_overflow(x, y, &result))
    return LongErrorCode;
  return result;
}

long long test_smulll_overflow(long long x, long long y) {
  // CHECK: @test_smulll_overflow
  // CHECK: %{{.+}} = call { i64, i1 } @llvm.smul.with.overflow.i64(i64 %{{.+}}, i64 %{{.+}})
  long long result;
  if (__builtin_smulll_overflow(x, y, &result))
    return LongLongErrorCode;
  return result;
}

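// The mixed-sign multiply tests below exercise a lowering that cannot use a
// single intrinsic directly: the signed operand is replaced by its magnitude,
// the multiply is done unsigned, the permitted maximum is raised by one when
// the result will be negated, and the product is conditionally negated before
// the (possibly truncating) store. The helper below is only an illustrative C
// model of the int * unsigned -> int case checked in
// test_mixed_sign_mull_overflow; it is hypothetical, not part of the original
// test, and not matched by any CHECK line.
static int mixed_sign_mul_model(int x, unsigned y, int *res) {
  int is_neg = x < 0;
  unsigned ax = is_neg ? 0u - (unsigned)x : (unsigned)x; // |x| without UB
  unsigned p;
  int ovf = __builtin_umul_overflow(ax, y, &p);          // |x| * y, unsigned
  ovf |= p > 2147483647u + (unsigned)is_neg;             // INT_MAX, +1 if negating
  unsigned q = is_neg ? 0u - p : p;                      // conditional negation
  *res = (int)q; // implementation-defined narrowing mirrors the raw store in the IR
  return ovf;
}
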
int test_mixed_sign_mul_overflow_sext_signed_op(int x, unsigned long long y) {
  // CHECK: @test_mixed_sign_mul_overflow_sext_signed_op
  // CHECK: [[SignedOp:%.*]] = sext i32 %0 to i64
  // CHECK: [[IsNeg:%.*]] = icmp slt i64 [[SignedOp]], 0
  int result;
  if (__builtin_mul_overflow(x, y, &result))
    return LongErrorCode;
  return result;
}

int test_mixed_sign_mul_overflow_zext_unsigned_op(long long x, unsigned y) {
  // CHECK: @test_mixed_sign_mul_overflow_zext_unsigned_op
  // CHECK: [[UnsignedOp:%.*]] = zext i32 %1 to i64
  // CHECK: [[IsNeg:%.*]] = icmp slt i64 %0, 0
  // CHECK: @llvm.umul.with.overflow.i64({{.*}}, i64 [[UnsignedOp]])
  int result;
  if (__builtin_mul_overflow(x, y, &result))
    return LongErrorCode;
  return result;
}

int test_mixed_sign_mull_overflow(int x, unsigned y) {
  // CHECK: @test_mixed_sign_mull_overflow
  // CHECK: [[IsNeg:%.*]] = icmp slt i32 [[Op1:%.*]], 0
  // CHECK-NEXT: [[Signed:%.*]] = sub i32 0, [[Op1]]
  // CHECK-NEXT: [[AbsSigned:%.*]] = select i1 [[IsNeg]], i32 [[Signed]], i32 [[Op1]]
  // CHECK-NEXT: call { i32, i1 } @llvm.umul.with.overflow.i32(i32 [[AbsSigned]], i32 %{{.*}})
  // CHECK-NEXT: [[UnsignedOFlow:%.*]] = extractvalue { i32, i1 } %{{.*}}, 1
  // CHECK-NEXT: [[UnsignedResult:%.*]] = extractvalue { i32, i1 } %{{.*}}, 0
  // CHECK-NEXT: [[IsNegZext:%.*]] = zext i1 [[IsNeg]] to i32
  // CHECK-NEXT: [[MaxResult:%.*]] = add i32 2147483647, [[IsNegZext]]
  // CHECK-NEXT: [[SignedOFlow:%.*]] = icmp ugt i32 [[UnsignedResult]], [[MaxResult]]
  // CHECK-NEXT: [[OFlow:%.*]] = or i1 [[UnsignedOFlow]], [[SignedOFlow]]
  // CHECK-NEXT: [[NegativeResult:%.*]] = sub i32 0, [[UnsignedResult]]
  // CHECK-NEXT: [[Result:%.*]] = select i1 [[IsNeg]], i32 [[NegativeResult]], i32 [[UnsignedResult]]
  // CHECK-NEXT: store i32 [[Result]], ptr %{{.*}}, align 4
  // CHECK: br i1 [[OFlow]]

  int result;
  if (__builtin_mul_overflow(x, y, &result))
    return LongErrorCode;
  return result;
}

int test_mixed_sign_mull_overflow_unsigned(int x, unsigned y) {
  // CHECK: @test_mixed_sign_mull_overflow_unsigned
  // CHECK: [[IsNeg:%.*]] = icmp slt i32 [[Op1:%.*]], 0
  // CHECK-NEXT: [[Signed:%.*]] = sub i32 0, [[Op1]]
  // CHECK-NEXT: [[AbsSigned:%.*]] = select i1 [[IsNeg]], i32 [[Signed]], i32 [[Op1]]
  // CHECK-NEXT: call { i32, i1 } @llvm.umul.with.overflow.i32(i32 [[AbsSigned]], i32 %{{.*}})
  // CHECK-NEXT: [[UnsignedOFlow:%.*]] = extractvalue { i32, i1 } %{{.*}}, 1
  // CHECK-NEXT: [[UnsignedResult:%.*]] = extractvalue { i32, i1 } %{{.*}}, 0
  // CHECK-NEXT: [[NotNull:%.*]] = icmp ne i32 [[UnsignedResult]], 0
  // CHECK-NEXT: [[Underflow:%.*]] = and i1 [[IsNeg]], [[NotNull]]
  // CHECK-NEXT: [[OFlow:%.*]] = or i1 [[UnsignedOFlow]], [[Underflow]]
  // CHECK-NEXT: [[NegatedResult:%.*]] = sub i32 0, [[UnsignedResult]]
  // CHECK-NEXT: [[Result:%.*]] = select i1 [[IsNeg]], i32 [[NegatedResult]], i32 [[UnsignedResult]]
  // CHECK-NEXT: store i32 [[Result]], ptr %{{.*}}, align 4
  // CHECK: br i1 [[OFlow]]

  unsigned result;
  if (__builtin_mul_overflow(x, y, &result))
    return LongErrorCode;
  return result;
}

int test_mixed_sign_mull_overflow_swapped(int x, unsigned y) {
  // CHECK: @test_mixed_sign_mull_overflow_swapped
  // CHECK: call { i32, i1 } @llvm.umul.with.overflow.i32
  // CHECK: add i32 2147483647
  int result;
  if (__builtin_mul_overflow(y, x, &result))
    return LongErrorCode;
  return result;
}

long long test_mixed_sign_mulll_overflow(long long x, unsigned long long y) {
  // CHECK: @test_mixed_sign_mulll_overflow
  // CHECK: call { i64, i1 } @llvm.umul.with.overflow.i64
  // CHECK: add i64 92233720368547
  long long result;
  if (__builtin_mul_overflow(x, y, &result))
    return LongLongErrorCode;
  return result;
}

long long test_mixed_sign_mulll_overflow_swapped(long long x, unsigned long long y) {
  // CHECK: @test_mixed_sign_mulll_overflow_swapped
  // CHECK: call { i64, i1 } @llvm.umul.with.overflow.i64
  // CHECK: add i64 92233720368547
  long long result;
  if (__builtin_mul_overflow(y, x, &result))
    return LongLongErrorCode;
  return result;
}

long long test_mixed_sign_mulll_overflow_trunc_signed(long long x, unsigned long long y) {
  // CHECK: @test_mixed_sign_mulll_overflow_trunc_signed
  // CHECK: call { i64, i1 } @llvm.umul.with.overflow.i64
  // CHECK: add i64 2147483647
  // CHECK: trunc
  // CHECK: store
  int result;
  if (__builtin_mul_overflow(y, x, &result))
    return LongLongErrorCode;
  return result;
}

long long test_mixed_sign_mulll_overflow_trunc_unsigned(long long x, unsigned long long y) {
  // CHECK: @test_mixed_sign_mulll_overflow_trunc_unsigned
  // CHECK: call { i64, i1 } @llvm.umul.with.overflow.i64
  // CHECK: [[NON_ZERO:%.*]] = icmp ne i64 [[UNSIGNED_RESULT:%.*]], 0
  // CHECK-NEXT: [[UNDERFLOW:%.*]] = and i1 {{.*}}, [[NON_ZERO]]
  // CHECK-NEXT: [[OVERFLOW_PRE_TRUNC:%.*]] = or i1 {{.*}}, [[UNDERFLOW]]
  // CHECK-NEXT: [[TRUNC_OVERFLOW:%.*]] = icmp ugt i64 [[UNSIGNED_RESULT]], 4294967295
  // CHECK-NEXT: [[OVERFLOW:%.*]] = or i1 [[OVERFLOW_PRE_TRUNC]], [[TRUNC_OVERFLOW]]
  // CHECK-NEXT: [[NEGATED:%.*]] = sub i64 0, [[UNSIGNED_RESULT]]
  // CHECK-NEXT: [[RESULT:%.*]] = select i1 {{.*}}, i64 [[NEGATED]], i64 [[UNSIGNED_RESULT]]
  // CHECK-NEXT: trunc i64 [[RESULT]] to i32
  // CHECK-NEXT: store
  unsigned result;
  if (__builtin_mul_overflow(y, x, &result))
    return LongLongErrorCode;
  return result;
}

long long test_mixed_sign_mul_overflow_extend_signed(int x, unsigned y) {
  // CHECK: @test_mixed_sign_mul_overflow_extend_signed
  // CHECK: call { i64, i1 } @llvm.smul.with.overflow.i64
  long long result;
  if (__builtin_mul_overflow(y, x, &result))
    return LongLongErrorCode;
  return result;
}

long long test_mixed_sign_mul_overflow_extend_unsigned(int x, unsigned y) {
  // CHECK: @test_mixed_sign_mul_overflow_extend_unsigned
  // CHECK: call { i65, i1 } @llvm.smul.with.overflow.i65
  unsigned long long result;
  if (__builtin_mul_overflow(y, x, &result))
    return LongLongErrorCode;
  return result;
}