; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -passes=instcombine -S | FileCheck %s

declare { <2 x i32>, <2 x i1> } @llvm.uadd.with.overflow.v2i32(<2 x i32>, <2 x i32>)

declare { <2 x i8>, <2 x i1> } @llvm.uadd.with.overflow.v2i8(<2 x i8>, <2 x i8>)

declare { i32, i1 } @llvm.uadd.with.overflow.i32(i32, i32)

declare { i8, i1 } @llvm.uadd.with.overflow.i8(i8, i8)
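
; Fold (uadd.with.overflow (add nuw X, C1), C2) into
; (uadd.with.overflow X, C1 + C2) when C1 + C2 cannot overflow: 7 + 13 = 20.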
define { i32, i1 } @simple_fold(i32 %x) {
; CHECK-LABEL: @simple_fold(
; CHECK-NEXT:    [[B:%.*]] = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 [[X:%.*]], i32 20)
; CHECK-NEXT:    ret { i32, i1 } [[B]]
;
  %a = add nuw i32 %x, 7
  %b = tail call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %a, i32 13)
  ret { i32, i1 } %b
}
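
; 200 + 55 = 255 still fits in i8 (printed as -1), so the constants combine.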
define { i8, i1 } @fold_on_constant_add_no_overflow(i8 %x) {
; CHECK-LABEL: @fold_on_constant_add_no_overflow(
; CHECK-NEXT:    [[B:%.*]] = call { i8, i1 } @llvm.uadd.with.overflow.i8(i8 [[X:%.*]], i8 -1)
; CHECK-NEXT:    ret { i8, i1 } [[B]]
;
  %a = add nuw i8 %x, 200
  %b = tail call { i8, i1 } @llvm.uadd.with.overflow.i8(i8 %a, i8 55)
  ret { i8, i1 } %b
}
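
; 200 + 56 = 256 wraps i8, so the constants cannot be combined. Instead the
; overflow bit is known true (nuw implies %a >= 200, and %a + 56 > 255), and
; the wrapped sum is just %x, so the whole call becomes { %x, true }.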
define { i8, i1 } @no_fold_on_constant_add_overflow(i8 %x) {
; CHECK-LABEL: @no_fold_on_constant_add_overflow(
; CHECK-NEXT:    [[TMP1:%.*]] = insertvalue { i8, i1 } { i8 poison, i1 true }, i8 [[X:%.*]], 0
; CHECK-NEXT:    ret { i8, i1 } [[TMP1]]
;
  %a = add nuw i8 %x, 200
  %b = tail call { i8, i1 } @llvm.uadd.with.overflow.i8(i8 %a, i8 56)
  ret { i8, i1 } %b
}
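
; No lane overflows (199 + 55 = 254, 200 + 55 = 255), but the fold currently
; only matches splat vector constants, so a non-splat addend is left alone.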
define { <2 x i8>, <2 x i1> } @no_fold_vector_no_overflow(<2 x i8> %x) {
; CHECK-LABEL: @no_fold_vector_no_overflow(
; CHECK-NEXT:    [[A:%.*]] = add nuw <2 x i8> [[X:%.*]], <i8 -57, i8 -56>
; CHECK-NEXT:    [[B:%.*]] = tail call { <2 x i8>, <2 x i1> } @llvm.uadd.with.overflow.v2i8(<2 x i8> [[A]], <2 x i8> <i8 55, i8 55>)
; CHECK-NEXT:    ret { <2 x i8>, <2 x i1> } [[B]]
;
  %a = add nuw <2 x i8> %x, <i8 199, i8 200>
  %b = tail call { <2 x i8>, <2 x i1> } @llvm.uadd.with.overflow.v2i8(<2 x i8> %a, <2 x i8> <i8 55, i8 55>)
  ret { <2 x i8>, <2 x i1> } %b
}
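
; Non-splat again, and lane 1 would also overflow (201 + 55 = 256).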
define { <2 x i8>, <2 x i1> } @no_fold_vector_overflow(<2 x i8> %x) {
; CHECK-LABEL: @no_fold_vector_overflow(
; CHECK-NEXT:    [[A:%.*]] = add nuw <2 x i8> [[X:%.*]], <i8 -56, i8 -55>
; CHECK-NEXT:    [[B:%.*]] = tail call { <2 x i8>, <2 x i1> } @llvm.uadd.with.overflow.v2i8(<2 x i8> [[A]], <2 x i8> <i8 55, i8 55>)
; CHECK-NEXT:    ret { <2 x i8>, <2 x i1> } [[B]]
;
  %a = add nuw <2 x i8> %x, <i8 200, i8 201>
  %b = tail call { <2 x i8>, <2 x i1> } @llvm.uadd.with.overflow.v2i8(<2 x i8> %a, <2 x i8> <i8 55, i8 55>)
  ret { <2 x i8>, <2 x i1> } %b
}
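
; Splat constants do fold: 12 + 30 = 42 in both lanes.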
define { <2 x i32>, <2 x i1> } @fold_simple_splat_constant(<2 x i32> %x) {
; CHECK-LABEL: @fold_simple_splat_constant(
; CHECK-NEXT:    [[B:%.*]] = call { <2 x i32>, <2 x i1> } @llvm.uadd.with.overflow.v2i32(<2 x i32> [[X:%.*]], <2 x i32> <i32 42, i32 42>)
; CHECK-NEXT:    ret { <2 x i32>, <2 x i1> } [[B]]
;
  %a = add nuw <2 x i32> %x, <i32 12, i32 12>
  %b = tail call { <2 x i32>, <2 x i1> } @llvm.uadd.with.overflow.v2i32(<2 x i32> %a, <2 x i32> <i32 30, i32 30>)
  ret { <2 x i32>, <2 x i1> } %b
}
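
; An undef lane stops the addend from matching as a splat constant, so the
; fold does not fire.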
define { <2 x i32>, <2 x i1> } @no_fold_splat_undef_constant(<2 x i32> %x) {
; CHECK-LABEL: @no_fold_splat_undef_constant(
; CHECK-NEXT:    [[A:%.*]] = add nuw <2 x i32> [[X:%.*]], <i32 12, i32 undef>
; CHECK-NEXT:    [[B:%.*]] = tail call { <2 x i32>, <2 x i1> } @llvm.uadd.with.overflow.v2i32(<2 x i32> [[A]], <2 x i32> <i32 30, i32 30>)
; CHECK-NEXT:    ret { <2 x i32>, <2 x i1> } [[B]]
;
  %a = add nuw <2 x i32> %x, <i32 12, i32 undef>
  %b = tail call { <2 x i32>, <2 x i1> } @llvm.uadd.with.overflow.v2i32(<2 x i32> %a, <2 x i32> <i32 30, i32 30>)
  ret { <2 x i32>, <2 x i1> } %b
}
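
; A variable addend cannot be folded into the intrinsic's constant operand.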
define { <2 x i32>, <2 x i1> } @no_fold_splat_not_constant(<2 x i32> %x, <2 x i32> %y) {
; CHECK-LABEL: @no_fold_splat_not_constant(
; CHECK-NEXT:    [[A:%.*]] = add nuw <2 x i32> [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT:    [[B:%.*]] = tail call { <2 x i32>, <2 x i1> } @llvm.uadd.with.overflow.v2i32(<2 x i32> [[A]], <2 x i32> <i32 30, i32 30>)
; CHECK-NEXT:    ret { <2 x i32>, <2 x i1> } [[B]]
;
  %a = add nuw <2 x i32> %x, %y
  %b = tail call { <2 x i32>, <2 x i1> } @llvm.uadd.with.overflow.v2i32(<2 x i32> %a, <2 x i32> <i32 30, i32 30>)
  ret { <2 x i32>, <2 x i1> } %b
}
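
; An extra nsw flag does not block the fold: 12 + 30 = 42.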
define { i32, i1 } @fold_nuwnsw(i32 %x) {
; CHECK-LABEL: @fold_nuwnsw(
; CHECK-NEXT:    [[B:%.*]] = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 [[X:%.*]], i32 42)
; CHECK-NEXT:    ret { i32, i1 } [[B]]
;
  %a = add nuw nsw i32 %x, 12
  %b = tail call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %a, i32 30)
  ret { i32, i1 } %b
}
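
; nsw alone is not enough: only nuw rules out unsigned wrap of the inner add.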
define { i32, i1 } @no_fold_nsw(i32 %x) {
; CHECK-LABEL: @no_fold_nsw(
; CHECK-NEXT:    [[A:%.*]] = add nsw i32 [[X:%.*]], 12
; CHECK-NEXT:    [[B:%.*]] = tail call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 [[A]], i32 30)
; CHECK-NEXT:    ret { i32, i1 } [[B]]
;
  %a = add nsw i32 %x, 12
  %b = tail call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %a, i32 30)
  ret { i32, i1 } %b
}
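
; With no wrap flag the inner add may wrap, so no fold. The commuted call
; operands (constant first) are canonicalized so the constant ends up as the
; second argument, as the checks show.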
define { i32, i1 } @no_fold_wrapped_add(i32 %x) {
; CHECK-LABEL: @no_fold_wrapped_add(
; CHECK-NEXT:    [[A:%.*]] = add i32 [[X:%.*]], 12
; CHECK-NEXT:    [[B:%.*]] = tail call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 [[A]], i32 30)
; CHECK-NEXT:    ret { i32, i1 } [[B]]
;
  %a = add i32 %x, 12
  %b = tail call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 30, i32 %a)
  ret { i32, i1 } %b
}