; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt %s -passes=instsimplify -S | FileCheck %s

; Here we add two unsigned values, check that the addition did not underflow
; AND that the result is non-zero. This can be simplified just to a comparison
; between the base and negated offset.
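;
; Roughly, as an illustrative sketch only (the %negated_offset name is
; hypothetical, and this exact fold is not what the checks below assert):
; assuming %offset is known to be negative (hence non-zero),
;   %adjusted     = add i8 %base, %offset
;   %not_null     = icmp ne i8 %adjusted, 0
;   %no_underflow = icmp ult i8 %adjusted, %base
;   %r = and i1 %not_null, %no_underflow
; is equivalent to the single compare
;   %negated_offset = sub i8 0, %offset
;   %r = icmp ugt i8 %base, %negated_offset
; The functions below exercise degenerate variants where one add operand is
; known negative, so the combined check is tautologically true (t1, t2, t5)
; or always false (t3, t4).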

declare void @use8(i8)
declare void @use1(i1)
declare void @llvm.assume(i1)

; If we are checking that the result is not null or that no underflow happened,
; it is tautological (always-true).
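; E.g. (illustrative i8 values, not part of the checked IR): with %base = -16
; (0xF0) and %offset = 16, %adjusted is 0, so %not_null is false, but
; "%adjusted u< %base" is "0 u< 240", which is true; for any other %offset,
; %not_null itself is true. So the 'or' holds whenever %base is known non-zero.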
define i1 @t1(i8 %base, i8 %offset) {
; CHECK-LABEL: @t1(
; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i8 [[BASE:%.*]], 0
; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP]])
; CHECK-NEXT:    [[ADJUSTED:%.*]] = add i8 [[BASE]], [[OFFSET:%.*]]
; CHECK-NEXT:    call void @use8(i8 [[ADJUSTED]])
; CHECK-NEXT:    [[NOT_NULL:%.*]] = icmp ne i8 [[ADJUSTED]], 0
; CHECK-NEXT:    [[NO_UNDERFLOW:%.*]] = icmp ult i8 [[ADJUSTED]], [[BASE]]
; CHECK-NEXT:    [[R:%.*]] = or i1 [[NOT_NULL]], [[NO_UNDERFLOW]]
; CHECK-NEXT:    ret i1 [[R]]
;
  %cmp = icmp slt i8 %base, 0
  call void @llvm.assume(i1 %cmp)

  %adjusted = add i8 %base, %offset
  call void @use8(i8 %adjusted)
  %not_null = icmp ne i8 %adjusted, 0
  %no_underflow = icmp ult i8 %adjusted, %base
  %r = or i1 %not_null, %no_underflow
  ret i1 %r
}
define i1 @t2_commutative(i8 %base, i8 %offset) {
; CHECK-LABEL: @t2_commutative(
; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i8 [[BASE:%.*]], 0
; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP]])
; CHECK-NEXT:    [[ADJUSTED:%.*]] = add i8 [[BASE]], [[OFFSET:%.*]]
; CHECK-NEXT:    call void @use8(i8 [[ADJUSTED]])
; CHECK-NEXT:    [[NOT_NULL:%.*]] = icmp ne i8 [[ADJUSTED]], 0
; CHECK-NEXT:    [[NO_UNDERFLOW:%.*]] = icmp ugt i8 [[BASE]], [[ADJUSTED]]
; CHECK-NEXT:    [[R:%.*]] = or i1 [[NOT_NULL]], [[NO_UNDERFLOW]]
; CHECK-NEXT:    ret i1 [[R]]
;
  %cmp = icmp slt i8 %base, 0
  call void @llvm.assume(i1 %cmp)

  %adjusted = add i8 %base, %offset
  call void @use8(i8 %adjusted)
  %not_null = icmp ne i8 %adjusted, 0
  %no_underflow = icmp ugt i8 %base, %adjusted
  %r = or i1 %not_null, %no_underflow
  ret i1 %r
}

; If we are checking that the result is null and that underflow happened,
; it is a contradiction (always-false).
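; E.g. (illustrative reasoning, not part of the checked IR): for the 'and' to be
; true, %adjusted would have to be 0 while "%adjusted u>= %base" also holds,
; i.e. "0 u>= %base", which forces %base to be 0 and contradicts the assume
; that %base is negative.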
define i1 @t3(i8 %base, i8 %offset) {
; CHECK-LABEL: @t3(
; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i8 [[BASE:%.*]], 0
; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP]])
; CHECK-NEXT:    [[ADJUSTED:%.*]] = add i8 [[BASE]], [[OFFSET:%.*]]
; CHECK-NEXT:    call void @use8(i8 [[ADJUSTED]])
; CHECK-NEXT:    [[NOT_NULL:%.*]] = icmp eq i8 [[ADJUSTED]], 0
; CHECK-NEXT:    [[NO_UNDERFLOW:%.*]] = icmp uge i8 [[ADJUSTED]], [[BASE]]
; CHECK-NEXT:    [[R:%.*]] = and i1 [[NOT_NULL]], [[NO_UNDERFLOW]]
; CHECK-NEXT:    ret i1 [[R]]
;
  %cmp = icmp slt i8 %base, 0
  call void @llvm.assume(i1 %cmp)

  %adjusted = add i8 %base, %offset
  call void @use8(i8 %adjusted)
  %not_null = icmp eq i8 %adjusted, 0
  %no_underflow = icmp uge i8 %adjusted, %base
  %r = and i1 %not_null, %no_underflow
  ret i1 %r
}
define i1 @t4_commutative(i8 %base, i8 %offset) {
; CHECK-LABEL: @t4_commutative(
; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i8 [[BASE:%.*]], 0
; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP]])
; CHECK-NEXT:    [[ADJUSTED:%.*]] = add i8 [[BASE]], [[OFFSET:%.*]]
; CHECK-NEXT:    call void @use8(i8 [[ADJUSTED]])
; CHECK-NEXT:    [[NOT_NULL:%.*]] = icmp eq i8 [[ADJUSTED]], 0
; CHECK-NEXT:    [[NO_UNDERFLOW:%.*]] = icmp ule i8 [[BASE]], [[ADJUSTED]]
; CHECK-NEXT:    [[R:%.*]] = and i1 [[NOT_NULL]], [[NO_UNDERFLOW]]
; CHECK-NEXT:    ret i1 [[R]]
;
  %cmp = icmp slt i8 %base, 0
  call void @llvm.assume(i1 %cmp)

  %adjusted = add i8 %base, %offset
  call void @use8(i8 %adjusted)
  %not_null = icmp eq i8 %adjusted, 0
  %no_underflow = icmp ule i8 %base, %adjusted
  %r = and i1 %not_null, %no_underflow
  ret i1 %r
}

; We only need to know that one of the 'add' operands is non-zero,
; not necessarily the one used in the comparison.
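; E.g. (illustrative i8 values, not part of the checked IR): with %offset = -16
; and %base = 16, %adjusted is 0, so %not_null is false, but "%adjusted u< %base"
; is "0 u< 16", which is true. Whenever %adjusted is 0, %base equals -%offset,
; which is non-zero because %offset is negative, so the 'or' still always holds.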
define i1 @t5(i8 %base, i8 %offset) {
; CHECK-LABEL: @t5(
; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i8 [[OFFSET:%.*]], 0
; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP]])
; CHECK-NEXT:    [[ADJUSTED:%.*]] = add i8 [[BASE:%.*]], [[OFFSET]]
; CHECK-NEXT:    call void @use8(i8 [[ADJUSTED]])
; CHECK-NEXT:    [[NOT_NULL:%.*]] = icmp ne i8 [[ADJUSTED]], 0
; CHECK-NEXT:    [[NO_UNDERFLOW:%.*]] = icmp ult i8 [[ADJUSTED]], [[BASE]]
; CHECK-NEXT:    [[R:%.*]] = or i1 [[NOT_NULL]], [[NO_UNDERFLOW]]
; CHECK-NEXT:    ret i1 [[R]]
;
  %cmp = icmp slt i8 %offset, 0
  call void @llvm.assume(i1 %cmp)

  %adjusted = add i8 %base, %offset
  call void @use8(i8 %adjusted)
  %not_null = icmp ne i8 %adjusted, 0
  %no_underflow = icmp ult i8 %adjusted, %base
  %r = or i1 %not_null, %no_underflow
  ret i1 %r
}