; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt %s -passes=instsimplify -S | FileCheck %s

; Here we add two unsigned values, check that the addition did not underflow
; AND that the result is non-zero. This can be simplified to just a comparison
; between the base and the negated offset.
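;
; (Informally: the 'assume' in each test pins the sign bit of one add operand,
; so that operand, read as an unsigned value, is at least 128 and in
; particular non-zero; the folds checked below all lean on this fact.)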

declare void @use8(i8)

declare void @use1(i1)
declare void @llvm.assume(i1)

; If we are checking that the result is not null OR that no underflow
; happened, the check is tautological (always-true).
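; (Sketch of why: either %adjusted u< %base, so the no-underflow operand of
; the 'or' is true, or %adjusted u>= %base u>= 128, so %adjusted is non-zero
; and the not-null operand is true. Either way the 'or' is true.)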
define i1 @t1(i8 %base, i8 %offset) {
; CHECK-LABEL: @t1(
; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i8 [[BASE:%.*]], 0
; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP]])
; CHECK-NEXT:    [[ADJUSTED:%.*]] = add i8 [[BASE]], [[OFFSET:%.*]]
; CHECK-NEXT:    call void @use8(i8 [[ADJUSTED]])
; CHECK-NEXT:    ret i1 true
;
  %cmp = icmp slt i8 %base, 0
  call void @llvm.assume(i1 %cmp)

  %adjusted = add i8 %base, %offset
  call void @use8(i8 %adjusted)
  %not_null = icmp ne i8 %adjusted, 0
  %no_underflow = icmp ult i8 %adjusted, %base
  %r = or i1 %not_null, %no_underflow
  ret i1 %r
}

define i1 @t2_commutative(i8 %base, i8 %offset) {
; CHECK-LABEL: @t2_commutative(
; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i8 [[BASE:%.*]], 0
; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP]])
; CHECK-NEXT:    [[ADJUSTED:%.*]] = add i8 [[BASE]], [[OFFSET:%.*]]
; CHECK-NEXT:    call void @use8(i8 [[ADJUSTED]])
; CHECK-NEXT:    ret i1 true
;
  %cmp = icmp slt i8 %base, 0
  call void @llvm.assume(i1 %cmp)

  %adjusted = add i8 %base, %offset
  call void @use8(i8 %adjusted)
  %not_null = icmp ne i8 %adjusted, 0
  %no_underflow = icmp ugt i8 %base, %adjusted
  %r = or i1 %not_null, %no_underflow
  ret i1 %r
}

; If we are checking that the result is null AND that no underflow happened,
; the check is a contradiction (always-false).
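; (Sketch of why: %adjusted u>= %base u>= 128 would force %adjusted to be
; non-zero, contradicting %adjusted == 0, so both 'and' operands can never
; hold at once.)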
define i1 @t3(i8 %base, i8 %offset) {
; CHECK-LABEL: @t3(
; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i8 [[BASE:%.*]], 0
; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP]])
; CHECK-NEXT:    [[ADJUSTED:%.*]] = add i8 [[BASE]], [[OFFSET:%.*]]
; CHECK-NEXT:    call void @use8(i8 [[ADJUSTED]])
; CHECK-NEXT:    ret i1 false
;
  %cmp = icmp slt i8 %base, 0
  call void @llvm.assume(i1 %cmp)

  %adjusted = add i8 %base, %offset
  call void @use8(i8 %adjusted)
  %not_null = icmp eq i8 %adjusted, 0
  %no_underflow = icmp uge i8 %adjusted, %base
  %r = and i1 %not_null, %no_underflow
  ret i1 %r
}

define i1 @t4_commutative(i8 %base, i8 %offset) {
; CHECK-LABEL: @t4_commutative(
; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i8 [[BASE:%.*]], 0
; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP]])
; CHECK-NEXT:    [[ADJUSTED:%.*]] = add i8 [[BASE]], [[OFFSET:%.*]]
; CHECK-NEXT:    call void @use8(i8 [[ADJUSTED]])
; CHECK-NEXT:    ret i1 false
;
  %cmp = icmp slt i8 %base, 0
  call void @llvm.assume(i1 %cmp)

  %adjusted = add i8 %base, %offset
  call void @use8(i8 %adjusted)
  %not_null = icmp eq i8 %adjusted, 0
  %no_underflow = icmp ule i8 %base, %adjusted
  %r = and i1 %not_null, %no_underflow
  ret i1 %r
}

; We only need to know that either of the 'add' operands is non-zero,
; not necessarily the one used in the comparison.
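; (The fold would arguably still be sound here: if %adjusted were zero while
; %adjusted u>= %base, then %base would be zero, and hence %offset =
; %adjusted - %base would be zero too, contradicting the assume. As the
; retained IR in the checks below shows, though, this variant is currently
; left unsimplified.)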
define i1 @t5(i8 %base, i8 %offset) {
; CHECK-LABEL: @t5(
; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i8 [[OFFSET:%.*]], 0
; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP]])
; CHECK-NEXT:    [[ADJUSTED:%.*]] = add i8 [[BASE:%.*]], [[OFFSET]]
; CHECK-NEXT:    call void @use8(i8 [[ADJUSTED]])
; CHECK-NEXT:    [[NOT_NULL:%.*]] = icmp ne i8 [[ADJUSTED]], 0
; CHECK-NEXT:    [[NO_UNDERFLOW:%.*]] = icmp ult i8 [[ADJUSTED]], [[BASE]]
; CHECK-NEXT:    [[R:%.*]] = or i1 [[NOT_NULL]], [[NO_UNDERFLOW]]
; CHECK-NEXT:    ret i1 [[R]]
;
  %cmp = icmp slt i8 %offset, 0
  call void @llvm.assume(i1 %cmp)

  %adjusted = add i8 %base, %offset
  call void @use8(i8 %adjusted)
  %not_null = icmp ne i8 %adjusted, 0
  %no_underflow = icmp ult i8 %adjusted, %base
  %r = or i1 %not_null, %no_underflow
  ret i1 %r
}