; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt %s -instsimplify -S | FileCheck %s

; Here we subtract two values, check that subtraction did not overflow AND
; that the result is non-zero. This can be simplified just to a comparison
; between the base and offset.
define i1 @t0(i64 %base, i64* nonnull %offsetptr) {
; CHECK-LABEL: @t0(
; CHECK-NEXT:    [[OFFSET:%.*]] = ptrtoint i64* [[OFFSETPTR:%.*]] to i64
; CHECK-NEXT:    [[ADJUSTED:%.*]] = sub i64 [[BASE:%.*]], [[OFFSET]]
; CHECK-NEXT:    [[NO_UNDERFLOW:%.*]] = icmp uge i64 [[ADJUSTED]], [[BASE]]
; CHECK-NEXT:    ret i1 [[NO_UNDERFLOW]]
;
  %offset = ptrtoint i64* %offsetptr to i64

  %adjusted = sub i64 %base, %offset
  %no_underflow = icmp uge i64 %adjusted, %base
  %not_null = icmp ne i64 %adjusted, 0
  %r = and i1 %not_null, %no_underflow
  ret i1 %r
}
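
; Same as @t0, but checking the inverted condition: the result is zero OR the
; subtraction wrapped ('or' of 'eq' and 'ult' instead of 'and' of 'ne' and 'uge').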
define i1 @t1(i64 %base, i64* nonnull %offsetptr) {
; CHECK-LABEL: @t1(
; CHECK-NEXT:    [[OFFSET:%.*]] = ptrtoint i64* [[OFFSETPTR:%.*]] to i64
; CHECK-NEXT:    [[ADJUSTED:%.*]] = sub i64 [[BASE:%.*]], [[OFFSET]]
; CHECK-NEXT:    [[NO_UNDERFLOW:%.*]] = icmp ult i64 [[ADJUSTED]], [[BASE]]
; CHECK-NEXT:    ret i1 [[NO_UNDERFLOW]]
;
  %offset = ptrtoint i64* %offsetptr to i64

  %adjusted = sub i64 %base, %offset
  %no_underflow = icmp ult i64 %adjusted, %base
  %not_null = icmp eq i64 %adjusted, 0
  %r = or i1 %not_null, %no_underflow
  ret i1 %r
}
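
; Same as @t0, but the operands of the unsigned comparison are commuted.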
define i1 @t2_commutative(i64 %base, i64* nonnull %offsetptr) {
; CHECK-LABEL: @t2_commutative(
; CHECK-NEXT:    [[OFFSET:%.*]] = ptrtoint i64* [[OFFSETPTR:%.*]] to i64
; CHECK-NEXT:    [[ADJUSTED:%.*]] = sub i64 [[BASE:%.*]], [[OFFSET]]
; CHECK-NEXT:    [[NO_UNDERFLOW:%.*]] = icmp ule i64 [[BASE]], [[ADJUSTED]]
; CHECK-NEXT:    ret i1 [[NO_UNDERFLOW]]
;
  %offset = ptrtoint i64* %offsetptr to i64

  %adjusted = sub i64 %base, %offset
  %no_underflow = icmp ule i64 %base, %adjusted
  %not_null = icmp ne i64 %adjusted, 0
  %r = and i1 %not_null, %no_underflow
  ret i1 %r
}
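
; Same as @t1, but the operands of the unsigned comparison are commuted.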
define i1 @t3_commutative(i64 %base, i64* nonnull %offsetptr) {
; CHECK-LABEL: @t3_commutative(
; CHECK-NEXT:    [[OFFSET:%.*]] = ptrtoint i64* [[OFFSETPTR:%.*]] to i64
; CHECK-NEXT:    [[ADJUSTED:%.*]] = sub i64 [[BASE:%.*]], [[OFFSET]]
; CHECK-NEXT:    [[NO_UNDERFLOW:%.*]] = icmp ugt i64 [[BASE]], [[ADJUSTED]]
; CHECK-NEXT:    ret i1 [[NO_UNDERFLOW]]
;
  %offset = ptrtoint i64* %offsetptr to i64

  %adjusted = sub i64 %base, %offset
  %no_underflow = icmp ugt i64 %base, %adjusted
  %not_null = icmp eq i64 %adjusted, 0
  %r = or i1 %not_null, %no_underflow
  ret i1 %r
}

; We don't know that offset is non-zero, so we can't fold.
define i1 @t4_bad(i64 %base, i64 %offset) {
; CHECK-LABEL: @t4_bad(
; CHECK-NEXT:    [[ADJUSTED:%.*]] = sub i64 [[BASE:%.*]], [[OFFSET:%.*]]
; CHECK-NEXT:    [[NO_UNDERFLOW:%.*]] = icmp uge i64 [[ADJUSTED]], [[BASE]]
; CHECK-NEXT:    [[NOT_NULL:%.*]] = icmp ne i64 [[ADJUSTED]], 0
; CHECK-NEXT:    [[R:%.*]] = and i1 [[NOT_NULL]], [[NO_UNDERFLOW]]
; CHECK-NEXT:    ret i1 [[R]]
;
  %adjusted = sub i64 %base, %offset
  %no_underflow = icmp uge i64 %adjusted, %base
  %not_null = icmp ne i64 %adjusted, 0
  %r = and i1 %not_null, %no_underflow
  ret i1 %r
}