1 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
2 ; RUN: opt < %s -instcombine -S | FileCheck %s
4 declare void @llvm.assume(i1)
; Positive test: (sub %base, %offset) u< %base  -->  %base u>= %offset.
; The fold is only valid when %offset is known non-zero; here the assume
; establishes %offset s< 0, which implies %offset != 0.
8 define i1 @t0(i8 %base, i8 %offset) {
10 ; CHECK-NEXT: [[CMP:%.*]] = icmp slt i8 [[OFFSET:%.*]], 0
11 ; CHECK-NEXT: call void @llvm.assume(i1 [[CMP]])
12 ; CHECK-NEXT: [[ADJUSTED:%.*]] = sub i8 [[BASE:%.*]], [[OFFSET]]
13 ; CHECK-NEXT: call void @use8(i8 [[ADJUSTED]])
14 ; CHECK-NEXT: [[RES:%.*]] = icmp uge i8 [[BASE]], [[OFFSET]]
15 ; CHECK-NEXT: ret i1 [[RES]]
17 %cmp = icmp slt i8 %offset, 0 ; teach the optimizer that %offset is negative (hence non-zero)
18 call void @llvm.assume(i1 %cmp)
20 %adjusted = sub i8 %base, %offset ; extra use below keeps %adjusted alive after the fold
21 call void @use8(i8 %adjusted)
22 %res = icmp ult i8 %adjusted, %base
; Same as @t0 but with the inverse predicate:
; (sub %base, %offset) u>= %base  -->  %base u< %offset,
; again justified by the assume making %offset non-zero.
25 define i1 @t1(i8 %base, i8 %offset) {
27 ; CHECK-NEXT: [[CMP:%.*]] = icmp slt i8 [[OFFSET:%.*]], 0
28 ; CHECK-NEXT: call void @llvm.assume(i1 [[CMP]])
29 ; CHECK-NEXT: [[ADJUSTED:%.*]] = sub i8 [[BASE:%.*]], [[OFFSET]]
30 ; CHECK-NEXT: call void @use8(i8 [[ADJUSTED]])
31 ; CHECK-NEXT: [[RES:%.*]] = icmp ult i8 [[BASE]], [[OFFSET]]
32 ; CHECK-NEXT: ret i1 [[RES]]
34 %cmp = icmp slt i8 %offset, 0 ; %offset s< 0 implies %offset != 0
35 call void @llvm.assume(i1 %cmp)
37 %adjusted = sub i8 %base, %offset
38 call void @use8(i8 %adjusted)
39 %res = icmp uge i8 %adjusted, %base ; inverse predicate of @t0
; Commuted variant of @t0: the compare is %base u> %adjusted instead of
; %adjusted u< %base. %base comes from a call so that complexity-based
; canonicalization keeps the operand order under test.
42 define i1 @t2(i8 %offset) {
44 ; CHECK-NEXT: [[CMP:%.*]] = icmp slt i8 [[OFFSET:%.*]], 0
45 ; CHECK-NEXT: call void @llvm.assume(i1 [[CMP]])
46 ; CHECK-NEXT: [[BASE:%.*]] = call i8 @gen8()
47 ; CHECK-NEXT: [[ADJUSTED:%.*]] = sub i8 [[BASE]], [[OFFSET]]
48 ; CHECK-NEXT: call void @use8(i8 [[ADJUSTED]])
49 ; CHECK-NEXT: [[RES:%.*]] = icmp uge i8 [[BASE]], [[OFFSET]]
50 ; CHECK-NEXT: ret i1 [[RES]]
52 %cmp = icmp slt i8 %offset, 0 ; %offset s< 0 implies %offset != 0
53 call void @llvm.assume(i1 %cmp)
55 %base = call i8 @gen8() ; opaque base value (not a function argument)
56 %adjusted = sub i8 %base, %offset
57 call void @use8(i8 %adjusted)
58 %res = icmp ugt i8 %base, %adjusted ; commuted form of @t0's compare
; Commuted + inverse-predicate variant:
; %base u<= (sub %base, %offset)  -->  %base u< %offset,
; valid because the assume makes %offset non-zero.
61 define i1 @t3(i8 %offset) {
63 ; CHECK-NEXT: [[CMP:%.*]] = icmp slt i8 [[OFFSET:%.*]], 0
64 ; CHECK-NEXT: call void @llvm.assume(i1 [[CMP]])
65 ; CHECK-NEXT: [[BASE:%.*]] = call i8 @gen8()
66 ; CHECK-NEXT: [[ADJUSTED:%.*]] = sub i8 [[BASE]], [[OFFSET]]
67 ; CHECK-NEXT: call void @use8(i8 [[ADJUSTED]])
68 ; CHECK-NEXT: [[RES:%.*]] = icmp ult i8 [[BASE]], [[OFFSET]]
69 ; CHECK-NEXT: ret i1 [[RES]]
71 %cmp = icmp slt i8 %offset, 0 ; %offset s< 0 implies %offset != 0
72 call void @llvm.assume(i1 %cmp)
74 %base = call i8 @gen8() ; opaque base value (not a function argument)
75 %adjusted = sub i8 %base, %offset
76 call void @use8(i8 %adjusted)
77 %res = icmp ule i8 %base, %adjusted ; commuted, inverse predicate of @t2
81 ; Here we don't know that offset is non-zero. Can't fold.
; Negative test: no assume, so %offset may be zero and the compare must be
; left alone (CHECK lines verify the icmp of %adjusted survives unfolded).
82 define i1 @n4_maybezero(i8 %base, i8 %offset) {
83 ; CHECK-LABEL: @n4_maybezero(
84 ; CHECK-NEXT: [[ADJUSTED:%.*]] = sub i8 [[BASE:%.*]], [[OFFSET:%.*]]
85 ; CHECK-NEXT: call void @use8(i8 [[ADJUSTED]])
86 ; CHECK-NEXT: [[RES:%.*]] = icmp ult i8 [[ADJUSTED]], [[BASE]]
87 ; CHECK-NEXT: ret i1 [[RES]]
89 %adjusted = sub i8 %base, %offset
90 call void @use8(i8 %adjusted)
91 %res = icmp ult i8 %adjusted, %base ; must NOT be folded
94 ; We need to know that %offset is non-zero; knowing that about %base won't do. Can't fold.
; Negative test: the assume constrains %base, not %offset, so %offset may
; still be zero — the compare must remain unfolded (verified by the CHECKs).
95 define i1 @n5_wrongnonzero(i8 %base, i8 %offset) {
96 ; CHECK-LABEL: @n5_wrongnonzero(
97 ; CHECK-NEXT: [[CMP:%.*]] = icmp sgt i8 [[BASE:%.*]], 0
98 ; CHECK-NEXT: call void @llvm.assume(i1 [[CMP]])
99 ; CHECK-NEXT: [[ADJUSTED:%.*]] = sub i8 [[BASE]], [[OFFSET:%.*]]
100 ; CHECK-NEXT: call void @use8(i8 [[ADJUSTED]])
101 ; CHECK-NEXT: [[RES:%.*]] = icmp ult i8 [[ADJUSTED]], [[BASE]]
102 ; CHECK-NEXT: ret i1 [[RES]]
104 %cmp = icmp sgt i8 %base, 0 ; constrains the WRONG operand (%base, not %offset)
105 call void @llvm.assume(i1 %cmp)
107 %adjusted = sub i8 %base, %offset
108 call void @use8(i8 %adjusted)
109 %res = icmp ult i8 %adjusted, %base ; must NOT be folded