; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -instcombine -S | FileCheck %s

target datalayout = "e-p:40:64:64:32-p1:16:16:16-p2:32:32:32-p3:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"

declare i32 @test58_d(i64)

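; Both GEPs below are inbounds offsets from the same base, so the ult compare of
; byte offsets 8 and 10 folds to true; the ptrtoint feeding the call keeps the
; first GEP alive.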
define i1 @test59(i8* %foo) {
; CHECK-LABEL: @test59(
; CHECK-NEXT:    [[GEP1:%.*]] = getelementptr inbounds i8, i8* [[FOO:%.*]], i32 8
; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint i8* [[GEP1]] to i40
; CHECK-NEXT:    [[USE:%.*]] = zext i40 [[TMP1]] to i64
; CHECK-NEXT:    [[CALL:%.*]] = call i32 @test58_d(i64 [[USE]])
; CHECK-NEXT:    ret i1 true
;
  %bit = bitcast i8* %foo to i32*
  %gep1 = getelementptr inbounds i32, i32* %bit, i64 2
  %gep2 = getelementptr inbounds i8, i8* %foo, i64 10
  %cast1 = bitcast i32* %gep1 to i8*
  %cmp = icmp ult i8* %cast1, %gep2
  %use = ptrtoint i8* %cast1 to i64
  %call = call i32 @test58_d(i64 %use)
  ret i1 %cmp
}

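; Same fold as test59, but in addrspace(1), where pointers are 16 bits wide.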
define i1 @test59_as1(i8 addrspace(1)* %foo) {
; CHECK-LABEL: @test59_as1(
; CHECK-NEXT:    [[GEP1:%.*]] = getelementptr inbounds i8, i8 addrspace(1)* [[FOO:%.*]], i16 8
; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint i8 addrspace(1)* [[GEP1]] to i16
; CHECK-NEXT:    [[USE:%.*]] = zext i16 [[TMP1]] to i64
; CHECK-NEXT:    [[CALL:%.*]] = call i32 @test58_d(i64 [[USE]])
; CHECK-NEXT:    ret i1 true
;
  %bit = bitcast i8 addrspace(1)* %foo to i32 addrspace(1)*
  %gep1 = getelementptr inbounds i32, i32 addrspace(1)* %bit, i64 2
  %gep2 = getelementptr inbounds i8, i8 addrspace(1)* %foo, i64 10
  %cast1 = bitcast i32 addrspace(1)* %gep1 to i8 addrspace(1)*
  %cmp = icmp ult i8 addrspace(1)* %cast1, %gep2
  %use = ptrtoint i8 addrspace(1)* %cast1 to i64
  %call = call i32 @test58_d(i64 %use)
  ret i1 %cmp
}

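; Variable indices from the same base: the pointer compare folds to a compare of
; the scaled indices in the 32-bit index width of address space 0.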
define i1 @test60(i8* %foo, i64 %i, i64 %j) {
; CHECK-LABEL: @test60(
; CHECK-NEXT:    [[TMP1:%.*]] = trunc i64 [[I:%.*]] to i32
; CHECK-NEXT:    [[TMP2:%.*]] = trunc i64 [[J:%.*]] to i32
; CHECK-NEXT:    [[GEP1_IDX:%.*]] = shl nsw i32 [[TMP1]], 2
; CHECK-NEXT:    [[TMP3:%.*]] = icmp slt i32 [[GEP1_IDX]], [[TMP2]]
; CHECK-NEXT:    ret i1 [[TMP3]]
;
  %bit = bitcast i8* %foo to i32*
  %gep1 = getelementptr inbounds i32, i32* %bit, i64 %i
  %gep2 = getelementptr inbounds i8, i8* %foo, i64 %j
  %cast1 = bitcast i32* %gep1 to i8*
  %cmp = icmp ult i8* %cast1, %gep2
  ret i1 %cmp
}

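; Same as test60, but in addrspace(1); the indices are truncated to its 16-bit
; pointer width.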
define i1 @test60_as1(i8 addrspace(1)* %foo, i64 %i, i64 %j) {
; CHECK-LABEL: @test60_as1(
; CHECK-NEXT:    [[TMP1:%.*]] = trunc i64 [[I:%.*]] to i16
; CHECK-NEXT:    [[TMP2:%.*]] = trunc i64 [[J:%.*]] to i16
; CHECK-NEXT:    [[GEP1_IDX:%.*]] = shl nsw i16 [[TMP1]], 2
; CHECK-NEXT:    [[TMP3:%.*]] = icmp slt i16 [[GEP1_IDX]], [[TMP2]]
; CHECK-NEXT:    ret i1 [[TMP3]]
;
  %bit = bitcast i8 addrspace(1)* %foo to i32 addrspace(1)*
  %gep1 = getelementptr inbounds i32, i32 addrspace(1)* %bit, i64 %i
  %gep2 = getelementptr inbounds i8, i8 addrspace(1)* %foo, i64 %j
  %cast1 = bitcast i32 addrspace(1)* %gep1 to i8 addrspace(1)*
  %cmp = icmp ult i8 addrspace(1)* %cast1, %gep2
  ret i1 %cmp
}

; Same as test60, but look through an addrspacecast instead of a
; bitcast. This uses the same-sized addrspace.
define i1 @test60_addrspacecast(i8* %foo, i64 %i, i64 %j) {
; CHECK-LABEL: @test60_addrspacecast(
; CHECK-NEXT:    [[TMP1:%.*]] = trunc i64 [[J:%.*]] to i32
; CHECK-NEXT:    [[I_TR:%.*]] = trunc i64 [[I:%.*]] to i32
; CHECK-NEXT:    [[TMP2:%.*]] = shl i32 [[I_TR]], 2
; CHECK-NEXT:    [[TMP3:%.*]] = icmp slt i32 [[TMP2]], [[TMP1]]
; CHECK-NEXT:    ret i1 [[TMP3]]
;
  %bit = addrspacecast i8* %foo to i32 addrspace(3)*
  %gep1 = getelementptr inbounds i32, i32 addrspace(3)* %bit, i64 %i
  %gep2 = getelementptr inbounds i8, i8* %foo, i64 %j
  %cast1 = addrspacecast i32 addrspace(3)* %gep1 to i8*
  %cmp = icmp ult i8* %cast1, %gep2
  ret i1 %cmp
}

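; Here the addrspacecast goes to addrspace(1), whose pointers are only 16 bits,
; so the index compare is done in i16.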
define i1 @test60_addrspacecast_smaller(i8* %foo, i16 %i, i64 %j) {
; CHECK-LABEL: @test60_addrspacecast_smaller(
; CHECK-NEXT:    [[GEP1_IDX:%.*]] = shl nsw i16 [[I:%.*]], 2
; CHECK-NEXT:    [[TMP1:%.*]] = trunc i64 [[J:%.*]] to i16
; CHECK-NEXT:    [[TMP2:%.*]] = icmp slt i16 [[GEP1_IDX]], [[TMP1]]
; CHECK-NEXT:    ret i1 [[TMP2]]
;
  %bit = addrspacecast i8* %foo to i32 addrspace(1)*
  %gep1 = getelementptr inbounds i32, i32 addrspace(1)* %bit, i16 %i
  %gep2 = getelementptr inbounds i8, i8* %foo, i64 %j
  %cast1 = addrspacecast i32 addrspace(1)* %gep1 to i8*
  %cmp = icmp ult i8* %cast1, %gep2
  ret i1 %cmp
}

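; Here the addrspacecast widens 16-bit addrspace(1) pointers to 32-bit
; addrspace(2) pointers; the compare still folds to an i16 index compare.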
define i1 @test60_addrspacecast_larger(i8 addrspace(1)* %foo, i32 %i, i16 %j) {
; CHECK-LABEL: @test60_addrspacecast_larger(
; CHECK-NEXT:    [[I_TR:%.*]] = trunc i32 [[I:%.*]] to i16
; CHECK-NEXT:    [[TMP1:%.*]] = shl i16 [[I_TR]], 2
; CHECK-NEXT:    [[TMP2:%.*]] = icmp slt i16 [[TMP1]], [[J:%.*]]
; CHECK-NEXT:    ret i1 [[TMP2]]
;
  %bit = addrspacecast i8 addrspace(1)* %foo to i32 addrspace(2)*
  %gep1 = getelementptr inbounds i32, i32 addrspace(2)* %bit, i32 %i
  %gep2 = getelementptr inbounds i8, i8 addrspace(1)* %foo, i16 %j
  %cast1 = addrspacecast i32 addrspace(2)* %gep1 to i8 addrspace(1)*
  %cmp = icmp ult i8 addrspace(1)* %cast1, %gep2
  ret i1 %cmp
}

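; These GEPs are not inbounds, so the pointer compare itself is not folded away;
; only the 64-bit indices are shrunk to the 32-bit index width.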
define i1 @test61(i8* %foo, i64 %i, i64 %j) {
; CHECK-LABEL: @test61(
; CHECK-NEXT:    [[BIT:%.*]] = bitcast i8* [[FOO:%.*]] to i32*
; CHECK-NEXT:    [[TMP1:%.*]] = trunc i64 [[I:%.*]] to i32
; CHECK-NEXT:    [[GEP1:%.*]] = getelementptr i32, i32* [[BIT]], i32 [[TMP1]]
; CHECK-NEXT:    [[TMP2:%.*]] = trunc i64 [[J:%.*]] to i32
; CHECK-NEXT:    [[GEP2:%.*]] = getelementptr i8, i8* [[FOO]], i32 [[TMP2]]
; CHECK-NEXT:    [[CAST1:%.*]] = bitcast i32* [[GEP1]] to i8*
; CHECK-NEXT:    [[CMP:%.*]] = icmp ugt i8* [[GEP2]], [[CAST1]]
; CHECK-NEXT:    ret i1 [[CMP]]
;
  %bit = bitcast i8* %foo to i32*
  %gep1 = getelementptr i32, i32* %bit, i64 %i
  %gep2 = getelementptr i8, i8* %foo, i64 %j
  %cast1 = bitcast i32* %gep1 to i8*
  %cmp = icmp ult i8* %cast1, %gep2
  ret i1 %cmp
; Don't transform non-inbounds GEPs.
}

define i1 @test61_as1(i8 addrspace(1)* %foo, i16 %i, i16 %j) {
; CHECK-LABEL: @test61_as1(
; CHECK-NEXT:    [[BIT:%.*]] = bitcast i8 addrspace(1)* [[FOO:%.*]] to i32 addrspace(1)*
; CHECK-NEXT:    [[GEP1:%.*]] = getelementptr i32, i32 addrspace(1)* [[BIT]], i16 [[I:%.*]]
; CHECK-NEXT:    [[GEP2:%.*]] = getelementptr i8, i8 addrspace(1)* [[FOO]], i16 [[J:%.*]]
; CHECK-NEXT:    [[CAST1:%.*]] = bitcast i32 addrspace(1)* [[GEP1]] to i8 addrspace(1)*
; CHECK-NEXT:    [[CMP:%.*]] = icmp ugt i8 addrspace(1)* [[GEP2]], [[CAST1]]
; CHECK-NEXT:    ret i1 [[CMP]]
;
  %bit = bitcast i8 addrspace(1)* %foo to i32 addrspace(1)*
  %gep1 = getelementptr i32, i32 addrspace(1)* %bit, i16 %i
  %gep2 = getelementptr i8, i8 addrspace(1)* %foo, i16 %j
  %cast1 = bitcast i32 addrspace(1)* %gep1 to i8 addrspace(1)*
  %cmp = icmp ult i8 addrspace(1)* %cast1, %gep2
  ret i1 %cmp
; Don't transform non-inbounds GEPs.
}

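; Constant inbounds offsets 1 and 10 from the same object, so the signed pointer
; compare folds to true.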
define i1 @test62(i8* %a) {
; CHECK-LABEL: @test62(
; CHECK-NEXT:    ret i1 true
;
  %arrayidx1 = getelementptr inbounds i8, i8* %a, i64 1
  %arrayidx2 = getelementptr inbounds i8, i8* %a, i64 10
  %cmp = icmp slt i8* %arrayidx1, %arrayidx2
  ret i1 %cmp
}

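; Same fold as test62 in addrspace(1).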
define i1 @test62_as1(i8 addrspace(1)* %a) {
; CHECK-LABEL: @test62_as1(
; CHECK-NEXT:    ret i1 true
;
  %arrayidx1 = getelementptr inbounds i8, i8 addrspace(1)* %a, i64 1
  %arrayidx2 = getelementptr inbounds i8, i8 addrspace(1)* %a, i64 10
  %cmp = icmp slt i8 addrspace(1)* %arrayidx1, %arrayidx2
  ret i1 %cmp
}

; Variation of the above with an ashr
define i1 @icmp_and_ashr_multiuse(i32 %X) {
; CHECK-LABEL: @icmp_and_ashr_multiuse(
; CHECK-NEXT:    [[TMP1:%.*]] = and i32 [[X:%.*]], 240
; CHECK-NEXT:    [[TOBOOL:%.*]] = icmp ne i32 [[TMP1]], 224
; CHECK-NEXT:    [[TMP2:%.*]] = and i32 [[X]], 496
; CHECK-NEXT:    [[TOBOOL2:%.*]] = icmp ne i32 [[TMP2]], 432
; CHECK-NEXT:    [[AND3:%.*]] = and i1 [[TOBOOL]], [[TOBOOL2]]
; CHECK-NEXT:    ret i1 [[AND3]]
;
  %shr = ashr i32 %X, 4
  %and = and i32 %shr, 15
  %and2 = and i32 %shr, 31 ; second use of the shift
  %tobool = icmp ne i32 %and, 14
  %tobool2 = icmp ne i32 %and2, 27
  %and3 = and i1 %tobool, %tobool2
  ret i1 %and3
}

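; Same as above, but with the final and written as a select (a logical and).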
define i1 @icmp_and_ashr_multiuse_logical(i32 %X) {
; CHECK-LABEL: @icmp_and_ashr_multiuse_logical(
; CHECK-NEXT:    [[TMP1:%.*]] = and i32 [[X:%.*]], 240
; CHECK-NEXT:    [[TOBOOL:%.*]] = icmp ne i32 [[TMP1]], 224
; CHECK-NEXT:    [[TMP2:%.*]] = and i32 [[X]], 496
; CHECK-NEXT:    [[TOBOOL2:%.*]] = icmp ne i32 [[TMP2]], 432
; CHECK-NEXT:    [[AND3:%.*]] = and i1 [[TOBOOL]], [[TOBOOL2]]
; CHECK-NEXT:    ret i1 [[AND3]]
;
  %shr = ashr i32 %X, 4
  %and = and i32 %shr, 15
  %and2 = and i32 %shr, 31 ; second use of the shift
  %tobool = icmp ne i32 %and, 14
  %tobool2 = icmp ne i32 %and2, 27
  %and3 = select i1 %tobool, i1 %tobool2, i1 false
  ret i1 %and3
}

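; lshr shifts in zeros, so the mask of 15 covers all bits the shift can produce;
; the whole sequence folds to a single unsigned compare against 31.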
define i1 @icmp_lshr_and_overshift(i8 %X) {
; CHECK-LABEL: @icmp_lshr_and_overshift(
; CHECK-NEXT:    [[TOBOOL:%.*]] = icmp ugt i8 [[X:%.*]], 31
; CHECK-NEXT:    ret i1 [[TOBOOL]]
;
  %shr = lshr i8 %X, 5
  %and = and i8 %shr, 15
  %tobool = icmp ne i8 %and, 0
  ret i1 %tobool
}

; We shouldn't simplify this because the and uses bits that are shifted in.
define i1 @icmp_ashr_and_overshift(i8 %X) {
; CHECK-LABEL: @icmp_ashr_and_overshift(
; CHECK-NEXT:    [[SHR:%.*]] = ashr i8 [[X:%.*]], 5
; CHECK-NEXT:    [[AND:%.*]] = and i8 [[SHR]], 15
; CHECK-NEXT:    [[TOBOOL:%.*]] = icmp ne i8 [[AND]], 0
; CHECK-NEXT:    ret i1 [[TOBOOL]]
;
  %shr = ashr i8 %X, 5
  %and = and i8 %shr, 15
  %tobool = icmp ne i8 %and, 0
  ret i1 %tobool
}

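; An inbounds and a non-inbounds GEP of the same base with the same offset
; compare equal, so ugt folds to false.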
define i1 @test71(i8* %x) {
; CHECK-LABEL: @test71(
; CHECK-NEXT:    ret i1 false
;
  %a = getelementptr i8, i8* %x, i64 8
  %b = getelementptr inbounds i8, i8* %x, i64 8
  %c = icmp ugt i8* %a, %b
  ret i1 %c
}

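; Same as test71 in addrspace(1).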
define i1 @test71_as1(i8 addrspace(1)* %x) {
; CHECK-LABEL: @test71_as1(
; CHECK-NEXT:    ret i1 false
;
  %a = getelementptr i8, i8 addrspace(1)* %x, i64 8
  %b = getelementptr inbounds i8, i8 addrspace(1)* %x, i64 8
  %c = icmp ugt i8 addrspace(1)* %a, %b
  ret i1 %c
}