; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -passes=correlated-propagation -S | FileCheck %s

declare void @llvm.assume(i1)

; Divisor is constant. X's range is known
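; A rough sketch of the expected rewrite (inferred from the checks below, not
; a statement about pass internals): when %x is known to span fewer than a few
; multiples of the constant divisor, the udiv can be replaced by a compare and
; zero-extend. For example, with %x in [0, 4) and a divisor of 3:
;   %div.cmp = icmp uge i8 %x, 3
;   %div = zext i1 %div.cmp to i8
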
define i8 @constant.divisor.v3(i8 %x) {
; CHECK-LABEL: @constant.divisor.v3(
; CHECK-NEXT:    [[CMP_X_UPPER:%.*]] = icmp ult i8 [[X:%.*]], 3
; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP_X_UPPER]])
; CHECK-NEXT:    ret i8 0
;
  %cmp.x.upper = icmp ult i8 %x, 3
  call void @llvm.assume(i1 %cmp.x.upper)
  %div = udiv i8 %x, 3
  ret i8 %div
}

define i8 @constant.divisor.v4(i8 %x) {
; CHECK-LABEL: @constant.divisor.v4(
; CHECK-NEXT:    [[CMP_X_UPPER:%.*]] = icmp ult i8 [[X:%.*]], 4
; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP_X_UPPER]])
; CHECK-NEXT:    [[DIV_CMP:%.*]] = icmp uge i8 [[X]], 3
; CHECK-NEXT:    [[DIV:%.*]] = zext i1 [[DIV_CMP]] to i8
; CHECK-NEXT:    ret i8 [[DIV]]
;
  %cmp.x.upper = icmp ult i8 %x, 4
  call void @llvm.assume(i1 %cmp.x.upper)
  %div = udiv i8 %x, 3
  ret i8 %div
}

define i8 @constant.divisor.x.range.v4(ptr %x.ptr) {
; CHECK-LABEL: @constant.divisor.x.range.v4(
; CHECK-NEXT:    [[X:%.*]] = load i8, ptr [[X_PTR:%.*]], align 1, !range [[RNG0:![0-9]+]]
; CHECK-NEXT:    [[DIV_CMP:%.*]] = icmp uge i8 [[X]], 3
; CHECK-NEXT:    [[DIV:%.*]] = zext i1 [[DIV_CMP]] to i8
; CHECK-NEXT:    ret i8 [[DIV]]
;
  %x = load i8, ptr %x.ptr, !range !{ i8 0, i8 4 }
  %div = udiv i8 %x, 3
  ret i8 %div
}

define i8 @constant.divisor.x.mask.v4(i8 %x) {
; CHECK-LABEL: @constant.divisor.x.mask.v4(
; CHECK-NEXT:    [[X_MASKED:%.*]] = and i8 [[X:%.*]], 3
; CHECK-NEXT:    [[DIV_CMP:%.*]] = icmp uge i8 [[X_MASKED]], 3
; CHECK-NEXT:    [[DIV:%.*]] = zext i1 [[DIV_CMP]] to i8
; CHECK-NEXT:    ret i8 [[DIV]]
;
  %x.masked = and i8 %x, 3
  %div = udiv i8 %x.masked, 3
  ret i8 %div
}

define i8 @constant.divisor.v5(i8 %x) {
; CHECK-LABEL: @constant.divisor.v5(
; CHECK-NEXT:    [[CMP_X_UPPER:%.*]] = icmp ult i8 [[X:%.*]], 5
; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP_X_UPPER]])
; CHECK-NEXT:    [[DIV_CMP:%.*]] = icmp uge i8 [[X]], 3
; CHECK-NEXT:    [[DIV:%.*]] = zext i1 [[DIV_CMP]] to i8
; CHECK-NEXT:    ret i8 [[DIV]]
;
  %cmp.x.upper = icmp ult i8 %x, 5
  call void @llvm.assume(i1 %cmp.x.upper)
  %div = udiv i8 %x, 3
  ret i8 %div
}

define i8 @constant.divisor.v6(i8 %x) {
; CHECK-LABEL: @constant.divisor.v6(
; CHECK-NEXT:    [[CMP_X_UPPER:%.*]] = icmp ult i8 [[X:%.*]], 6
; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP_X_UPPER]])
; CHECK-NEXT:    [[DIV_CMP:%.*]] = icmp uge i8 [[X]], 3
; CHECK-NEXT:    [[DIV:%.*]] = zext i1 [[DIV_CMP]] to i8
; CHECK-NEXT:    ret i8 [[DIV]]
;
  %cmp.x.upper = icmp ult i8 %x, 6
  call void @llvm.assume(i1 %cmp.x.upper)
  %div = udiv i8 %x, 3
  ret i8 %div
}

define i8 @constant.divisor.v7(i8 %x) {
; CHECK-LABEL: @constant.divisor.v7(
; CHECK-NEXT:    [[CMP_X_UPPER:%.*]] = icmp ult i8 [[X:%.*]], 7
; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP_X_UPPER]])
; CHECK-NEXT:    [[DIV:%.*]] = udiv i8 [[X]], 3
; CHECK-NEXT:    ret i8 [[DIV]]
;
  %cmp.x.upper = icmp ult i8 %x, 7
  call void @llvm.assume(i1 %cmp.x.upper)
  %div = udiv i8 %x, 3
  ret i8 %div
}

define i8 @constant.divisor.v6to8(i8 %x) {
; CHECK-LABEL: @constant.divisor.v6to8(
; CHECK-NEXT:    [[CMP_X_LOWER:%.*]] = icmp samesign uge i8 [[X:%.*]], 6
; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP_X_LOWER]])
; CHECK-NEXT:    [[CMP_X_UPPER:%.*]] = icmp ult i8 [[X]], 9
; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP_X_UPPER]])
; CHECK-NEXT:    [[DIV:%.*]] = udiv i8 [[X]], 3
; CHECK-NEXT:    ret i8 2
;
  %cmp.x.lower = icmp uge i8 %x, 6
  call void @llvm.assume(i1 %cmp.x.lower)
  %cmp.x.upper = icmp ult i8 %x, 9
  call void @llvm.assume(i1 %cmp.x.upper)
  %div = udiv i8 %x, 3
  ret i8 %div
}

define i8 @constant.divisor.v9to11(i8 %x) {
; CHECK-LABEL: @constant.divisor.v9to11(
; CHECK-NEXT:    [[CMP_X_LOWER:%.*]] = icmp samesign uge i8 [[X:%.*]], 9
; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP_X_LOWER]])
; CHECK-NEXT:    [[CMP_X_UPPER:%.*]] = icmp ult i8 [[X]], 12
; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP_X_UPPER]])
; CHECK-NEXT:    [[DIV:%.*]] = udiv i8 [[X]], 3
; CHECK-NEXT:    ret i8 3
;
  %cmp.x.lower = icmp uge i8 %x, 9
  call void @llvm.assume(i1 %cmp.x.lower)
  %cmp.x.upper = icmp ult i8 %x, 12
  call void @llvm.assume(i1 %cmp.x.upper)
  %div = udiv i8 %x, 3
  ret i8 %div
}

define i8 @constant.divisor.v12to14(i8 %x) {
; CHECK-LABEL: @constant.divisor.v12to14(
; CHECK-NEXT:    [[CMP_X_LOWER:%.*]] = icmp samesign uge i8 [[X:%.*]], 12
; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP_X_LOWER]])
; CHECK-NEXT:    [[CMP_X_UPPER:%.*]] = icmp ult i8 [[X]], 15
; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP_X_UPPER]])
; CHECK-NEXT:    [[DIV:%.*]] = udiv i8 [[X]], 3
; CHECK-NEXT:    ret i8 4
;
  %cmp.x.lower = icmp uge i8 %x, 12
  call void @llvm.assume(i1 %cmp.x.lower)
  %cmp.x.upper = icmp ult i8 %x, 15
  call void @llvm.assume(i1 %cmp.x.upper)
  %div = udiv i8 %x, 3
  ret i8 %div
}

define i8 @constant.divisor.v6to11(i8 %x) {
; CHECK-LABEL: @constant.divisor.v6to11(
; CHECK-NEXT:    [[CMP_X_LOWER:%.*]] = icmp samesign uge i8 [[X:%.*]], 6
; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP_X_LOWER]])
; CHECK-NEXT:    [[CMP_X_UPPER:%.*]] = icmp ult i8 [[X]], 12
; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP_X_UPPER]])
; CHECK-NEXT:    [[DIV:%.*]] = udiv i8 [[X]], 3
; CHECK-NEXT:    ret i8 [[DIV]]
;
  %cmp.x.lower = icmp uge i8 %x, 6
  call void @llvm.assume(i1 %cmp.x.lower)
  %cmp.x.upper = icmp ult i8 %x, 12
  call void @llvm.assume(i1 %cmp.x.upper)
  %div = udiv i8 %x, 3
  ret i8 %div
}

; Both are variable. Bounds are known
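; As the checks below suggest, the same compare/zero-extend rewrite applies
; when the divisor is itself a variable, provided both operands have narrow
; enough ranges. For example, with %x in [0, 4) and %y in [3, 4]:
;   %div.cmp = icmp uge i8 %x, %y
;   %div = zext i1 %div.cmp to i8
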
define i8 @variable.v3(i8 %x, i8 %y) {
; CHECK-LABEL: @variable.v3(
; CHECK-NEXT:    [[CMP_X:%.*]] = icmp ult i8 [[X:%.*]], 3
; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP_X]])
; CHECK-NEXT:    [[CMP_Y_LOWER:%.*]] = icmp samesign uge i8 [[Y:%.*]], 3
; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP_Y_LOWER]])
; CHECK-NEXT:    [[CMP_Y_UPPER:%.*]] = icmp ule i8 [[Y]], 4
; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP_Y_UPPER]])
; CHECK-NEXT:    ret i8 0
;
  %cmp.x = icmp ult i8 %x, 3
  call void @llvm.assume(i1 %cmp.x)
  %cmp.y.lower = icmp uge i8 %y, 3
  call void @llvm.assume(i1 %cmp.y.lower)
  %cmp.y.upper = icmp ule i8 %y, 4
  call void @llvm.assume(i1 %cmp.y.upper)
  %div = udiv i8 %x, %y
  ret i8 %div
}

define i8 @variable.v4(i8 %x, i8 %y) {
; CHECK-LABEL: @variable.v4(
; CHECK-NEXT:    [[CMP_X:%.*]] = icmp ult i8 [[X:%.*]], 4
; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP_X]])
; CHECK-NEXT:    [[CMP_Y_LOWER:%.*]] = icmp samesign uge i8 [[Y:%.*]], 3
; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP_Y_LOWER]])
; CHECK-NEXT:    [[CMP_Y_UPPER:%.*]] = icmp ule i8 [[Y]], 4
; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP_Y_UPPER]])
; CHECK-NEXT:    [[DIV_CMP:%.*]] = icmp uge i8 [[X]], [[Y]]
; CHECK-NEXT:    [[DIV:%.*]] = zext i1 [[DIV_CMP]] to i8
; CHECK-NEXT:    ret i8 [[DIV]]
;
  %cmp.x = icmp ult i8 %x, 4
  call void @llvm.assume(i1 %cmp.x)
  %cmp.y.lower = icmp uge i8 %y, 3
  call void @llvm.assume(i1 %cmp.y.lower)
  %cmp.y.upper = icmp ule i8 %y, 4
  call void @llvm.assume(i1 %cmp.y.upper)
  %div = udiv i8 %x, %y
  ret i8 %div
}

define i8 @variable.v4.range(ptr %x.ptr, ptr %y.ptr) {
; CHECK-LABEL: @variable.v4.range(
; CHECK-NEXT:    [[X:%.*]] = load i8, ptr [[X_PTR:%.*]], align 1, !range [[RNG0]]
; CHECK-NEXT:    [[Y:%.*]] = load i8, ptr [[Y_PTR:%.*]], align 1, !range [[RNG1:![0-9]+]]
; CHECK-NEXT:    [[DIV_CMP:%.*]] = icmp uge i8 [[X]], [[Y]]
; CHECK-NEXT:    [[DIV:%.*]] = zext i1 [[DIV_CMP]] to i8
; CHECK-NEXT:    ret i8 [[DIV]]
;
  %x = load i8, ptr %x.ptr, !range !{ i8 0, i8 4 }
  %y = load i8, ptr %y.ptr, !range !{ i8 3, i8 5 }
  %div = udiv i8 %x, %y
  ret i8 %div
}

define i8 @variable.v5(i8 %x, i8 %y) {
; CHECK-LABEL: @variable.v5(
; CHECK-NEXT:    [[CMP_X:%.*]] = icmp ult i8 [[X:%.*]], 5
; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP_X]])
; CHECK-NEXT:    [[CMP_Y_LOWER:%.*]] = icmp samesign uge i8 [[Y:%.*]], 3
; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP_Y_LOWER]])
; CHECK-NEXT:    [[CMP_Y_UPPER:%.*]] = icmp ule i8 [[Y]], 4
; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP_Y_UPPER]])
; CHECK-NEXT:    [[DIV_CMP:%.*]] = icmp uge i8 [[X]], [[Y]]
; CHECK-NEXT:    [[DIV:%.*]] = zext i1 [[DIV_CMP]] to i8
; CHECK-NEXT:    ret i8 [[DIV]]
;
  %cmp.x = icmp ult i8 %x, 5
  call void @llvm.assume(i1 %cmp.x)
  %cmp.y.lower = icmp uge i8 %y, 3
  call void @llvm.assume(i1 %cmp.y.lower)
  %cmp.y.upper = icmp ule i8 %y, 4
  call void @llvm.assume(i1 %cmp.y.upper)
  %div = udiv i8 %x, %y
  ret i8 %div
}

define i8 @variable.v6(i8 %x, i8 %y) {
; CHECK-LABEL: @variable.v6(
; CHECK-NEXT:    [[CMP_X:%.*]] = icmp ult i8 [[X:%.*]], 6
; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP_X]])
; CHECK-NEXT:    [[CMP_Y_LOWER:%.*]] = icmp samesign uge i8 [[Y:%.*]], 3
; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP_Y_LOWER]])
; CHECK-NEXT:    [[CMP_Y_UPPER:%.*]] = icmp ule i8 [[Y]], 4
; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP_Y_UPPER]])
; CHECK-NEXT:    [[DIV_CMP:%.*]] = icmp uge i8 [[X]], [[Y]]
; CHECK-NEXT:    [[DIV:%.*]] = zext i1 [[DIV_CMP]] to i8
; CHECK-NEXT:    ret i8 [[DIV]]
;
  %cmp.x = icmp ult i8 %x, 6
  call void @llvm.assume(i1 %cmp.x)
  %cmp.y.lower = icmp uge i8 %y, 3
  call void @llvm.assume(i1 %cmp.y.lower)
  %cmp.y.upper = icmp ule i8 %y, 4
  call void @llvm.assume(i1 %cmp.y.upper)
  %div = udiv i8 %x, %y
  ret i8 %div
}

define i8 @variable.v7(i8 %x, i8 %y) {
; CHECK-LABEL: @variable.v7(
; CHECK-NEXT:    [[CMP_X:%.*]] = icmp ult i8 [[X:%.*]], 7
; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP_X]])
; CHECK-NEXT:    [[CMP_Y_LOWER:%.*]] = icmp samesign uge i8 [[Y:%.*]], 3
; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP_Y_LOWER]])
; CHECK-NEXT:    [[CMP_Y_UPPER:%.*]] = icmp ule i8 [[Y]], 4
; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP_Y_UPPER]])
; CHECK-NEXT:    [[DIV:%.*]] = udiv i8 [[X]], [[Y]]
; CHECK-NEXT:    ret i8 [[DIV]]
;
  %cmp.x = icmp ult i8 %x, 7
  call void @llvm.assume(i1 %cmp.x)
  %cmp.y.lower = icmp uge i8 %y, 3
  call void @llvm.assume(i1 %cmp.y.lower)
  %cmp.y.upper = icmp ule i8 %y, 4
  call void @llvm.assume(i1 %cmp.y.upper)
  %div = udiv i8 %x, %y
  ret i8 %div
}

define i8 @large.divisor.v0(i8 %x) {
; CHECK-LABEL: @large.divisor.v0(
; CHECK-NEXT:    [[CMP_X_UPPER:%.*]] = icmp ult i8 [[X:%.*]], 127
; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP_X_UPPER]])
; CHECK-NEXT:    ret i8 0
;
  %cmp.x.upper = icmp ult i8 %x, 127
  call void @llvm.assume(i1 %cmp.x.upper)
  %div = udiv i8 %x, 127
  ret i8 %div
}

define i8 @large.divisor.v1(i8 %x) {
; CHECK-LABEL: @large.divisor.v1(
; CHECK-NEXT:    [[CMP_X_UPPER:%.*]] = icmp ult i8 [[X:%.*]], -128
; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP_X_UPPER]])
; CHECK-NEXT:    [[DIV_CMP:%.*]] = icmp uge i8 [[X]], 127
; CHECK-NEXT:    [[DIV:%.*]] = zext i1 [[DIV_CMP]] to i8
; CHECK-NEXT:    ret i8 [[DIV]]
;
  %cmp.x.upper = icmp ult i8 %x, 128
  call void @llvm.assume(i1 %cmp.x.upper)
  %div = udiv i8 %x, 127
  ret i8 %div
}

define i8 @large.divisor.v1.range(ptr %x.ptr) {
; CHECK-LABEL: @large.divisor.v1.range(
; CHECK-NEXT:    [[X:%.*]] = load i8, ptr [[X_PTR:%.*]], align 1, !range [[RNG2:![0-9]+]]
; CHECK-NEXT:    [[DIV_CMP:%.*]] = icmp uge i8 [[X]], 127
; CHECK-NEXT:    [[DIV:%.*]] = zext i1 [[DIV_CMP]] to i8
; CHECK-NEXT:    ret i8 [[DIV]]
;
  %x = load i8, ptr %x.ptr, !range !{ i8 0, i8 128 }
  %div = udiv i8 %x, 127
  ret i8 %div
}

define i8 @large.divisor.v2.unbound.x(i8 %x) {
; CHECK-LABEL: @large.divisor.v2.unbound.x(
; CHECK-NEXT:    [[DIV:%.*]] = udiv i8 [[X:%.*]], 127
; CHECK-NEXT:    ret i8 [[DIV]]
;
  %div = udiv i8 %x, 127
  ret i8 %div
}

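; Note on the checks in the following tests: the input IR uses unsigned
; constants such as 128 and 129, but i8 immediates are printed in signed form,
; so they appear as -128 and -127 in the autogenerated CHECK lines.
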
define i8 @large.divisor.with.overflow.v0(i8 %x) {
; CHECK-LABEL: @large.divisor.with.overflow.v0(
; CHECK-NEXT:    [[CMP_X_UPPER:%.*]] = icmp ult i8 [[X:%.*]], -128
; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP_X_UPPER]])
; CHECK-NEXT:    ret i8 0
;
  %cmp.x.upper = icmp ult i8 %x, 128
  call void @llvm.assume(i1 %cmp.x.upper)
  %div = udiv i8 %x, 128
  ret i8 %div
}

define i8 @large.divisor.with.overflow.v1(i8 %x) {
; CHECK-LABEL: @large.divisor.with.overflow.v1(
; CHECK-NEXT:    [[CMP_X_UPPER:%.*]] = icmp ult i8 [[X:%.*]], -127
; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP_X_UPPER]])
; CHECK-NEXT:    [[DIV_CMP:%.*]] = icmp uge i8 [[X]], -128
; CHECK-NEXT:    [[DIV:%.*]] = zext i1 [[DIV_CMP]] to i8
; CHECK-NEXT:    ret i8 [[DIV]]
;
  %cmp.x.upper = icmp ult i8 %x, 129
  call void @llvm.assume(i1 %cmp.x.upper)
  %div = udiv i8 %x, 128
  ret i8 %div
}

define i8 @large.divisor.with.overflow.v1.range(ptr %x.ptr) {
; CHECK-LABEL: @large.divisor.with.overflow.v1.range(
; CHECK-NEXT:    [[X:%.*]] = load i8, ptr [[X_PTR:%.*]], align 1, !range [[RNG3:![0-9]+]]
; CHECK-NEXT:    [[DIV_CMP:%.*]] = icmp uge i8 [[X]], -128
; CHECK-NEXT:    [[DIV:%.*]] = zext i1 [[DIV_CMP]] to i8
; CHECK-NEXT:    ret i8 [[DIV]]
;
  %x = load i8, ptr %x.ptr, !range !{ i8 0, i8 129 }
  %div = udiv i8 %x, 128
  ret i8 %div
}

define i8 @large.divisor.with.overflow.v2.unbound.x(i8 %x) {
; CHECK-LABEL: @large.divisor.with.overflow.v2.unbound.x(
; CHECK-NEXT:    [[DIV_CMP:%.*]] = icmp uge i8 [[X:%.*]], -128
; CHECK-NEXT:    [[DIV:%.*]] = zext i1 [[DIV_CMP]] to i8
; CHECK-NEXT:    ret i8 [[DIV]]
;
  %div = udiv i8 %x, 128
  ret i8 %div
}

define i8 @known_uge(i8 noundef %x) {
; CHECK-LABEL: @known_uge(
; CHECK-NEXT:    [[CMP_X_UPPER:%.*]] = icmp ult i8 [[X:%.*]], 6
; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP_X_UPPER]])
; CHECK-NEXT:    [[CMP_X_LOWER:%.*]] = icmp samesign uge i8 [[X]], 3
; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP_X_LOWER]])
; CHECK-NEXT:    ret i8 1
;
  %cmp.x.upper = icmp ult i8 %x, 6
  call void @llvm.assume(i1 %cmp.x.upper)
  %cmp.x.lower = icmp uge i8 %x, 3
  call void @llvm.assume(i1 %cmp.x.lower)
  %div = udiv i8 %x, 3
  ret i8 %div
}