; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -passes=correlated-propagation -S | FileCheck %s
; Declaration of the assume intrinsic used to constrain value ranges below.
declare void @llvm.assume(i1)
; Divisor is constant. X's range is known
; %x in [0, 3) is always smaller than the divisor 3, so the quotient folds to 0.
define i8 @constant.divisor.v3(i8 %x) {
; CHECK-LABEL: @constant.divisor.v3(
; CHECK-NEXT:    [[CMP_X_UPPER:%.*]] = icmp ult i8 [[X:%.*]], 3
; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP_X_UPPER]])
; CHECK-NEXT:    ret i8 0
;
  %cmp.x.upper = icmp ult i8 %x, 3
  call void @llvm.assume(i1 %cmp.x.upper)
  %div = udiv i8 %x, 3
  ret i8 %div
}
; %x in [0, 4): quotient is 0 or 1, so udiv expands to a compare + zext.
define i8 @constant.divisor.v4(i8 %x) {
; CHECK-LABEL: @constant.divisor.v4(
; CHECK-NEXT:    [[CMP_X_UPPER:%.*]] = icmp ult i8 [[X:%.*]], 4
; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP_X_UPPER]])
; CHECK-NEXT:    [[DIV_CMP:%.*]] = icmp uge i8 [[X]], 3
; CHECK-NEXT:    [[DIV:%.*]] = zext i1 [[DIV_CMP]] to i8
; CHECK-NEXT:    ret i8 [[DIV]]
;
  %cmp.x.upper = icmp ult i8 %x, 4
  call void @llvm.assume(i1 %cmp.x.upper)
  %div = udiv i8 %x, 3
  ret i8 %div
}
; Same as @constant.divisor.v4, but the [0, 4) bound comes from !range metadata.
define i8 @constant.divisor.x.range.v4(ptr %x.ptr) {
; CHECK-LABEL: @constant.divisor.x.range.v4(
; CHECK-NEXT:    [[X:%.*]] = load i8, ptr [[X_PTR:%.*]], align 1, !range [[RNG0:![0-9]+]]
; CHECK-NEXT:    [[DIV_CMP:%.*]] = icmp uge i8 [[X]], 3
; CHECK-NEXT:    [[DIV:%.*]] = zext i1 [[DIV_CMP]] to i8
; CHECK-NEXT:    ret i8 [[DIV]]
;
  %x = load i8, ptr %x.ptr, !range !{ i8 0, i8 4 }
  %div = udiv i8 %x, 3
  ret i8 %div
}
; Same as @constant.divisor.v4, but the [0, 4) bound comes from masking with 3.
define i8 @constant.divisor.x.mask.v4(i8 %x) {
; CHECK-LABEL: @constant.divisor.x.mask.v4(
; CHECK-NEXT:    [[X_MASKED:%.*]] = and i8 [[X:%.*]], 3
; CHECK-NEXT:    [[DIV_CMP:%.*]] = icmp uge i8 [[X_MASKED]], 3
; CHECK-NEXT:    [[DIV:%.*]] = zext i1 [[DIV_CMP]] to i8
; CHECK-NEXT:    ret i8 [[DIV]]
;
  %x.masked = and i8 %x, 3
  %div = udiv i8 %x.masked, 3
  ret i8 %div
}
; %x in [0, 5): quotient is still at most 1, so the expansion applies.
define i8 @constant.divisor.v5(i8 %x) {
; CHECK-LABEL: @constant.divisor.v5(
; CHECK-NEXT:    [[CMP_X_UPPER:%.*]] = icmp ult i8 [[X:%.*]], 5
; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP_X_UPPER]])
; CHECK-NEXT:    [[DIV_CMP:%.*]] = icmp uge i8 [[X]], 3
; CHECK-NEXT:    [[DIV:%.*]] = zext i1 [[DIV_CMP]] to i8
; CHECK-NEXT:    ret i8 [[DIV]]
;
  %cmp.x.upper = icmp ult i8 %x, 5
  call void @llvm.assume(i1 %cmp.x.upper)
  %div = udiv i8 %x, 3
  ret i8 %div
}
; %x in [0, 6): quotient is still at most 1, so the expansion applies.
define i8 @constant.divisor.v6(i8 %x) {
; CHECK-LABEL: @constant.divisor.v6(
; CHECK-NEXT:    [[CMP_X_UPPER:%.*]] = icmp ult i8 [[X:%.*]], 6
; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP_X_UPPER]])
; CHECK-NEXT:    [[DIV_CMP:%.*]] = icmp uge i8 [[X]], 3
; CHECK-NEXT:    [[DIV:%.*]] = zext i1 [[DIV_CMP]] to i8
; CHECK-NEXT:    ret i8 [[DIV]]
;
  %cmp.x.upper = icmp ult i8 %x, 6
  call void @llvm.assume(i1 %cmp.x.upper)
  %div = udiv i8 %x, 3
  ret i8 %div
}
; %x in [0, 7): quotient can be 2, so the udiv must NOT be expanded.
define i8 @constant.divisor.v7(i8 %x) {
; CHECK-LABEL: @constant.divisor.v7(
; CHECK-NEXT:    [[CMP_X_UPPER:%.*]] = icmp ult i8 [[X:%.*]], 7
; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP_X_UPPER]])
; CHECK-NEXT:    [[DIV:%.*]] = udiv i8 [[X]], 3
; CHECK-NEXT:    ret i8 [[DIV]]
;
  %cmp.x.upper = icmp ult i8 %x, 7
  call void @llvm.assume(i1 %cmp.x.upper)
  %div = udiv i8 %x, 3
  ret i8 %div
}
; Both are variable. Bounds are known
; %x in [0, 3), %y in [3, 4]: %x is always less than %y, so the quotient is 0.
define i8 @variable.v3(i8 %x, i8 %y) {
; CHECK-LABEL: @variable.v3(
; CHECK-NEXT:    [[CMP_X:%.*]] = icmp ult i8 [[X:%.*]], 3
; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP_X]])
; CHECK-NEXT:    [[CMP_Y_LOWER:%.*]] = icmp uge i8 [[Y:%.*]], 3
; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP_Y_LOWER]])
; CHECK-NEXT:    [[CMP_Y_UPPER:%.*]] = icmp ule i8 [[Y]], 4
; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP_Y_UPPER]])
; CHECK-NEXT:    ret i8 0
;
  %cmp.x = icmp ult i8 %x, 3
  call void @llvm.assume(i1 %cmp.x)
  %cmp.y.lower = icmp uge i8 %y, 3
  call void @llvm.assume(i1 %cmp.y.lower)
  %cmp.y.upper = icmp ule i8 %y, 4
  call void @llvm.assume(i1 %cmp.y.upper)
  %div = udiv i8 %x, %y
  ret i8 %div
}
; %x in [0, 4), %y in [3, 4]: quotient is 0 or 1, expanded to compare + zext.
define i8 @variable.v4(i8 %x, i8 %y) {
; CHECK-LABEL: @variable.v4(
; CHECK-NEXT:    [[CMP_X:%.*]] = icmp ult i8 [[X:%.*]], 4
; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP_X]])
; CHECK-NEXT:    [[CMP_Y_LOWER:%.*]] = icmp uge i8 [[Y:%.*]], 3
; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP_Y_LOWER]])
; CHECK-NEXT:    [[CMP_Y_UPPER:%.*]] = icmp ule i8 [[Y]], 4
; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP_Y_UPPER]])
; CHECK-NEXT:    [[DIV_CMP:%.*]] = icmp uge i8 [[X]], [[Y]]
; CHECK-NEXT:    [[DIV:%.*]] = zext i1 [[DIV_CMP]] to i8
; CHECK-NEXT:    ret i8 [[DIV]]
;
  %cmp.x = icmp ult i8 %x, 4
  call void @llvm.assume(i1 %cmp.x)
  %cmp.y.lower = icmp uge i8 %y, 3
  call void @llvm.assume(i1 %cmp.y.lower)
  %cmp.y.upper = icmp ule i8 %y, 4
  call void @llvm.assume(i1 %cmp.y.upper)
  %div = udiv i8 %x, %y
  ret i8 %div
}
; Same as @variable.v4, but both bounds come from !range metadata on the loads.
define i8 @variable.v4.range(ptr %x.ptr, ptr %y.ptr) {
; CHECK-LABEL: @variable.v4.range(
; CHECK-NEXT:    [[X:%.*]] = load i8, ptr [[X_PTR:%.*]], align 1, !range [[RNG0]]
; CHECK-NEXT:    [[Y:%.*]] = load i8, ptr [[Y_PTR:%.*]], align 1, !range [[RNG1:![0-9]+]]
; CHECK-NEXT:    [[DIV_CMP:%.*]] = icmp uge i8 [[X]], [[Y]]
; CHECK-NEXT:    [[DIV:%.*]] = zext i1 [[DIV_CMP]] to i8
; CHECK-NEXT:    ret i8 [[DIV]]
;
  %x = load i8, ptr %x.ptr, !range !{ i8 0, i8 4 }
  %y = load i8, ptr %y.ptr, !range !{ i8 3, i8 5 }
  %div = udiv i8 %x, %y
  ret i8 %div
}
; %x in [0, 5), %y in [3, 4]: quotient is still at most 1, expansion applies.
define i8 @variable.v5(i8 %x, i8 %y) {
; CHECK-LABEL: @variable.v5(
; CHECK-NEXT:    [[CMP_X:%.*]] = icmp ult i8 [[X:%.*]], 5
; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP_X]])
; CHECK-NEXT:    [[CMP_Y_LOWER:%.*]] = icmp uge i8 [[Y:%.*]], 3
; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP_Y_LOWER]])
; CHECK-NEXT:    [[CMP_Y_UPPER:%.*]] = icmp ule i8 [[Y]], 4
; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP_Y_UPPER]])
; CHECK-NEXT:    [[DIV_CMP:%.*]] = icmp uge i8 [[X]], [[Y]]
; CHECK-NEXT:    [[DIV:%.*]] = zext i1 [[DIV_CMP]] to i8
; CHECK-NEXT:    ret i8 [[DIV]]
;
  %cmp.x = icmp ult i8 %x, 5
  call void @llvm.assume(i1 %cmp.x)
  %cmp.y.lower = icmp uge i8 %y, 3
  call void @llvm.assume(i1 %cmp.y.lower)
  %cmp.y.upper = icmp ule i8 %y, 4
  call void @llvm.assume(i1 %cmp.y.upper)
  %div = udiv i8 %x, %y
  ret i8 %div
}
; %x in [0, 6), %y in [3, 4]: quotient is still at most 1, expansion applies.
define i8 @variable.v6(i8 %x, i8 %y) {
; CHECK-LABEL: @variable.v6(
; CHECK-NEXT:    [[CMP_X:%.*]] = icmp ult i8 [[X:%.*]], 6
; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP_X]])
; CHECK-NEXT:    [[CMP_Y_LOWER:%.*]] = icmp uge i8 [[Y:%.*]], 3
; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP_Y_LOWER]])
; CHECK-NEXT:    [[CMP_Y_UPPER:%.*]] = icmp ule i8 [[Y]], 4
; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP_Y_UPPER]])
; CHECK-NEXT:    [[DIV_CMP:%.*]] = icmp uge i8 [[X]], [[Y]]
; CHECK-NEXT:    [[DIV:%.*]] = zext i1 [[DIV_CMP]] to i8
; CHECK-NEXT:    ret i8 [[DIV]]
;
  %cmp.x = icmp ult i8 %x, 6
  call void @llvm.assume(i1 %cmp.x)
  %cmp.y.lower = icmp uge i8 %y, 3
  call void @llvm.assume(i1 %cmp.y.lower)
  %cmp.y.upper = icmp ule i8 %y, 4
  call void @llvm.assume(i1 %cmp.y.upper)
  %div = udiv i8 %x, %y
  ret i8 %div
}
; %x in [0, 7), %y in [3, 4]: quotient can be 2, so the udiv must NOT be expanded.
define i8 @variable.v7(i8 %x, i8 %y) {
; CHECK-LABEL: @variable.v7(
; CHECK-NEXT:    [[CMP_X:%.*]] = icmp ult i8 [[X:%.*]], 7
; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP_X]])
; CHECK-NEXT:    [[CMP_Y_LOWER:%.*]] = icmp uge i8 [[Y:%.*]], 3
; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP_Y_LOWER]])
; CHECK-NEXT:    [[CMP_Y_UPPER:%.*]] = icmp ule i8 [[Y]], 4
; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP_Y_UPPER]])
; CHECK-NEXT:    [[DIV:%.*]] = udiv i8 [[X]], [[Y]]
; CHECK-NEXT:    ret i8 [[DIV]]
;
  %cmp.x = icmp ult i8 %x, 7
  call void @llvm.assume(i1 %cmp.x)
  %cmp.y.lower = icmp uge i8 %y, 3
  call void @llvm.assume(i1 %cmp.y.lower)
  %cmp.y.upper = icmp ule i8 %y, 4
  call void @llvm.assume(i1 %cmp.y.upper)
  %div = udiv i8 %x, %y
  ret i8 %div
}
; %x in [0, 127) is always smaller than the divisor 127, so the quotient is 0.
define i8 @large.divisor.v0(i8 %x) {
; CHECK-LABEL: @large.divisor.v0(
; CHECK-NEXT:    [[CMP_X_UPPER:%.*]] = icmp ult i8 [[X:%.*]], 127
; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP_X_UPPER]])
; CHECK-NEXT:    ret i8 0
;
  %cmp.x.upper = icmp ult i8 %x, 127
  call void @llvm.assume(i1 %cmp.x.upper)
  %div = udiv i8 %x, 127
  ret i8 %div
}
; %x in [0, 128): quotient is 0 or 1, expanded to compare + zext.
; (The unsigned bound 128 prints as -128 in the signed i8 textual form.)
define i8 @large.divisor.v1(i8 %x) {
; CHECK-LABEL: @large.divisor.v1(
; CHECK-NEXT:    [[CMP_X_UPPER:%.*]] = icmp ult i8 [[X:%.*]], -128
; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP_X_UPPER]])
; CHECK-NEXT:    [[DIV_CMP:%.*]] = icmp uge i8 [[X]], 127
; CHECK-NEXT:    [[DIV:%.*]] = zext i1 [[DIV_CMP]] to i8
; CHECK-NEXT:    ret i8 [[DIV]]
;
  %cmp.x.upper = icmp ult i8 %x, 128
  call void @llvm.assume(i1 %cmp.x.upper)
  %div = udiv i8 %x, 127
  ret i8 %div
}
; Same as @large.divisor.v1, but the [0, 128) bound comes from !range metadata.
define i8 @large.divisor.v1.range(ptr %x.ptr) {
; CHECK-LABEL: @large.divisor.v1.range(
; CHECK-NEXT:    [[X:%.*]] = load i8, ptr [[X_PTR:%.*]], align 1, !range [[RNG2:![0-9]+]]
; CHECK-NEXT:    [[DIV_CMP:%.*]] = icmp uge i8 [[X]], 127
; CHECK-NEXT:    [[DIV:%.*]] = zext i1 [[DIV_CMP]] to i8
; CHECK-NEXT:    ret i8 [[DIV]]
;
  %x = load i8, ptr %x.ptr, !range !{ i8 0, i8 128 }
  %div = udiv i8 %x, 127
  ret i8 %div
}
; No bound on %x: quotient may exceed 1, so the udiv must NOT be expanded.
define i8 @large.divisor.v2.unbound.x(i8 %x) {
; CHECK-LABEL: @large.divisor.v2.unbound.x(
; CHECK-NEXT:    [[DIV:%.*]] = udiv i8 [[X:%.*]], 127
; CHECK-NEXT:    ret i8 [[DIV]]
;
  %div = udiv i8 %x, 127
  ret i8 %div
}
; %x in [0, 128) is always smaller than the divisor 128, so the quotient is 0.
define i8 @large.divisor.with.overflow.v0(i8 %x) {
; CHECK-LABEL: @large.divisor.with.overflow.v0(
; CHECK-NEXT:    [[CMP_X_UPPER:%.*]] = icmp ult i8 [[X:%.*]], -128
; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP_X_UPPER]])
; CHECK-NEXT:    ret i8 0
;
  %cmp.x.upper = icmp ult i8 %x, 128
  call void @llvm.assume(i1 %cmp.x.upper)
  %div = udiv i8 %x, 128
  ret i8 %div
}
; %x in [0, 129): quotient is 0 or 1 even though the divisor is the unsigned
; midpoint 128 (printed as -128 in signed i8 form); expansion applies.
define i8 @large.divisor.with.overflow.v1(i8 %x) {
; CHECK-LABEL: @large.divisor.with.overflow.v1(
; CHECK-NEXT:    [[CMP_X_UPPER:%.*]] = icmp ult i8 [[X:%.*]], -127
; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP_X_UPPER]])
; CHECK-NEXT:    [[DIV_CMP:%.*]] = icmp uge i8 [[X]], -128
; CHECK-NEXT:    [[DIV:%.*]] = zext i1 [[DIV_CMP]] to i8
; CHECK-NEXT:    ret i8 [[DIV]]
;
  %cmp.x.upper = icmp ult i8 %x, 129
  call void @llvm.assume(i1 %cmp.x.upper)
  %div = udiv i8 %x, 128
  ret i8 %div
}
; Same as @large.divisor.with.overflow.v1, but the bound comes from !range metadata.
define i8 @large.divisor.with.overflow.v1.range(ptr %x.ptr) {
; CHECK-LABEL: @large.divisor.with.overflow.v1.range(
; CHECK-NEXT:    [[X:%.*]] = load i8, ptr [[X_PTR:%.*]], align 1, !range [[RNG3:![0-9]+]]
; CHECK-NEXT:    [[DIV_CMP:%.*]] = icmp uge i8 [[X]], -128
; CHECK-NEXT:    [[DIV:%.*]] = zext i1 [[DIV_CMP]] to i8
; CHECK-NEXT:    ret i8 [[DIV]]
;
  %x = load i8, ptr %x.ptr, !range !{ i8 0, i8 129 }
  %div = udiv i8 %x, 128
  ret i8 %div
}
; No bound on %x, but any u8 divided by 128 yields 0 or 1, so the expansion
; applies even without range information.
define i8 @large.divisor.with.overflow.v2.unbound.x(i8 %x) {
; CHECK-LABEL: @large.divisor.with.overflow.v2.unbound.x(
; CHECK-NEXT:    [[DIV_CMP:%.*]] = icmp uge i8 [[X:%.*]], -128
; CHECK-NEXT:    [[DIV:%.*]] = zext i1 [[DIV_CMP]] to i8
; CHECK-NEXT:    ret i8 [[DIV]]
;
  %div = udiv i8 %x, 128
  ret i8 %div
}
302 define i8 @known_uge(i8 noundef %x) {
303 ; CHECK-LABEL: @known_uge(
304 ; CHECK-NEXT: [[CMP_X_UPPER:%.*]] = icmp ult i8 [[X:%.*]], 6
305 ; CHECK-NEXT: call void @llvm.assume(i1 [[CMP_X_UPPER]])
306 ; CHECK-NEXT: [[CMP_X_LOWER:%.*]] = icmp uge i8 [[X]], 3
307 ; CHECK-NEXT: call void @llvm.assume(i1 [[CMP_X_LOWER]])
308 ; CHECK-NEXT: ret i8 1
310 %cmp.x.upper = icmp ult i8 %x, 6
311 call void @llvm.assume(i1 %cmp.x.upper)
312 %cmp.x.lower = icmp uge i8 %x, 3
313 call void @llvm.assume(i1 %cmp.x.lower)