1 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
2 ; RUN: opt < %s -passes="loop-reduce,loop-term-fold" -S | FileCheck %s
4 target datalayout = "e-p:64:64:64-n64"
; const_tripcount: constant 379-iteration loop with a 4-byte pointer stride.
; loop-term-fold replaces the down-counting integer-IV exit test with an eq
; pointer compare against a precomputed end pointer (a+84 + 379*4 = a+1600).
6 define void @const_tripcount(ptr %a) {
7 ; CHECK-LABEL: @const_tripcount(
9 ; CHECK-NEXT: [[UGLYGEP:%.*]] = getelementptr i8, ptr [[A:%.*]], i64 84
10 ; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[A]], i64 1600
11 ; CHECK-NEXT: br label [[FOR_BODY:%.*]]
13 ; CHECK-NEXT: [[LSR_IV1:%.*]] = phi ptr [ [[UGLYGEP2:%.*]], [[FOR_BODY]] ], [ [[UGLYGEP]], [[ENTRY:%.*]] ]
14 ; CHECK-NEXT: store i32 1, ptr [[LSR_IV1]], align 4
15 ; CHECK-NEXT: [[UGLYGEP2]] = getelementptr i8, ptr [[LSR_IV1]], i64 4
16 ; CHECK-NEXT: [[LSR_FOLD_TERM_COND_REPLACED_TERM_COND:%.*]] = icmp eq ptr [[UGLYGEP2]], [[SCEVGEP]]
17 ; CHECK-NEXT: br i1 [[LSR_FOLD_TERM_COND_REPLACED_TERM_COND]], label [[FOR_END:%.*]], label [[FOR_BODY]]
19 ; CHECK-NEXT: ret void
22 %uglygep = getelementptr i8, ptr %a, i64 84
25 for.body: ; preds = %for.body, %entry
26 %lsr.iv1 = phi ptr [ %uglygep2, %for.body ], [ %uglygep, %entry ]
27 %lsr.iv = phi i64 [ %lsr.iv.next, %for.body ], [ 379, %entry ]
28 store i32 1, ptr %lsr.iv1, align 4
29 %lsr.iv.next = add nsw i64 %lsr.iv, -1
30 %uglygep2 = getelementptr i8, ptr %lsr.iv1, i64 4
31 %exitcond.not = icmp eq i64 %lsr.iv.next, 0
32 br i1 %exitcond.not, label %for.end, label %for.body
34 for.end: ; preds = %for.body
; runtime_tripcount: the trip count comes from the i32 argument %N; the end
; pointer is expanded in the preheader as a + (zext(N-1) << 2) + 88
; (88 = initial offset 84 plus one 4-byte step).
38 define void @runtime_tripcount(ptr %a, i32 %N) {
39 ; CHECK-LABEL: @runtime_tripcount(
41 ; CHECK-NEXT: [[UGLYGEP:%.*]] = getelementptr i8, ptr [[A:%.*]], i32 84
42 ; CHECK-NEXT: [[TMP0:%.*]] = add nsw i32 [[N:%.*]], -1
43 ; CHECK-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64
44 ; CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 2
45 ; CHECK-NEXT: [[TMP3:%.*]] = add nuw nsw i64 [[TMP2]], 88
46 ; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[A]], i64 [[TMP3]]
47 ; CHECK-NEXT: br label [[FOR_BODY:%.*]]
49 ; CHECK-NEXT: [[LSR_IV1:%.*]] = phi ptr [ [[UGLYGEP2:%.*]], [[FOR_BODY]] ], [ [[UGLYGEP]], [[ENTRY:%.*]] ]
50 ; CHECK-NEXT: store i32 1, ptr [[LSR_IV1]], align 4
51 ; CHECK-NEXT: [[UGLYGEP2]] = getelementptr i8, ptr [[LSR_IV1]], i64 4
52 ; CHECK-NEXT: [[LSR_FOLD_TERM_COND_REPLACED_TERM_COND:%.*]] = icmp eq ptr [[UGLYGEP2]], [[SCEVGEP]]
53 ; CHECK-NEXT: br i1 [[LSR_FOLD_TERM_COND_REPLACED_TERM_COND]], label [[FOR_END:%.*]], label [[FOR_BODY]]
55 ; CHECK-NEXT: ret void
58 %uglygep = getelementptr i8, ptr %a, i32 84
61 for.body: ; preds = %for.body, %entry
62 %lsr.iv1 = phi ptr [ %uglygep2, %for.body ], [ %uglygep, %entry ]
63 %lsr.iv = phi i32 [ %lsr.iv.next, %for.body ], [ %N, %entry ]
64 store i32 1, ptr %lsr.iv1, align 4
65 %lsr.iv.next = add nsw i32 %lsr.iv, -1
66 %uglygep2 = getelementptr i8, ptr %lsr.iv1, i64 4
67 %exitcond.not = icmp eq i32 %lsr.iv.next, 0
68 br i1 %exitcond.not, label %for.end, label %for.body
70 for.end: ; preds = %for.body
74 ; In this case, the i8 IV's increment *isn't* nsw. As a result, an N of 0
75 ; is well defined, and thus the post-increment value starts at 255.
; wrap_around: the i8 counter is decremented without nsw, so wrap-around is
; well defined; the end pointer is expanded as a + (zext(N-1) << 2) + 4.
76 define void @wrap_around(ptr %a, i8 %N) {
77 ; CHECK-LABEL: @wrap_around(
79 ; CHECK-NEXT: [[TMP0:%.*]] = add i8 [[N:%.*]], -1
80 ; CHECK-NEXT: [[TMP1:%.*]] = zext i8 [[TMP0]] to i64
81 ; CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 2
82 ; CHECK-NEXT: [[TMP3:%.*]] = add nuw nsw i64 [[TMP2]], 4
83 ; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[A:%.*]], i64 [[TMP3]]
84 ; CHECK-NEXT: br label [[FOR_BODY:%.*]]
86 ; CHECK-NEXT: [[LSR_IV1:%.*]] = phi ptr [ [[UGLYGEP2:%.*]], [[FOR_BODY]] ], [ [[A]], [[ENTRY:%.*]] ]
87 ; CHECK-NEXT: store i8 1, ptr [[LSR_IV1]], align 4
88 ; CHECK-NEXT: [[UGLYGEP2]] = getelementptr i8, ptr [[LSR_IV1]], i64 4
89 ; CHECK-NEXT: [[LSR_FOLD_TERM_COND_REPLACED_TERM_COND:%.*]] = icmp eq ptr [[UGLYGEP2]], [[SCEVGEP]]
90 ; CHECK-NEXT: br i1 [[LSR_FOLD_TERM_COND_REPLACED_TERM_COND]], label [[FOR_END:%.*]], label [[FOR_BODY]]
92 ; CHECK-NEXT: ret void
97 for.body: ; preds = %for.body, %entry
98 %lsr.iv1 = phi ptr [ %uglygep2, %for.body ], [ %a, %entry ]
99 %lsr.iv = phi i8 [ %lsr.iv.next, %for.body ], [ %N, %entry ]
100 store i8 1, ptr %lsr.iv1, align 4
101 %lsr.iv.next = add i8 %lsr.iv, -1
102 %uglygep2 = getelementptr i8, ptr %lsr.iv1, i64 4
103 %exitcond.not = icmp eq i8 %lsr.iv.next, 0
104 br i1 %exitcond.not, label %for.end, label %for.body
106 for.end: ; preds = %for.body
110 ; The replacing AddRec IV is a complicated AddRec. This tests whether
111 ; the fold terminating condition transformation is writing new terminating
112 ; condition in the correct type.
; ptr_of_ptr_addrec: the pointer IV strides over ptr-sized (8-byte) elements;
; the replacement compare is emitted on the pointer IV with end pointer
; start + 8*(length-1) + 8.
113 define void @ptr_of_ptr_addrec(ptr %ptrptr, i32 %length) {
114 ; CHECK-LABEL: @ptr_of_ptr_addrec(
116 ; CHECK-NEXT: [[START_PTRPTR1:%.*]] = getelementptr inbounds ptr, ptr [[START_PTRPTR:%.*]]
117 ; CHECK-NEXT: [[TMP0:%.*]] = add i32 [[LENGTH:%.*]], -1
118 ; CHECK-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64
119 ; CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 3
120 ; CHECK-NEXT: [[TMP3:%.*]] = add nuw nsw i64 [[TMP2]], 8
121 ; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[START_PTRPTR]], i64 [[TMP3]]
122 ; CHECK-NEXT: br label [[FOR_BODY:%.*]]
124 ; CHECK-NEXT: [[IT_04:%.*]] = phi ptr [ [[INCDEC_PTR:%.*]], [[FOR_BODY]] ], [ [[START_PTRPTR1]], [[ENTRY:%.*]] ]
125 ; CHECK-NEXT: [[TMP4:%.*]] = load ptr, ptr [[IT_04]], align 8
126 ; CHECK-NEXT: tail call void @foo(ptr [[TMP4]])
127 ; CHECK-NEXT: [[INCDEC_PTR]] = getelementptr ptr, ptr [[IT_04]], i64 1
128 ; CHECK-NEXT: [[LSR_FOLD_TERM_COND_REPLACED_TERM_COND:%.*]] = icmp eq ptr [[INCDEC_PTR]], [[SCEVGEP]]
129 ; CHECK-NEXT: br i1 [[LSR_FOLD_TERM_COND_REPLACED_TERM_COND]], label [[FOR_END:%.*]], label [[FOR_BODY]]
131 ; CHECK-NEXT: ret void
134 %start.ptrptr = getelementptr inbounds ptr, ptr %ptrptr
137 for.body: ; preds = %entry, %for.body
138 %i.05 = phi i32 [ %dec, %for.body ], [ %length, %entry ]
139 %it.04 = phi ptr [ %incdec.ptr, %for.body ], [ %start.ptrptr, %entry ]
140 %0 = load ptr, ptr %it.04, align 8
141 tail call void @foo(ptr %0)
142 %incdec.ptr = getelementptr inbounds ptr, ptr %it.04, i64 1
143 %dec = add nsw i32 %i.05, -1
144 %tobool.not = icmp eq i32 %dec, 0
145 br i1 %tobool.not, label %for.end, label %for.body
147 for.end: ; preds = %for.body
151 declare void @foo(ptr)
; iv_start_non_preheader: entry branches around the loop when %length == 0, so
; the loop has no dedicated preheader in the input; the CHECK lines show the
; end-pointer expansion is emitted in a newly created for.body.preheader block.
153 define void @iv_start_non_preheader(ptr %mark, i32 signext %length) {
154 ; CHECK-LABEL: @iv_start_non_preheader(
156 ; CHECK-NEXT: [[TOBOOL_NOT3:%.*]] = icmp eq i32 [[LENGTH:%.*]], 0
157 ; CHECK-NEXT: br i1 [[TOBOOL_NOT3]], label [[FOR_COND_CLEANUP:%.*]], label [[FOR_BODY_PREHEADER:%.*]]
158 ; CHECK: for.body.preheader:
159 ; CHECK-NEXT: [[TMP0:%.*]] = add i32 [[LENGTH]], -1
160 ; CHECK-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64
161 ; CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 3
162 ; CHECK-NEXT: [[TMP3:%.*]] = add nuw nsw i64 [[TMP2]], 8
163 ; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[MARK:%.*]], i64 [[TMP3]]
164 ; CHECK-NEXT: br label [[FOR_BODY:%.*]]
165 ; CHECK: for.cond.cleanup.loopexit:
166 ; CHECK-NEXT: br label [[FOR_COND_CLEANUP]]
167 ; CHECK: for.cond.cleanup:
168 ; CHECK-NEXT: ret void
170 ; CHECK-NEXT: [[DST_04:%.*]] = phi ptr [ [[INCDEC_PTR:%.*]], [[FOR_BODY]] ], [ [[MARK]], [[FOR_BODY_PREHEADER]] ]
171 ; CHECK-NEXT: [[TMP4:%.*]] = load ptr, ptr [[DST_04]], align 8
172 ; CHECK-NEXT: [[TMP5:%.*]] = call ptr @foo(ptr [[TMP4]])
173 ; CHECK-NEXT: [[INCDEC_PTR]] = getelementptr ptr, ptr [[DST_04]], i64 1
174 ; CHECK-NEXT: [[LSR_FOLD_TERM_COND_REPLACED_TERM_COND:%.*]] = icmp eq ptr [[INCDEC_PTR]], [[SCEVGEP]]
175 ; CHECK-NEXT: br i1 [[LSR_FOLD_TERM_COND_REPLACED_TERM_COND]], label [[FOR_COND_CLEANUP_LOOPEXIT:%.*]], label [[FOR_BODY]]
178 %tobool.not3 = icmp eq i32 %length, 0
179 br i1 %tobool.not3, label %for.cond.cleanup, label %for.body
181 for.cond.cleanup: ; preds = %for.body, %entry
184 for.body: ; preds = %entry, %for.body
185 %i.05 = phi i32 [ %dec, %for.body ], [ %length, %entry ]
186 %dst.04 = phi ptr [ %incdec.ptr, %for.body ], [ %mark, %entry ]
187 %0 = load ptr, ptr %dst.04, align 8
188 call ptr @foo(ptr %0)
189 %incdec.ptr = getelementptr inbounds ptr, ptr %dst.04, i64 1
190 %dec = add nsw i32 %i.05, -1
191 %tobool.not = icmp eq i32 %dec, 0
192 br i1 %tobool.not, label %for.cond.cleanup, label %for.body
195 ; Consider the case where %a points to a buffer exactly 17 bytes long. The
196 ; loop below will access bytes: 0, 4, 8, and 16. The key bit is that we
197 ; advance the pointer IV by *4* each time, and thus on the iteration we write
198 ; byte 16, %uglygep2 (the pointer increment) is past the end of the underlying
199 ; storage and thus violates the inbounds requirements. As a result, %uglygep2
200 ; is poison on the final iteration. If we insert a branch on that value
201 ; (without stripping the poison flag), we have inserted undefined behavior
202 ; where it did not previously exist.
; inbounds_poison_use: the input increment is 'getelementptr inbounds', but the
; CHECK lines show it re-emitted WITHOUT inbounds, so branching on the new
; pointer equality cannot branch on poison.
203 define void @inbounds_poison_use(ptr %a) {
204 ; CHECK-LABEL: @inbounds_poison_use(
206 ; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[A:%.*]], i64 16
207 ; CHECK-NEXT: br label [[FOR_BODY:%.*]]
209 ; CHECK-NEXT: [[LSR_IV1:%.*]] = phi ptr [ [[UGLYGEP2:%.*]], [[FOR_BODY]] ], [ [[A]], [[ENTRY:%.*]] ]
210 ; CHECK-NEXT: store i8 1, ptr [[LSR_IV1]], align 4
211 ; CHECK-NEXT: [[UGLYGEP2]] = getelementptr i8, ptr [[LSR_IV1]], i64 4
212 ; CHECK-NEXT: [[LSR_FOLD_TERM_COND_REPLACED_TERM_COND:%.*]] = icmp eq ptr [[UGLYGEP2]], [[SCEVGEP]]
213 ; CHECK-NEXT: br i1 [[LSR_FOLD_TERM_COND_REPLACED_TERM_COND]], label [[FOR_END:%.*]], label [[FOR_BODY]]
215 ; CHECK-NEXT: ret void
220 for.body: ; preds = %for.body, %entry
221 %lsr.iv1 = phi ptr [ %uglygep2, %for.body ], [ %a, %entry ]
222 %lsr.iv = phi i32 [ %lsr.iv.next, %for.body ], [ 4, %entry ]
223 store i8 1, ptr %lsr.iv1, align 4
224 %lsr.iv.next = add nsw i32 %lsr.iv, -1
225 %uglygep2 = getelementptr inbounds i8, ptr %lsr.iv1, i64 4
226 %exitcond.not = icmp eq i32 %lsr.iv.next, 0
227 br i1 %exitcond.not, label %for.end, label %for.body
229 for.end: ; preds = %for.body
233 ; In this case, the integer IV has a larger bitwidth than the pointer IV.
234 ; This means that the smaller IV may wrap around multiple times before
235 ; the original loop exit is taken.
; iv_size: the i128 counter is wider than the 64-bit pointer IV, so the exit
; test must be left on the integer IV — the CHECK lines keep the original
; icmp eq i128 (no pointer-compare replacement).
236 define void @iv_size(ptr %a, i128 %N) {
237 ; CHECK-LABEL: @iv_size(
239 ; CHECK-NEXT: br label [[FOR_BODY:%.*]]
241 ; CHECK-NEXT: [[LSR_IV1:%.*]] = phi ptr [ [[UGLYGEP2:%.*]], [[FOR_BODY]] ], [ [[A:%.*]], [[ENTRY:%.*]] ]
242 ; CHECK-NEXT: [[LSR_IV:%.*]] = phi i128 [ [[LSR_IV_NEXT:%.*]], [[FOR_BODY]] ], [ [[N:%.*]], [[ENTRY]] ]
243 ; CHECK-NEXT: store i32 1, ptr [[LSR_IV1]], align 4
244 ; CHECK-NEXT: [[LSR_IV_NEXT]] = add nsw i128 [[LSR_IV]], -1
245 ; CHECK-NEXT: [[UGLYGEP2]] = getelementptr i8, ptr [[LSR_IV1]], i64 4
246 ; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i128 [[LSR_IV_NEXT]], 0
247 ; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END:%.*]], label [[FOR_BODY]]
249 ; CHECK-NEXT: ret void
254 for.body: ; preds = %for.body, %entry
255 %lsr.iv1 = phi ptr [ %uglygep2, %for.body ], [ %a, %entry ]
256 %lsr.iv = phi i128 [ %lsr.iv.next, %for.body ], [ %N, %entry ]
257 store i32 1, ptr %lsr.iv1, align 4
258 %lsr.iv.next = add nsw i128 %lsr.iv, -1
259 %uglygep2 = getelementptr i8, ptr %lsr.iv1, i64 4
260 %exitcond.not = icmp eq i128 %lsr.iv.next, 0
261 br i1 %exitcond.not, label %for.end, label %for.body
263 for.end: ; preds = %for.body
267 ; Check correct folding of a non-equality terminating condition.
268 ; Due to SLE, the end-pointer offset must be 1600.
; IcmpSle: non-equality (sle) exit on a down-counting IV; folds to an eq
; pointer compare with end pointer a+1600 (84 + 379*4).
269 define void @IcmpSle(ptr %a) {
270 ; CHECK-LABEL: @IcmpSle(
272 ; CHECK-NEXT: [[UGLYGEP:%.*]] = getelementptr i8, ptr [[A:%.*]], i32 84
273 ; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[A]], i64 1600
274 ; CHECK-NEXT: br label [[FOR_BODY:%.*]]
276 ; CHECK-NEXT: [[LSR_IV1:%.*]] = phi ptr [ [[UGLYGEP2:%.*]], [[FOR_BODY]] ], [ [[UGLYGEP]], [[ENTRY:%.*]] ]
277 ; CHECK-NEXT: store i32 1, ptr [[LSR_IV1]], align 4
278 ; CHECK-NEXT: [[UGLYGEP2]] = getelementptr i8, ptr [[LSR_IV1]], i32 4
279 ; CHECK-NEXT: [[LSR_FOLD_TERM_COND_REPLACED_TERM_COND:%.*]] = icmp eq ptr [[UGLYGEP2]], [[SCEVGEP]]
280 ; CHECK-NEXT: br i1 [[LSR_FOLD_TERM_COND_REPLACED_TERM_COND]], label [[FOR_END:%.*]], label [[FOR_BODY]]
282 ; CHECK-NEXT: ret void
285 %uglygep = getelementptr i8, ptr %a, i32 84
288 for.body: ; preds = %for.body, %entry
289 %lsr.iv1 = phi ptr [ %uglygep2, %for.body ], [ %uglygep, %entry ]
290 %lsr.iv = phi i32 [ %lsr.iv.next, %for.body ], [ 379, %entry ]
291 store i32 1, ptr %lsr.iv1, align 4
292 %lsr.iv.next = add nsw i32 %lsr.iv, -1
293 %uglygep2 = getelementptr i8, ptr %lsr.iv1, i32 4
294 %exitcond.not = icmp sle i32 %lsr.iv.next, 0
295 br i1 %exitcond.not, label %for.end, label %for.body
297 for.end: ; preds = %for.body
301 ; Due to SLT, the end-pointer offset must be 1604.
; IcmpSlt: slt exits one iteration later than sle, so the end pointer is a+1604
; rather than a+1600.
302 define void @IcmpSlt(ptr %a) {
303 ; CHECK-LABEL: @IcmpSlt(
305 ; CHECK-NEXT: [[UGLYGEP:%.*]] = getelementptr i8, ptr [[A:%.*]], i32 84
306 ; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[A]], i64 1604
307 ; CHECK-NEXT: br label [[FOR_BODY:%.*]]
309 ; CHECK-NEXT: [[LSR_IV1:%.*]] = phi ptr [ [[UGLYGEP2:%.*]], [[FOR_BODY]] ], [ [[UGLYGEP]], [[ENTRY:%.*]] ]
310 ; CHECK-NEXT: store i32 1, ptr [[LSR_IV1]], align 4
311 ; CHECK-NEXT: [[UGLYGEP2]] = getelementptr i8, ptr [[LSR_IV1]], i32 4
312 ; CHECK-NEXT: [[LSR_FOLD_TERM_COND_REPLACED_TERM_COND:%.*]] = icmp eq ptr [[UGLYGEP2]], [[SCEVGEP]]
313 ; CHECK-NEXT: br i1 [[LSR_FOLD_TERM_COND_REPLACED_TERM_COND]], label [[FOR_END:%.*]], label [[FOR_BODY]]
315 ; CHECK-NEXT: ret void
318 %uglygep = getelementptr i8, ptr %a, i32 84
321 for.body: ; preds = %for.body, %entry
322 %lsr.iv1 = phi ptr [ %uglygep2, %for.body ], [ %uglygep, %entry ]
323 %lsr.iv = phi i32 [ %lsr.iv.next, %for.body ], [ 379, %entry ]
324 store i32 1, ptr %lsr.iv1, align 4
325 %lsr.iv.next = add nsw i32 %lsr.iv, -1
326 %uglygep2 = getelementptr i8, ptr %lsr.iv1, i32 4
327 %exitcond.not = icmp slt i32 %lsr.iv.next, 0
328 br i1 %exitcond.not, label %for.end, label %for.body
330 for.end: ; preds = %for.body
334 ; Invert predicate and branches
; IcmpSgt: the predicate is inverted (loop continues while sgt 0) and the branch
; successors are swapped; the fold still produces an eq compare against a+1600.
335 define void @IcmpSgt(ptr %a) {
336 ; CHECK-LABEL: @IcmpSgt(
338 ; CHECK-NEXT: [[UGLYGEP:%.*]] = getelementptr i8, ptr [[A:%.*]], i32 84
339 ; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[A]], i64 1600
340 ; CHECK-NEXT: br label [[FOR_BODY:%.*]]
342 ; CHECK-NEXT: [[LSR_IV1:%.*]] = phi ptr [ [[UGLYGEP2:%.*]], [[FOR_BODY]] ], [ [[UGLYGEP]], [[ENTRY:%.*]] ]
343 ; CHECK-NEXT: store i32 1, ptr [[LSR_IV1]], align 4
344 ; CHECK-NEXT: [[UGLYGEP2]] = getelementptr i8, ptr [[LSR_IV1]], i32 4
345 ; CHECK-NEXT: [[LSR_FOLD_TERM_COND_REPLACED_TERM_COND:%.*]] = icmp eq ptr [[UGLYGEP2]], [[SCEVGEP]]
346 ; CHECK-NEXT: br i1 [[LSR_FOLD_TERM_COND_REPLACED_TERM_COND]], label [[FOR_END:%.*]], label [[FOR_BODY]]
348 ; CHECK-NEXT: ret void
351 %uglygep = getelementptr i8, ptr %a, i32 84
354 for.body: ; preds = %for.body, %entry
355 %lsr.iv1 = phi ptr [ %uglygep2, %for.body ], [ %uglygep, %entry ]
356 %lsr.iv = phi i32 [ %lsr.iv.next, %for.body ], [ 379, %entry ]
357 store i32 1, ptr %lsr.iv1, align 4
358 %lsr.iv.next = add nsw i32 %lsr.iv, -1
359 %uglygep2 = getelementptr i8, ptr %lsr.iv1, i32 4
360 %exitcond.not = icmp sgt i32 %lsr.iv.next, 0
361 br i1 %exitcond.not, label %for.body, label %for.end
363 for.end: ; preds = %for.body
367 ; Terminating condition lives in a separate latch block (another.branch)
; SeveralLoopLatch: the terminating compare lives in a separate block
; (another.branch) from the IV updates in for.body; the folded pointer compare
; is placed in that latch block (see the CHECK 'another.branch:' lines).
368 define void @SeveralLoopLatch(ptr %a) {
369 ; CHECK-LABEL: @SeveralLoopLatch(
371 ; CHECK-NEXT: [[UGLYGEP:%.*]] = getelementptr i8, ptr [[A:%.*]], i32 84
372 ; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[A]], i64 1600
373 ; CHECK-NEXT: br label [[FOR_BODY:%.*]]
375 ; CHECK-NEXT: [[LSR_IV1:%.*]] = phi ptr [ [[UGLYGEP2:%.*]], [[ANOTHER_BRANCH:%.*]] ], [ [[UGLYGEP]], [[ENTRY:%.*]] ]
376 ; CHECK-NEXT: store i32 1, ptr [[LSR_IV1]], align 4
377 ; CHECK-NEXT: [[UGLYGEP2]] = getelementptr i8, ptr [[LSR_IV1]], i32 4
378 ; CHECK-NEXT: br label [[ANOTHER_BRANCH]]
379 ; CHECK: another.branch:
380 ; CHECK-NEXT: [[LSR_FOLD_TERM_COND_REPLACED_TERM_COND:%.*]] = icmp eq ptr [[UGLYGEP2]], [[SCEVGEP]]
381 ; CHECK-NEXT: br i1 [[LSR_FOLD_TERM_COND_REPLACED_TERM_COND]], label [[FOR_END:%.*]], label [[FOR_BODY]]
383 ; CHECK-NEXT: ret void
386 %uglygep = getelementptr i8, ptr %a, i32 84
389 for.body: ; preds = %another.branch, %entry
390 %lsr.iv1 = phi ptr [ %uglygep2, %another.branch ], [ %uglygep, %entry ]
391 %lsr.iv = phi i32 [ %lsr.iv.next, %another.branch ], [ 379, %entry ]
392 store i32 1, ptr %lsr.iv1, align 4
393 %lsr.iv.next = add nsw i32 %lsr.iv, -1
394 %uglygep2 = getelementptr i8, ptr %lsr.iv1, i32 4
395 br label %another.branch
398 %exitcond.not = icmp sgt i32 %lsr.iv.next, 0
399 br i1 %exitcond.not, label %for.body, label %for.end
401 for.end: ; preds = %for.body
405 ; Invert branch in SeveralLoopLatch
; SeveralLoopLatch2: same layout as SeveralLoopLatch but the latch branch is
; inverted (sle with swapped successors); the CHECK lines match the same
; folded form.
406 define void @SeveralLoopLatch2(ptr %a) {
407 ; CHECK-LABEL: @SeveralLoopLatch2(
409 ; CHECK-NEXT: [[UGLYGEP:%.*]] = getelementptr i8, ptr [[A:%.*]], i32 84
410 ; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[A]], i64 1600
411 ; CHECK-NEXT: br label [[FOR_BODY:%.*]]
413 ; CHECK-NEXT: [[LSR_IV1:%.*]] = phi ptr [ [[UGLYGEP2:%.*]], [[ANOTHER_BRANCH:%.*]] ], [ [[UGLYGEP]], [[ENTRY:%.*]] ]
414 ; CHECK-NEXT: store i32 1, ptr [[LSR_IV1]], align 4
415 ; CHECK-NEXT: [[UGLYGEP2]] = getelementptr i8, ptr [[LSR_IV1]], i32 4
416 ; CHECK-NEXT: br label [[ANOTHER_BRANCH]]
417 ; CHECK: another.branch:
418 ; CHECK-NEXT: [[LSR_FOLD_TERM_COND_REPLACED_TERM_COND:%.*]] = icmp eq ptr [[UGLYGEP2]], [[SCEVGEP]]
419 ; CHECK-NEXT: br i1 [[LSR_FOLD_TERM_COND_REPLACED_TERM_COND]], label [[FOR_END:%.*]], label [[FOR_BODY]]
421 ; CHECK-NEXT: ret void
424 %uglygep = getelementptr i8, ptr %a, i32 84
427 for.body: ; preds = %another.branch, %entry
428 %lsr.iv1 = phi ptr [ %uglygep2, %another.branch ], [ %uglygep, %entry ]
429 %lsr.iv = phi i32 [ %lsr.iv.next, %another.branch ], [ 379, %entry ]
430 store i32 1, ptr %lsr.iv1, align 4
431 %lsr.iv.next = add nsw i32 %lsr.iv, -1
432 %uglygep2 = getelementptr i8, ptr %lsr.iv1, i32 4
433 br label %another.branch
436 %exitcond.not = icmp sle i32 %lsr.iv.next, 0
437 br i1 %exitcond.not, label %for.end, label %for.body
439 for.end: ; preds = %for.body
; non_branch_terminator: the latch terminator is a switch, not a conditional
; branch, so the terminating condition is not folded — the CHECK lines keep the
; integer IV and the switch.
444 define void @non_branch_terminator(ptr %a) {
445 ; CHECK-LABEL: @non_branch_terminator(
447 ; CHECK-NEXT: [[UGLYGEP:%.*]] = getelementptr i8, ptr [[A:%.*]], i64 84
448 ; CHECK-NEXT: br label [[FOR_BODY:%.*]]
450 ; CHECK-NEXT: [[LSR_IV2:%.*]] = phi i64 [ [[LSR_IV_NEXT3:%.*]], [[FOR_BODY]] ], [ 378, [[ENTRY:%.*]] ]
451 ; CHECK-NEXT: [[LSR_IV1:%.*]] = phi ptr [ [[UGLYGEP2:%.*]], [[FOR_BODY]] ], [ [[UGLYGEP]], [[ENTRY]] ]
452 ; CHECK-NEXT: store i32 1, ptr [[LSR_IV1]], align 4
453 ; CHECK-NEXT: [[UGLYGEP2]] = getelementptr i8, ptr [[LSR_IV1]], i64 4
454 ; CHECK-NEXT: [[LSR_IV_NEXT3]] = add nsw i64 [[LSR_IV2]], -1
455 ; CHECK-NEXT: switch i64 [[LSR_IV2]], label [[FOR_BODY]] [
456 ; CHECK-NEXT: i64 0, label [[FOR_END:%.*]]
459 ; CHECK-NEXT: ret void
462 %uglygep = getelementptr i8, ptr %a, i64 84
465 for.body: ; preds = %for.body, %entry
466 %lsr.iv1 = phi ptr [ %uglygep2, %for.body ], [ %uglygep, %entry ]
467 %lsr.iv = phi i64 [ %lsr.iv.next, %for.body ], [ 379, %entry ]
468 store i32 1, ptr %lsr.iv1, align 4
469 %lsr.iv.next = add nsw i64 %lsr.iv, -1
470 %uglygep2 = getelementptr i8, ptr %lsr.iv1, i64 4
471 switch i64 %lsr.iv.next, label %for.body [i64 0, label %for.end]
473 for.end: ; preds = %for.body
477 ;; The next step of tests exercise various cases with the expansion
478 ;; budget and different trip counts or estimated trip counts.
; profiled_short_tc: branch weights 1:3 imply a short estimated trip count; the
; CHECK lines show the integer IV and its compare survive (no fold).
480 define void @profiled_short_tc(ptr %a, i32 %offset, i32 %n) {
481 ; CHECK-LABEL: @profiled_short_tc(
483 ; CHECK-NEXT: [[OFFSET_NONZERO:%.*]] = or i32 [[OFFSET:%.*]], 1
484 ; CHECK-NEXT: [[UGLYGEP:%.*]] = getelementptr i8, ptr [[A:%.*]], i64 84
485 ; CHECK-NEXT: br label [[FOR_BODY:%.*]]
487 ; CHECK-NEXT: [[LSR_IV1:%.*]] = phi ptr [ [[UGLYGEP2:%.*]], [[FOR_BODY]] ], [ [[UGLYGEP]], [[ENTRY:%.*]] ]
488 ; CHECK-NEXT: [[LSR_IV:%.*]] = phi i32 [ [[LSR_IV_NEXT:%.*]], [[FOR_BODY]] ], [ 0, [[ENTRY]] ]
489 ; CHECK-NEXT: store i32 1, ptr [[LSR_IV1]], align 4
490 ; CHECK-NEXT: [[LSR_IV_NEXT]] = add nsw i32 [[LSR_IV]], 1
491 ; CHECK-NEXT: [[UGLYGEP2]] = getelementptr i8, ptr [[LSR_IV1]], i32 [[OFFSET_NONZERO]]
492 ; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i32 [[LSR_IV_NEXT]], [[N:%.*]]
493 ; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END:%.*]], label [[FOR_BODY]], !prof [[PROF0:![0-9]+]]
495 ; CHECK-NEXT: ret void
498 %offset.nonzero = or i32 %offset, 1
499 %uglygep = getelementptr i8, ptr %a, i64 84
502 for.body: ; preds = %for.body, %entry
503 %lsr.iv1 = phi ptr [ %uglygep2, %for.body ], [ %uglygep, %entry ]
504 %lsr.iv = phi i32 [ %lsr.iv.next, %for.body ], [ 0, %entry ]
505 store i32 1, ptr %lsr.iv1, align 4
506 %lsr.iv.next = add nsw i32 %lsr.iv, 1
507 %uglygep2 = getelementptr i8, ptr %lsr.iv1, i32 %offset.nonzero
508 %exitcond.not = icmp eq i32 %lsr.iv.next, %n
509 br i1 %exitcond.not, label %for.end, label %for.body, !prof !{!"branch_weights", i32 1, i32 3}
511 for.end: ; preds = %for.body
; profiled_long_tc: branch weights 1:300 imply a long estimated trip count; the
; fold is performed even though it requires expanding a mul/sext of the runtime
; offset in the preheader.
515 define void @profiled_long_tc(ptr %a, i32 %offset, i32 %n) {
516 ; CHECK-LABEL: @profiled_long_tc(
518 ; CHECK-NEXT: [[OFFSET_NONZERO:%.*]] = or i32 [[OFFSET:%.*]], 1
519 ; CHECK-NEXT: [[UGLYGEP:%.*]] = getelementptr i8, ptr [[A:%.*]], i64 84
520 ; CHECK-NEXT: [[TMP0:%.*]] = add i32 [[N:%.*]], -1
521 ; CHECK-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64
522 ; CHECK-NEXT: [[TMP2:%.*]] = add nuw nsw i64 [[TMP1]], 1
523 ; CHECK-NEXT: [[TMP3:%.*]] = sext i32 [[OFFSET_NONZERO]] to i64
524 ; CHECK-NEXT: [[TMP4:%.*]] = mul i64 [[TMP2]], [[TMP3]]
525 ; CHECK-NEXT: [[TMP5:%.*]] = add nsw i64 [[TMP4]], 84
526 ; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[A]], i64 [[TMP5]]
527 ; CHECK-NEXT: br label [[FOR_BODY:%.*]]
529 ; CHECK-NEXT: [[LSR_IV1:%.*]] = phi ptr [ [[UGLYGEP2:%.*]], [[FOR_BODY]] ], [ [[UGLYGEP]], [[ENTRY:%.*]] ]
530 ; CHECK-NEXT: store i32 1, ptr [[LSR_IV1]], align 4
531 ; CHECK-NEXT: [[UGLYGEP2]] = getelementptr i8, ptr [[LSR_IV1]], i32 [[OFFSET_NONZERO]]
532 ; CHECK-NEXT: [[LSR_FOLD_TERM_COND_REPLACED_TERM_COND:%.*]] = icmp eq ptr [[UGLYGEP2]], [[SCEVGEP]]
533 ; CHECK-NEXT: br i1 [[LSR_FOLD_TERM_COND_REPLACED_TERM_COND]], label [[FOR_END:%.*]], label [[FOR_BODY]], !prof [[PROF1:![0-9]+]]
535 ; CHECK-NEXT: ret void
538 %offset.nonzero = or i32 %offset, 1
539 %uglygep = getelementptr i8, ptr %a, i64 84
542 for.body: ; preds = %for.body, %entry
543 %lsr.iv1 = phi ptr [ %uglygep2, %for.body ], [ %uglygep, %entry ]
544 %lsr.iv = phi i32 [ %lsr.iv.next, %for.body ], [ 0, %entry ]
545 store i32 1, ptr %lsr.iv1, align 4
546 %lsr.iv.next = add nsw i32 %lsr.iv, 1
547 %uglygep2 = getelementptr i8, ptr %lsr.iv1, i32 %offset.nonzero
548 %exitcond.not = icmp eq i32 %lsr.iv.next, %n
549 br i1 %exitcond.not, label %for.end, label %for.body, !prof !{!"branch_weights", i32 1, i32 300}
551 for.end: ; preds = %for.body
; unknown_tc: no profile data and unknown trip count, unit-increment counter
; with a runtime pointer step; the fold is performed, with the end pointer
; computed as a + 84 + n * sext(%offset.nonzero).
555 define void @unknown_tc(ptr %a, i32 %offset, i32 %n) {
556 ; CHECK-LABEL: @unknown_tc(
558 ; CHECK-NEXT: [[OFFSET_NONZERO:%.*]] = or i32 [[OFFSET:%.*]], 1
559 ; CHECK-NEXT: [[UGLYGEP:%.*]] = getelementptr i8, ptr [[A:%.*]], i64 84
560 ; CHECK-NEXT: [[TMP0:%.*]] = add i32 [[N:%.*]], -1
561 ; CHECK-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64
562 ; CHECK-NEXT: [[TMP2:%.*]] = add nuw nsw i64 [[TMP1]], 1
563 ; CHECK-NEXT: [[TMP3:%.*]] = sext i32 [[OFFSET_NONZERO]] to i64
564 ; CHECK-NEXT: [[TMP4:%.*]] = mul i64 [[TMP2]], [[TMP3]]
565 ; CHECK-NEXT: [[TMP5:%.*]] = add nsw i64 [[TMP4]], 84
566 ; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[A]], i64 [[TMP5]]
567 ; CHECK-NEXT: br label [[FOR_BODY:%.*]]
569 ; CHECK-NEXT: [[LSR_IV1:%.*]] = phi ptr [ [[UGLYGEP2:%.*]], [[FOR_BODY]] ], [ [[UGLYGEP]], [[ENTRY:%.*]] ]
570 ; CHECK-NEXT: store i32 1, ptr [[LSR_IV1]], align 4
571 ; CHECK-NEXT: [[UGLYGEP2]] = getelementptr i8, ptr [[LSR_IV1]], i32 [[OFFSET_NONZERO]]
572 ; CHECK-NEXT: [[LSR_FOLD_TERM_COND_REPLACED_TERM_COND:%.*]] = icmp eq ptr [[UGLYGEP2]], [[SCEVGEP]]
573 ; CHECK-NEXT: br i1 [[LSR_FOLD_TERM_COND_REPLACED_TERM_COND]], label [[FOR_END:%.*]], label [[FOR_BODY]]
575 ; CHECK-NEXT: ret void
578 %offset.nonzero = or i32 %offset, 1
579 %uglygep = getelementptr i8, ptr %a, i64 84
582 for.body: ; preds = %for.body, %entry
583 %lsr.iv1 = phi ptr [ %uglygep2, %for.body ], [ %uglygep, %entry ]
584 %lsr.iv = phi i32 [ %lsr.iv.next, %for.body ], [ 0, %entry ]
585 store i32 1, ptr %lsr.iv1, align 4
586 %lsr.iv.next = add nsw i32 %lsr.iv, 1
587 %uglygep2 = getelementptr i8, ptr %lsr.iv1, i32 %offset.nonzero
588 %exitcond.not = icmp eq i32 %lsr.iv.next, %n
589 br i1 %exitcond.not, label %for.end, label %for.body
591 for.end: ; preds = %for.body
; unknown_tc2: runtime step %step with a non-equality (sge) exit; the CHECK
; lines show the integer IV and its compare are kept (no fold).
595 define void @unknown_tc2(ptr %a, i32 %offset, i32 %n, i32 %step) mustprogress {
596 ; CHECK-LABEL: @unknown_tc2(
598 ; CHECK-NEXT: [[OFFSET_NONZERO:%.*]] = or i32 [[OFFSET:%.*]], 1
599 ; CHECK-NEXT: [[UGLYGEP:%.*]] = getelementptr i8, ptr [[A:%.*]], i64 84
600 ; CHECK-NEXT: br label [[FOR_BODY:%.*]]
602 ; CHECK-NEXT: [[LSR_IV1:%.*]] = phi ptr [ [[UGLYGEP2:%.*]], [[FOR_BODY]] ], [ [[UGLYGEP]], [[ENTRY:%.*]] ]
603 ; CHECK-NEXT: [[LSR_IV:%.*]] = phi i32 [ [[LSR_IV_NEXT:%.*]], [[FOR_BODY]] ], [ 0, [[ENTRY]] ]
604 ; CHECK-NEXT: store i32 1, ptr [[LSR_IV1]], align 4
605 ; CHECK-NEXT: [[LSR_IV_NEXT]] = add nsw i32 [[LSR_IV]], [[STEP:%.*]]
606 ; CHECK-NEXT: [[UGLYGEP2]] = getelementptr i8, ptr [[LSR_IV1]], i32 [[OFFSET_NONZERO]]
607 ; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp sge i32 [[LSR_IV_NEXT]], [[N:%.*]]
608 ; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END:%.*]], label [[FOR_BODY]]
610 ; CHECK-NEXT: ret void
613 %offset.nonzero = or i32 %offset, 1
614 %uglygep = getelementptr i8, ptr %a, i64 84
617 for.body: ; preds = %for.body, %entry
618 %lsr.iv1 = phi ptr [ %uglygep2, %for.body ], [ %uglygep, %entry ]
619 %lsr.iv = phi i32 [ %lsr.iv.next, %for.body ], [ 0, %entry ]
620 store i32 1, ptr %lsr.iv1, align 4
621 %lsr.iv.next = add nsw i32 %lsr.iv, %step
622 %uglygep2 = getelementptr i8, ptr %lsr.iv1, i32 %offset.nonzero
623 %exitcond.not = icmp sge i32 %lsr.iv.next, %n
624 br i1 %exitcond.not, label %for.end, label %for.body
626 for.end: ; preds = %for.body
; small_tc_trivial_loop: constant trip count of 1; the CHECK lines show the
; integer IV and its compare are kept (no fold).
630 define void @small_tc_trivial_loop(ptr %a, i32 %offset) {
631 ; CHECK-LABEL: @small_tc_trivial_loop(
633 ; CHECK-NEXT: [[OFFSET_NONZERO:%.*]] = or i32 [[OFFSET:%.*]], 1
634 ; CHECK-NEXT: [[UGLYGEP:%.*]] = getelementptr i8, ptr [[A:%.*]], i64 84
635 ; CHECK-NEXT: br label [[FOR_BODY:%.*]]
637 ; CHECK-NEXT: [[LSR_IV1:%.*]] = phi ptr [ [[UGLYGEP2:%.*]], [[FOR_BODY]] ], [ [[UGLYGEP]], [[ENTRY:%.*]] ]
638 ; CHECK-NEXT: [[LSR_IV:%.*]] = phi i32 [ [[LSR_IV_NEXT:%.*]], [[FOR_BODY]] ], [ 0, [[ENTRY]] ]
639 ; CHECK-NEXT: store i32 1, ptr [[LSR_IV1]], align 4
640 ; CHECK-NEXT: [[LSR_IV_NEXT]] = add nsw i32 [[LSR_IV]], 1
641 ; CHECK-NEXT: [[UGLYGEP2]] = getelementptr i8, ptr [[LSR_IV1]], i32 [[OFFSET_NONZERO]]
642 ; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i32 [[LSR_IV_NEXT]], 1
643 ; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END:%.*]], label [[FOR_BODY]]
645 ; CHECK-NEXT: ret void
648 %offset.nonzero = or i32 %offset, 1
649 %uglygep = getelementptr i8, ptr %a, i64 84
652 for.body: ; preds = %for.body, %entry
653 %lsr.iv1 = phi ptr [ %uglygep2, %for.body ], [ %uglygep, %entry ]
654 %lsr.iv = phi i32 [ %lsr.iv.next, %for.body ], [ 0, %entry ]
655 store i32 1, ptr %lsr.iv1, align 4
656 %lsr.iv.next = add nsw i32 %lsr.iv, 1
657 %uglygep2 = getelementptr i8, ptr %lsr.iv1, i32 %offset.nonzero
658 %exitcond.not = icmp eq i32 %lsr.iv.next, 1
659 br i1 %exitcond.not, label %for.end, label %for.body
661 for.end: ; preds = %for.body
; small_tc_below_threshold: constant trip count of 2; the CHECK lines show the
; integer IV and its compare are kept (no fold).
665 define void @small_tc_below_threshold(ptr %a, i32 %offset) {
666 ; CHECK-LABEL: @small_tc_below_threshold(
668 ; CHECK-NEXT: [[OFFSET_NONZERO:%.*]] = or i32 [[OFFSET:%.*]], 1
669 ; CHECK-NEXT: [[UGLYGEP:%.*]] = getelementptr i8, ptr [[A:%.*]], i64 84
670 ; CHECK-NEXT: br label [[FOR_BODY:%.*]]
672 ; CHECK-NEXT: [[LSR_IV1:%.*]] = phi ptr [ [[UGLYGEP2:%.*]], [[FOR_BODY]] ], [ [[UGLYGEP]], [[ENTRY:%.*]] ]
673 ; CHECK-NEXT: [[LSR_IV:%.*]] = phi i32 [ [[LSR_IV_NEXT:%.*]], [[FOR_BODY]] ], [ 0, [[ENTRY]] ]
674 ; CHECK-NEXT: store i32 1, ptr [[LSR_IV1]], align 4
675 ; CHECK-NEXT: [[LSR_IV_NEXT]] = add nsw i32 [[LSR_IV]], 1
676 ; CHECK-NEXT: [[UGLYGEP2]] = getelementptr i8, ptr [[LSR_IV1]], i32 [[OFFSET_NONZERO]]
677 ; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i32 [[LSR_IV_NEXT]], 2
678 ; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END:%.*]], label [[FOR_BODY]]
680 ; CHECK-NEXT: ret void
683 %offset.nonzero = or i32 %offset, 1
684 %uglygep = getelementptr i8, ptr %a, i64 84
687 for.body: ; preds = %for.body, %entry
688 %lsr.iv1 = phi ptr [ %uglygep2, %for.body ], [ %uglygep, %entry ]
689 %lsr.iv = phi i32 [ %lsr.iv.next, %for.body ], [ 0, %entry ]
690 store i32 1, ptr %lsr.iv1, align 4
691 %lsr.iv.next = add nsw i32 %lsr.iv, 1
692 %uglygep2 = getelementptr i8, ptr %lsr.iv1, i32 %offset.nonzero
693 %exitcond.not = icmp eq i32 %lsr.iv.next, 2
694 br i1 %exitcond.not, label %for.end, label %for.body
696 for.end: ; preds = %for.body
700 define void @small_tc_above_threshold(ptr %a, i32 %offset) {
701 ; CHECK-LABEL: @small_tc_above_threshold(
703 ; CHECK-NEXT: [[OFFSET_NONZERO:%.*]] = or i32 [[OFFSET:%.*]], 1
704 ; CHECK-NEXT: [[UGLYGEP:%.*]] = getelementptr i8, ptr [[A:%.*]], i64 84
705 ; CHECK-NEXT: [[TMP0:%.*]] = sext i32 [[OFFSET_NONZERO]] to i64
706 ; CHECK-NEXT: [[TMP1:%.*]] = mul nsw i64 [[TMP0]], 10
707 ; CHECK-NEXT: [[TMP2:%.*]] = add nsw i64 [[TMP1]], 84
708 ; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[A]], i64 [[TMP2]]
709 ; CHECK-NEXT: br label [[FOR_BODY:%.*]]
711 ; CHECK-NEXT: [[LSR_IV1:%.*]] = phi ptr [ [[UGLYGEP2:%.*]], [[FOR_BODY]] ], [ [[UGLYGEP]], [[ENTRY:%.*]] ]
712 ; CHECK-NEXT: store i32 1, ptr [[LSR_IV1]], align 4
713 ; CHECK-NEXT: [[UGLYGEP2]] = getelementptr i8, ptr [[LSR_IV1]], i32 [[OFFSET_NONZERO]]
714 ; CHECK-NEXT: [[LSR_FOLD_TERM_COND_REPLACED_TERM_COND:%.*]] = icmp eq ptr [[UGLYGEP2]], [[SCEVGEP]]
715 ; CHECK-NEXT: br i1 [[LSR_FOLD_TERM_COND_REPLACED_TERM_COND]], label [[FOR_END:%.*]], label [[FOR_BODY]]
717 ; CHECK-NEXT: ret void
720 %offset.nonzero = or i32 %offset, 1
721 %uglygep = getelementptr i8, ptr %a, i64 84
724 for.body: ; preds = %for.body, %entry
725 %lsr.iv1 = phi ptr [ %uglygep2, %for.body ], [ %uglygep, %entry ]
726 %lsr.iv = phi i32 [ %lsr.iv.next, %for.body ], [ 0, %entry ]
727 store i32 1, ptr %lsr.iv1, align 4
728 %lsr.iv.next = add nsw i32 %lsr.iv, 1
729 %uglygep2 = getelementptr i8, ptr %lsr.iv1, i32 %offset.nonzero
730 %exitcond.not = icmp eq i32 %lsr.iv.next, 10
731 br i1 %exitcond.not, label %for.end, label %for.body
733 for.end: ; preds = %for.body