1 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
2 ; RUN: opt < %s -passes="loop-reduce" -S -lsr-term-fold | FileCheck %s
4 target datalayout = "e-p:64:64:64-n64"
6 define void @const_tripcount(ptr %a) {
7 ; CHECK-LABEL: @const_tripcount(
9 ; CHECK-NEXT: [[UGLYGEP:%.*]] = getelementptr i8, ptr [[A:%.*]], i64 84
10 ; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[A]], i64 1600
11 ; CHECK-NEXT: br label [[FOR_BODY:%.*]]
13 ; CHECK-NEXT: [[LSR_IV1:%.*]] = phi ptr [ [[UGLYGEP2:%.*]], [[FOR_BODY]] ], [ [[UGLYGEP]], [[ENTRY:%.*]] ]
14 ; CHECK-NEXT: store i32 1, ptr [[LSR_IV1]], align 4
15 ; CHECK-NEXT: [[UGLYGEP2]] = getelementptr i8, ptr [[LSR_IV1]], i64 4
16 ; CHECK-NEXT: [[LSR_FOLD_TERM_COND_REPLACED_TERM_COND:%.*]] = icmp eq ptr [[UGLYGEP2]], [[SCEVGEP]]
17 ; CHECK-NEXT: br i1 [[LSR_FOLD_TERM_COND_REPLACED_TERM_COND]], label [[FOR_END:%.*]], label [[FOR_BODY]]
19 ; CHECK-NEXT: ret void
22 %uglygep = getelementptr i8, ptr %a, i64 84
25 for.body: ; preds = %for.body, %entry
26 %lsr.iv1 = phi ptr [ %uglygep2, %for.body ], [ %uglygep, %entry ]
27 %lsr.iv = phi i64 [ %lsr.iv.next, %for.body ], [ 379, %entry ]
28 store i32 1, ptr %lsr.iv1, align 4
29 %lsr.iv.next = add nsw i64 %lsr.iv, -1
30 %uglygep2 = getelementptr i8, ptr %lsr.iv1, i64 4
31 %exitcond.not = icmp eq i64 %lsr.iv.next, 0
32 br i1 %exitcond.not, label %for.end, label %for.body
34 for.end: ; preds = %for.body
38 define void @runtime_tripcount(ptr %a, i32 %N) {
39 ; CHECK-LABEL: @runtime_tripcount(
41 ; CHECK-NEXT: [[UGLYGEP:%.*]] = getelementptr i8, ptr [[A:%.*]], i32 84
42 ; CHECK-NEXT: [[TMP0:%.*]] = add nsw i32 [[N:%.*]], -1
43 ; CHECK-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64
44 ; CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 2
45 ; CHECK-NEXT: [[TMP3:%.*]] = add nuw nsw i64 [[TMP2]], 88
46 ; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[A]], i64 [[TMP3]]
47 ; CHECK-NEXT: br label [[FOR_BODY:%.*]]
49 ; CHECK-NEXT: [[LSR_IV1:%.*]] = phi ptr [ [[UGLYGEP2:%.*]], [[FOR_BODY]] ], [ [[UGLYGEP]], [[ENTRY:%.*]] ]
50 ; CHECK-NEXT: store i32 1, ptr [[LSR_IV1]], align 4
51 ; CHECK-NEXT: [[UGLYGEP2]] = getelementptr i8, ptr [[LSR_IV1]], i64 4
52 ; CHECK-NEXT: [[LSR_FOLD_TERM_COND_REPLACED_TERM_COND:%.*]] = icmp eq ptr [[UGLYGEP2]], [[SCEVGEP]]
53 ; CHECK-NEXT: br i1 [[LSR_FOLD_TERM_COND_REPLACED_TERM_COND]], label [[FOR_END:%.*]], label [[FOR_BODY]]
55 ; CHECK-NEXT: ret void
58 %uglygep = getelementptr i8, ptr %a, i32 84
61 for.body: ; preds = %for.body, %entry
62 %lsr.iv1 = phi ptr [ %uglygep2, %for.body ], [ %uglygep, %entry ]
63 %lsr.iv = phi i32 [ %lsr.iv.next, %for.body ], [ %N, %entry ]
64 store i32 1, ptr %lsr.iv1, align 4
65 %lsr.iv.next = add nsw i32 %lsr.iv, -1
66 %uglygep2 = getelementptr i8, ptr %lsr.iv1, i64 4
67 %exitcond.not = icmp eq i32 %lsr.iv.next, 0
68 br i1 %exitcond.not, label %for.end, label %for.body
70 for.end: ; preds = %for.body
74 ; In this case, the i8 IV's increment *isn't* nsw. As a result, an N of 0
75 ; is well defined, and thus the post-inc value starts at 255.
76 define void @wrap_around(ptr %a, i8 %N) {
77 ; CHECK-LABEL: @wrap_around(
79 ; CHECK-NEXT: [[TMP0:%.*]] = add i8 [[N:%.*]], -1
80 ; CHECK-NEXT: [[TMP1:%.*]] = zext i8 [[TMP0]] to i64
81 ; CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 2
82 ; CHECK-NEXT: [[TMP3:%.*]] = add nuw nsw i64 [[TMP2]], 4
83 ; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[A:%.*]], i64 [[TMP3]]
84 ; CHECK-NEXT: br label [[FOR_BODY:%.*]]
86 ; CHECK-NEXT: [[LSR_IV1:%.*]] = phi ptr [ [[UGLYGEP2:%.*]], [[FOR_BODY]] ], [ [[A]], [[ENTRY:%.*]] ]
87 ; CHECK-NEXT: store i8 1, ptr [[LSR_IV1]], align 4
88 ; CHECK-NEXT: [[UGLYGEP2]] = getelementptr i8, ptr [[LSR_IV1]], i64 4
89 ; CHECK-NEXT: [[LSR_FOLD_TERM_COND_REPLACED_TERM_COND:%.*]] = icmp eq ptr [[UGLYGEP2]], [[SCEVGEP]]
90 ; CHECK-NEXT: br i1 [[LSR_FOLD_TERM_COND_REPLACED_TERM_COND]], label [[FOR_END:%.*]], label [[FOR_BODY]]
92 ; CHECK-NEXT: ret void
97 for.body: ; preds = %for.body, %entry
98 %lsr.iv1 = phi ptr [ %uglygep2, %for.body ], [ %a, %entry ]
99 %lsr.iv = phi i8 [ %lsr.iv.next, %for.body ], [ %N, %entry ]
100 store i8 1, ptr %lsr.iv1, align 4
101 %lsr.iv.next = add i8 %lsr.iv, -1
102 %uglygep2 = getelementptr i8, ptr %lsr.iv1, i64 4
103 %exitcond.not = icmp eq i8 %lsr.iv.next, 0
104 br i1 %exitcond.not, label %for.end, label %for.body
106 for.end: ; preds = %for.body
110 ; The replacing AddRec IV is a complicated AddRec. This tests whether
111 ; the fold terminating condition transformation is writing new terminating
112 ; condition in the correct type.
113 define void @ptr_of_ptr_addrec(ptr %ptrptr, i32 %length) {
114 ; CHECK-LABEL: @ptr_of_ptr_addrec(
116 ; CHECK-NEXT: [[START_PTRPTR:%.*]] = getelementptr ptr, ptr [[PTRPTR:%.*]]
117 ; CHECK-NEXT: [[TMP0:%.*]] = add i32 [[LENGTH:%.*]], -1
118 ; CHECK-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64
119 ; CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 3
120 ; CHECK-NEXT: [[TMP3:%.*]] = add nuw nsw i64 [[TMP2]], 8
121 ; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[START_PTRPTR]], i64 [[TMP3]]
122 ; CHECK-NEXT: br label [[FOR_BODY:%.*]]
124 ; CHECK-NEXT: [[IT_04:%.*]] = phi ptr [ [[INCDEC_PTR:%.*]], [[FOR_BODY]] ], [ [[START_PTRPTR]], [[ENTRY:%.*]] ]
125 ; CHECK-NEXT: [[TMP4:%.*]] = load ptr, ptr [[IT_04]], align 8
126 ; CHECK-NEXT: tail call void @foo(ptr [[TMP4]])
127 ; CHECK-NEXT: [[INCDEC_PTR]] = getelementptr ptr, ptr [[IT_04]], i64 1
128 ; CHECK-NEXT: [[LSR_FOLD_TERM_COND_REPLACED_TERM_COND:%.*]] = icmp eq ptr [[INCDEC_PTR]], [[SCEVGEP]]
129 ; CHECK-NEXT: br i1 [[LSR_FOLD_TERM_COND_REPLACED_TERM_COND]], label [[FOR_END:%.*]], label [[FOR_BODY]]
131 ; CHECK-NEXT: ret void
134 %start.ptrptr = getelementptr inbounds ptr, ptr %ptrptr
137 for.body: ; preds = %entry, %for.body
138 %i.05 = phi i32 [ %dec, %for.body ], [ %length, %entry ]
139 %it.04 = phi ptr [ %incdec.ptr, %for.body ], [ %start.ptrptr, %entry ]
140 %0 = load ptr, ptr %it.04, align 8
141 tail call void @foo(ptr %0)
142 %incdec.ptr = getelementptr inbounds ptr, ptr %it.04, i64 1
143 %dec = add nsw i32 %i.05, -1
144 %tobool.not = icmp eq i32 %dec, 0
145 br i1 %tobool.not, label %for.end, label %for.body
147 for.end: ; preds = %for.body
151 declare void @foo(ptr)
153 define void @iv_start_non_preheader(ptr %mark, i32 signext %length) {
154 ; CHECK-LABEL: @iv_start_non_preheader(
156 ; CHECK-NEXT: [[TOBOOL_NOT3:%.*]] = icmp eq i32 [[LENGTH:%.*]], 0
157 ; CHECK-NEXT: br i1 [[TOBOOL_NOT3]], label [[FOR_COND_CLEANUP:%.*]], label [[FOR_BODY_PREHEADER:%.*]]
158 ; CHECK: for.body.preheader:
159 ; CHECK-NEXT: [[TMP0:%.*]] = add i32 [[LENGTH]], -1
160 ; CHECK-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64
161 ; CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 3
162 ; CHECK-NEXT: [[TMP3:%.*]] = add nuw nsw i64 [[TMP2]], 8
163 ; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[MARK:%.*]], i64 [[TMP3]]
164 ; CHECK-NEXT: br label [[FOR_BODY:%.*]]
165 ; CHECK: for.cond.cleanup.loopexit:
166 ; CHECK-NEXT: br label [[FOR_COND_CLEANUP]]
167 ; CHECK: for.cond.cleanup:
168 ; CHECK-NEXT: ret void
170 ; CHECK-NEXT: [[DST_04:%.*]] = phi ptr [ [[INCDEC_PTR:%.*]], [[FOR_BODY]] ], [ [[MARK]], [[FOR_BODY_PREHEADER]] ]
171 ; CHECK-NEXT: [[TMP4:%.*]] = load ptr, ptr [[DST_04]], align 8
172 ; CHECK-NEXT: [[TMP5:%.*]] = call ptr @foo(ptr [[TMP4]])
173 ; CHECK-NEXT: [[INCDEC_PTR]] = getelementptr ptr, ptr [[DST_04]], i64 1
174 ; CHECK-NEXT: [[LSR_FOLD_TERM_COND_REPLACED_TERM_COND:%.*]] = icmp eq ptr [[INCDEC_PTR]], [[SCEVGEP]]
175 ; CHECK-NEXT: br i1 [[LSR_FOLD_TERM_COND_REPLACED_TERM_COND]], label [[FOR_COND_CLEANUP_LOOPEXIT:%.*]], label [[FOR_BODY]]
178 %tobool.not3 = icmp eq i32 %length, 0
179 br i1 %tobool.not3, label %for.cond.cleanup, label %for.body
181 for.cond.cleanup: ; preds = %for.body, %entry
184 for.body: ; preds = %entry, %for.body
185 %i.05 = phi i32 [ %dec, %for.body ], [ %length, %entry ]
186 %dst.04 = phi ptr [ %incdec.ptr, %for.body ], [ %mark, %entry ]
187 %0 = load ptr, ptr %dst.04, align 8
188 call ptr @foo(ptr %0)
189 %incdec.ptr = getelementptr inbounds ptr, ptr %dst.04, i64 1
190 %dec = add nsw i32 %i.05, -1
191 %tobool.not = icmp eq i32 %dec, 0
192 br i1 %tobool.not, label %for.cond.cleanup, label %for.body
195 ; Consider the case where %a points to a buffer exactly 13 bytes long. The
196 ; loop below will access bytes: 0, 4, 8, and 12. The key bit is that we
197 ; advance the pointer IV by *4* each time, and thus on the iteration we write
198 ; byte 12, %uglygep2 (the pointer increment) is past the end of the underlying
199 ; storage and thus violates the inbounds requirements. As a result, %uglygep2
200 ; is poison on the final iteration. If we insert a branch on that value
201 ; (without stripping the poison flag), we have inserted undefined behavior
202 ; where it did not previously exist.
203 define void @inbounds_poison_use(ptr %a) {
204 ; CHECK-LABEL: @inbounds_poison_use(
206 ; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[A:%.*]], i64 16
207 ; CHECK-NEXT: br label [[FOR_BODY:%.*]]
209 ; CHECK-NEXT: [[LSR_IV1:%.*]] = phi ptr [ [[UGLYGEP2:%.*]], [[FOR_BODY]] ], [ [[A]], [[ENTRY:%.*]] ]
210 ; CHECK-NEXT: store i8 1, ptr [[LSR_IV1]], align 4
211 ; CHECK-NEXT: [[UGLYGEP2]] = getelementptr i8, ptr [[LSR_IV1]], i64 4
212 ; CHECK-NEXT: [[LSR_FOLD_TERM_COND_REPLACED_TERM_COND:%.*]] = icmp eq ptr [[UGLYGEP2]], [[SCEVGEP]]
213 ; CHECK-NEXT: br i1 [[LSR_FOLD_TERM_COND_REPLACED_TERM_COND]], label [[FOR_END:%.*]], label [[FOR_BODY]]
215 ; CHECK-NEXT: ret void
220 for.body: ; preds = %for.body, %entry
221 %lsr.iv1 = phi ptr [ %uglygep2, %for.body ], [ %a, %entry ]
222 %lsr.iv = phi i32 [ %lsr.iv.next, %for.body ], [ 4, %entry ]
223 store i8 1, ptr %lsr.iv1, align 4
224 %lsr.iv.next = add nsw i32 %lsr.iv, -1
225 %uglygep2 = getelementptr inbounds i8, ptr %lsr.iv1, i64 4
226 %exitcond.not = icmp eq i32 %lsr.iv.next, 0
227 br i1 %exitcond.not, label %for.end, label %for.body
229 for.end: ; preds = %for.body
233 ; In this case, the integer IV has a larger bitwidth than the pointer IV.
234 ; This means that the smaller IV may wrap around multiple times before
235 ; the original loop exit is taken.
236 define void @iv_size(ptr %a, i128 %N) {
237 ; CHECK-LABEL: @iv_size(
239 ; CHECK-NEXT: br label [[FOR_BODY:%.*]]
241 ; CHECK-NEXT: [[LSR_IV1:%.*]] = phi ptr [ [[UGLYGEP2:%.*]], [[FOR_BODY]] ], [ [[A:%.*]], [[ENTRY:%.*]] ]
242 ; CHECK-NEXT: [[LSR_IV:%.*]] = phi i128 [ [[LSR_IV_NEXT:%.*]], [[FOR_BODY]] ], [ [[N:%.*]], [[ENTRY]] ]
243 ; CHECK-NEXT: store i32 1, ptr [[LSR_IV1]], align 4
244 ; CHECK-NEXT: [[LSR_IV_NEXT]] = add nsw i128 [[LSR_IV]], -1
245 ; CHECK-NEXT: [[UGLYGEP2]] = getelementptr i8, ptr [[LSR_IV1]], i64 4
246 ; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i128 [[LSR_IV_NEXT]], 0
247 ; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END:%.*]], label [[FOR_BODY]]
249 ; CHECK-NEXT: ret void
254 for.body: ; preds = %for.body, %entry
255 %lsr.iv1 = phi ptr [ %uglygep2, %for.body ], [ %a, %entry ]
256 %lsr.iv = phi i128 [ %lsr.iv.next, %for.body ], [ %N, %entry ]
257 store i32 1, ptr %lsr.iv1, align 4
258 %lsr.iv.next = add nsw i128 %lsr.iv, -1
259 %uglygep2 = getelementptr i8, ptr %lsr.iv1, i64 4
260 %exitcond.not = icmp eq i128 %lsr.iv.next, 0
261 br i1 %exitcond.not, label %for.end, label %for.body
263 for.end: ; preds = %for.body
267 ; To check correct folding of a non-equality terminating condition.
268 ; Due to SLE, the offset must be 1600.
269 define void @IcmpSle(ptr %a) {
270 ; CHECK-LABEL: @IcmpSle(
272 ; CHECK-NEXT: [[UGLYGEP:%.*]] = getelementptr i8, ptr [[A:%.*]], i32 84
273 ; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[A]], i64 1600
274 ; CHECK-NEXT: br label [[FOR_BODY:%.*]]
276 ; CHECK-NEXT: [[LSR_IV1:%.*]] = phi ptr [ [[UGLYGEP2:%.*]], [[FOR_BODY]] ], [ [[UGLYGEP]], [[ENTRY:%.*]] ]
277 ; CHECK-NEXT: store i32 1, ptr [[LSR_IV1]], align 4
278 ; CHECK-NEXT: [[UGLYGEP2]] = getelementptr i8, ptr [[LSR_IV1]], i32 4
279 ; CHECK-NEXT: [[LSR_FOLD_TERM_COND_REPLACED_TERM_COND:%.*]] = icmp eq ptr [[UGLYGEP2]], [[SCEVGEP]]
280 ; CHECK-NEXT: br i1 [[LSR_FOLD_TERM_COND_REPLACED_TERM_COND]], label [[FOR_END:%.*]], label [[FOR_BODY]]
282 ; CHECK-NEXT: ret void
285 %uglygep = getelementptr i8, ptr %a, i32 84
288 for.body: ; preds = %for.body, %entry
289 %lsr.iv1 = phi ptr [ %uglygep2, %for.body ], [ %uglygep, %entry ]
290 %lsr.iv = phi i32 [ %lsr.iv.next, %for.body ], [ 379, %entry ]
291 store i32 1, ptr %lsr.iv1, align 4
292 %lsr.iv.next = add nsw i32 %lsr.iv, -1
293 %uglygep2 = getelementptr i8, ptr %lsr.iv1, i32 4
294 %exitcond.not = icmp sle i32 %lsr.iv.next, 0
295 br i1 %exitcond.not, label %for.end, label %for.body
297 for.end: ; preds = %for.body
301 ; Due to SLT, the offset must be 1604.
302 define void @IcmpSlt(ptr %a) {
303 ; CHECK-LABEL: @IcmpSlt(
305 ; CHECK-NEXT: [[UGLYGEP:%.*]] = getelementptr i8, ptr [[A:%.*]], i32 84
306 ; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[A]], i64 1604
307 ; CHECK-NEXT: br label [[FOR_BODY:%.*]]
309 ; CHECK-NEXT: [[LSR_IV1:%.*]] = phi ptr [ [[UGLYGEP2:%.*]], [[FOR_BODY]] ], [ [[UGLYGEP]], [[ENTRY:%.*]] ]
310 ; CHECK-NEXT: store i32 1, ptr [[LSR_IV1]], align 4
311 ; CHECK-NEXT: [[UGLYGEP2]] = getelementptr i8, ptr [[LSR_IV1]], i32 4
312 ; CHECK-NEXT: [[LSR_FOLD_TERM_COND_REPLACED_TERM_COND:%.*]] = icmp eq ptr [[UGLYGEP2]], [[SCEVGEP]]
313 ; CHECK-NEXT: br i1 [[LSR_FOLD_TERM_COND_REPLACED_TERM_COND]], label [[FOR_END:%.*]], label [[FOR_BODY]]
315 ; CHECK-NEXT: ret void
318 %uglygep = getelementptr i8, ptr %a, i32 84
321 for.body: ; preds = %for.body, %entry
322 %lsr.iv1 = phi ptr [ %uglygep2, %for.body ], [ %uglygep, %entry ]
323 %lsr.iv = phi i32 [ %lsr.iv.next, %for.body ], [ 379, %entry ]
324 store i32 1, ptr %lsr.iv1, align 4
325 %lsr.iv.next = add nsw i32 %lsr.iv, -1
326 %uglygep2 = getelementptr i8, ptr %lsr.iv1, i32 4
327 %exitcond.not = icmp slt i32 %lsr.iv.next, 0
328 br i1 %exitcond.not, label %for.end, label %for.body
330 for.end: ; preds = %for.body
334 ; Invert predicate and branches
335 define void @IcmpSgt(ptr %a) {
336 ; CHECK-LABEL: @IcmpSgt(
338 ; CHECK-NEXT: [[UGLYGEP:%.*]] = getelementptr i8, ptr [[A:%.*]], i32 84
339 ; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[A]], i64 1600
340 ; CHECK-NEXT: br label [[FOR_BODY:%.*]]
342 ; CHECK-NEXT: [[LSR_IV1:%.*]] = phi ptr [ [[UGLYGEP2:%.*]], [[FOR_BODY]] ], [ [[UGLYGEP]], [[ENTRY:%.*]] ]
343 ; CHECK-NEXT: store i32 1, ptr [[LSR_IV1]], align 4
344 ; CHECK-NEXT: [[UGLYGEP2]] = getelementptr i8, ptr [[LSR_IV1]], i32 4
345 ; CHECK-NEXT: [[LSR_FOLD_TERM_COND_REPLACED_TERM_COND:%.*]] = icmp eq ptr [[UGLYGEP2]], [[SCEVGEP]]
346 ; CHECK-NEXT: br i1 [[LSR_FOLD_TERM_COND_REPLACED_TERM_COND]], label [[FOR_END:%.*]], label [[FOR_BODY]]
348 ; CHECK-NEXT: ret void
351 %uglygep = getelementptr i8, ptr %a, i32 84
354 for.body: ; preds = %for.body, %entry
355 %lsr.iv1 = phi ptr [ %uglygep2, %for.body ], [ %uglygep, %entry ]
356 %lsr.iv = phi i32 [ %lsr.iv.next, %for.body ], [ 379, %entry ]
357 store i32 1, ptr %lsr.iv1, align 4
358 %lsr.iv.next = add nsw i32 %lsr.iv, -1
359 %uglygep2 = getelementptr i8, ptr %lsr.iv1, i32 4
360 %exitcond.not = icmp sgt i32 %lsr.iv.next, 0
361 br i1 %exitcond.not, label %for.body, label %for.end
363 for.end: ; preds = %for.body
367 ; Invert predicate and branches
368 define void @SeveralLoopLatch(ptr %a) {
369 ; CHECK-LABEL: @SeveralLoopLatch(
371 ; CHECK-NEXT: [[UGLYGEP:%.*]] = getelementptr i8, ptr [[A:%.*]], i32 84
372 ; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[A]], i64 1600
373 ; CHECK-NEXT: br label [[FOR_BODY:%.*]]
375 ; CHECK-NEXT: [[LSR_IV1:%.*]] = phi ptr [ [[UGLYGEP2:%.*]], [[ANOTHER_BRANCH:%.*]] ], [ [[UGLYGEP]], [[ENTRY:%.*]] ]
376 ; CHECK-NEXT: store i32 1, ptr [[LSR_IV1]], align 4
377 ; CHECK-NEXT: [[UGLYGEP2]] = getelementptr i8, ptr [[LSR_IV1]], i32 4
378 ; CHECK-NEXT: br label [[ANOTHER_BRANCH]]
379 ; CHECK: another.branch:
380 ; CHECK-NEXT: [[LSR_FOLD_TERM_COND_REPLACED_TERM_COND:%.*]] = icmp eq ptr [[UGLYGEP2]], [[SCEVGEP]]
381 ; CHECK-NEXT: br i1 [[LSR_FOLD_TERM_COND_REPLACED_TERM_COND]], label [[FOR_END:%.*]], label [[FOR_BODY]]
383 ; CHECK-NEXT: ret void
386 %uglygep = getelementptr i8, ptr %a, i32 84
389 for.body: ; preds = %another.branch, %entry
390 %lsr.iv1 = phi ptr [ %uglygep2, %another.branch ], [ %uglygep, %entry ]
391 %lsr.iv = phi i32 [ %lsr.iv.next, %another.branch ], [ 379, %entry ]
392 store i32 1, ptr %lsr.iv1, align 4
393 %lsr.iv.next = add nsw i32 %lsr.iv, -1
394 %uglygep2 = getelementptr i8, ptr %lsr.iv1, i32 4
395 br label %another.branch
398 %exitcond.not = icmp sgt i32 %lsr.iv.next, 0
399 br i1 %exitcond.not, label %for.body, label %for.end
401 for.end: ; preds = %for.body
405 ; Invert branch in SeveralLoopLatch
406 define void @SeveralLoopLatch2(ptr %a) {
407 ; CHECK-LABEL: @SeveralLoopLatch2(
409 ; CHECK-NEXT: [[UGLYGEP:%.*]] = getelementptr i8, ptr [[A:%.*]], i32 84
410 ; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[A]], i64 1600
411 ; CHECK-NEXT: br label [[FOR_BODY:%.*]]
413 ; CHECK-NEXT: [[LSR_IV1:%.*]] = phi ptr [ [[UGLYGEP2:%.*]], [[ANOTHER_BRANCH:%.*]] ], [ [[UGLYGEP]], [[ENTRY:%.*]] ]
414 ; CHECK-NEXT: store i32 1, ptr [[LSR_IV1]], align 4
415 ; CHECK-NEXT: [[UGLYGEP2]] = getelementptr i8, ptr [[LSR_IV1]], i32 4
416 ; CHECK-NEXT: br label [[ANOTHER_BRANCH]]
417 ; CHECK: another.branch:
418 ; CHECK-NEXT: [[LSR_FOLD_TERM_COND_REPLACED_TERM_COND:%.*]] = icmp eq ptr [[UGLYGEP2]], [[SCEVGEP]]
419 ; CHECK-NEXT: br i1 [[LSR_FOLD_TERM_COND_REPLACED_TERM_COND]], label [[FOR_END:%.*]], label [[FOR_BODY]]
421 ; CHECK-NEXT: ret void
424 %uglygep = getelementptr i8, ptr %a, i32 84
427 for.body: ; preds = %another.branch, %entry
428 %lsr.iv1 = phi ptr [ %uglygep2, %another.branch ], [ %uglygep, %entry ]
429 %lsr.iv = phi i32 [ %lsr.iv.next, %another.branch ], [ 379, %entry ]
430 store i32 1, ptr %lsr.iv1, align 4
431 %lsr.iv.next = add nsw i32 %lsr.iv, -1
432 %uglygep2 = getelementptr i8, ptr %lsr.iv1, i32 4
433 br label %another.branch
436 %exitcond.not = icmp sle i32 %lsr.iv.next, 0
437 br i1 %exitcond.not, label %for.end, label %for.body
439 for.end: ; preds = %for.body
444 define void @non_branch_terminator(ptr %a) {
445 ; CHECK-LABEL: @non_branch_terminator(
447 ; CHECK-NEXT: [[UGLYGEP:%.*]] = getelementptr i8, ptr [[A:%.*]], i64 84
448 ; CHECK-NEXT: br label [[FOR_BODY:%.*]]
450 ; CHECK-NEXT: [[LSR_IV2:%.*]] = phi i64 [ [[LSR_IV_NEXT3:%.*]], [[FOR_BODY]] ], [ 378, [[ENTRY:%.*]] ]
451 ; CHECK-NEXT: [[LSR_IV1:%.*]] = phi ptr [ [[UGLYGEP2:%.*]], [[FOR_BODY]] ], [ [[UGLYGEP]], [[ENTRY]] ]
452 ; CHECK-NEXT: store i32 1, ptr [[LSR_IV1]], align 4
453 ; CHECK-NEXT: [[UGLYGEP2]] = getelementptr i8, ptr [[LSR_IV1]], i64 4
454 ; CHECK-NEXT: [[LSR_IV_NEXT3]] = add nsw i64 [[LSR_IV2]], -1
455 ; CHECK-NEXT: switch i64 [[LSR_IV2]], label [[FOR_BODY]] [
456 ; CHECK-NEXT: i64 0, label [[FOR_END:%.*]]
459 ; CHECK-NEXT: ret void
462 %uglygep = getelementptr i8, ptr %a, i64 84
465 for.body: ; preds = %for.body, %entry
466 %lsr.iv1 = phi ptr [ %uglygep2, %for.body ], [ %uglygep, %entry ]
467 %lsr.iv = phi i64 [ %lsr.iv.next, %for.body ], [ 379, %entry ]
468 store i32 1, ptr %lsr.iv1, align 4
469 %lsr.iv.next = add nsw i64 %lsr.iv, -1
470 %uglygep2 = getelementptr i8, ptr %lsr.iv1, i64 4
471 switch i64 %lsr.iv.next, label %for.body [i64 0, label %for.end]
473 for.end: ; preds = %for.body