1 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
2 ; RUN: opt < %s -indvars -S | FileCheck %s
4 ; Make sure that indvars can perform LFTR without a canonical IV.
6 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
8 ; Perform LFTR using the original pointer-type IV.
; External sink: keeps loaded doubles alive so the loops below are not DCE'd.
10 declare void @use(double %x)
12 ; for(char* p = base; p < base + n; ++p) {
; LFTR on a pointer-typed IV: %p.02 walks from %base toward %base+%n.
; The autogenerated CHECK lines (do not hand-edit; regenerate with
; utils/update_test_checks.py) show the exit test rewritten from the
; original 'icmp ult' to 'icmp ne' against the precomputed end pointer,
; without introducing a canonical integer IV.
15 define void @ptriv(i8* %base, i32 %n) nounwind {
16 ; CHECK-LABEL: @ptriv(
18 ; CHECK-NEXT: [[IDX_EXT:%.*]] = sext i32 [[N:%.*]] to i64
19 ; CHECK-NEXT: [[ADD_PTR:%.*]] = getelementptr inbounds i8, i8* [[BASE:%.*]], i64 [[IDX_EXT]]
20 ; CHECK-NEXT: [[CMP1:%.*]] = icmp ult i8* [[BASE]], [[ADD_PTR]]
21 ; CHECK-NEXT: br i1 [[CMP1]], label [[FOR_BODY_PREHEADER:%.*]], label [[FOR_END:%.*]]
22 ; CHECK: for.body.preheader:
23 ; CHECK-NEXT: br label [[FOR_BODY:%.*]]
25 ; CHECK-NEXT: [[P_02:%.*]] = phi i8* [ [[INCDEC_PTR:%.*]], [[FOR_BODY]] ], [ [[BASE]], [[FOR_BODY_PREHEADER]] ]
26 ; CHECK-NEXT: [[SUB_PTR_LHS_CAST:%.*]] = ptrtoint i8* [[P_02]] to i64
27 ; CHECK-NEXT: [[SUB_PTR_RHS_CAST:%.*]] = ptrtoint i8* [[BASE]] to i64
28 ; CHECK-NEXT: [[SUB_PTR_SUB:%.*]] = sub i64 [[SUB_PTR_LHS_CAST]], [[SUB_PTR_RHS_CAST]]
29 ; CHECK-NEXT: [[CONV:%.*]] = trunc i64 [[SUB_PTR_SUB]] to i8
30 ; CHECK-NEXT: store i8 [[CONV]], i8* [[P_02]]
31 ; CHECK-NEXT: [[INCDEC_PTR]] = getelementptr inbounds i8, i8* [[P_02]], i32 1
32 ; CHECK-NEXT: [[EXITCOND:%.*]] = icmp ne i8* [[INCDEC_PTR]], [[ADD_PTR]]
33 ; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_BODY]], label [[FOR_END_LOOPEXIT:%.*]]
34 ; CHECK: for.end.loopexit:
35 ; CHECK-NEXT: br label [[FOR_END]]
37 ; CHECK-NEXT: ret void
; NOTE(review): basic-block labels (entry:, for.body:, for.end:), the
; 'ret void', and the closing brace appear to have been dropped from this
; excerpt (the original line numbering jumps 55 -> 61) — confirm against
; the upstream test file before editing.
40 %idx.ext = sext i32 %n to i64
41 %add.ptr = getelementptr inbounds i8, i8* %base, i64 %idx.ext
; Loop guard: only enter the body when base < base+n.
42 %cmp1 = icmp ult i8* %base, %add.ptr
43 br i1 %cmp1, label %for.body, label %for.end
46 %p.02 = phi i8* [ %base, %entry ], [ %incdec.ptr, %for.body ]
47 ; cruft to make the IV useful
48 %sub.ptr.lhs.cast = ptrtoint i8* %p.02 to i64
49 %sub.ptr.rhs.cast = ptrtoint i8* %base to i64
50 %sub.ptr.sub = sub i64 %sub.ptr.lhs.cast, %sub.ptr.rhs.cast
51 %conv = trunc i64 %sub.ptr.sub to i8
52 store i8 %conv, i8* %p.02
53 %incdec.ptr = getelementptr inbounds i8, i8* %p.02, i32 1
; Original 'ult' exit test — LFTR rewrites this to the 'ne' form checked above.
54 %cmp = icmp ult i8* %incdec.ptr, %add.ptr
55 br i1 %cmp, label %for.body, label %for.end
61 ; This test checks that SCEVExpander can handle an outer loop that has been
62 ; simplified, and as a result the inner loop's exit test will be rewritten.
; Nested loops where the inner trip count (%sub3 = %arg - %i - 1) is an outer
; recurrence. The CHECK lines show SCEVExpander materializing a down-counting
; outer IV ([[INDVARS_IV]], starting at %arg-1) and rewriting the inner exit
; test against it. CHECK lines are autogenerated — do not hand-edit.
63 define void @expandOuterRecurrence(i32 %arg) nounwind {
64 ; CHECK-LABEL: @expandOuterRecurrence(
66 ; CHECK-NEXT: [[SUB1:%.*]] = sub nsw i32 [[ARG:%.*]], 1
67 ; CHECK-NEXT: [[CMP1:%.*]] = icmp slt i32 0, [[SUB1]]
68 ; CHECK-NEXT: br i1 [[CMP1]], label [[OUTER_PREHEADER:%.*]], label [[EXIT:%.*]]
69 ; CHECK: outer.preheader:
70 ; CHECK-NEXT: [[TMP0:%.*]] = add i32 [[ARG]], -1
71 ; CHECK-NEXT: br label [[OUTER:%.*]]
73 ; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i32 [ [[TMP0]], [[OUTER_PREHEADER]] ], [ [[INDVARS_IV_NEXT:%.*]], [[OUTER_INC:%.*]] ]
74 ; CHECK-NEXT: [[I:%.*]] = phi i32 [ [[I_INC:%.*]], [[OUTER_INC]] ], [ 0, [[OUTER_PREHEADER]] ]
75 ; CHECK-NEXT: [[SUB2:%.*]] = sub nsw i32 [[ARG]], [[I]]
76 ; CHECK-NEXT: [[SUB3:%.*]] = sub nsw i32 [[SUB2]], 1
77 ; CHECK-NEXT: [[CMP2:%.*]] = icmp slt i32 0, [[SUB3]]
78 ; CHECK-NEXT: br i1 [[CMP2]], label [[INNER_PH:%.*]], label [[OUTER_INC]]
80 ; CHECK-NEXT: br label [[INNER:%.*]]
82 ; CHECK-NEXT: [[J:%.*]] = phi i32 [ 0, [[INNER_PH]] ], [ [[J_INC:%.*]], [[INNER]] ]
83 ; CHECK-NEXT: [[J_INC]] = add nuw nsw i32 [[J]], 1
84 ; CHECK-NEXT: [[EXITCOND:%.*]] = icmp ne i32 [[J_INC]], [[INDVARS_IV]]
85 ; CHECK-NEXT: br i1 [[EXITCOND]], label [[INNER]], label [[OUTER_INC_LOOPEXIT:%.*]]
86 ; CHECK: outer.inc.loopexit:
87 ; CHECK-NEXT: br label [[OUTER_INC]]
89 ; CHECK-NEXT: [[I_INC]] = add nuw nsw i32 [[I]], 1
90 ; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add i32 [[INDVARS_IV]], -1
91 ; CHECK-NEXT: [[EXITCOND1:%.*]] = icmp ne i32 [[I_INC]], [[TMP0]]
92 ; CHECK-NEXT: br i1 [[EXITCOND1]], label [[OUTER]], label [[EXIT_LOOPEXIT:%.*]]
93 ; CHECK: exit.loopexit:
94 ; CHECK-NEXT: br label [[EXIT]]
96 ; CHECK-NEXT: ret void
; NOTE(review): block labels (entry:, outer:, inner.ph:, inner:, outer.inc:,
; exit:) are missing from this excerpt of the input IR — confirm upstream.
99 %sub1 = sub nsw i32 %arg, 1
100 %cmp1 = icmp slt i32 0, %sub1
101 br i1 %cmp1, label %outer, label %exit
104 %i = phi i32 [ 0, %entry ], [ %i.inc, %outer.inc ]
; Inner trip count depends on the outer IV: sub3 = arg - i - 1.
105 %sub2 = sub nsw i32 %arg, %i
106 %sub3 = sub nsw i32 %sub2, 1
107 %cmp2 = icmp slt i32 0, %sub3
108 br i1 %cmp2, label %inner.ph, label %outer.inc
114 %j = phi i32 [ 0, %inner.ph ], [ %j.inc, %inner ]
115 %j.inc = add nsw i32 %j, 1
116 %cmp3 = icmp slt i32 %j.inc, %sub3
117 br i1 %cmp3, label %inner, label %outer.inc
120 %i.inc = add nsw i32 %i, 1
121 %cmp4 = icmp slt i32 %i.inc, %sub1
122 br i1 %cmp4, label %outer, label %exit
128 ; Force SCEVExpander to look for an existing well-formed phi.
129 ; Perform LFTR without generating extra preheader code.
; Guarded single loop (entered only when 1 < %irow). The CHECK lines show
; indvars widening both i32 IVs to i64, reusing existing phis, and performing
; LFTR against a zext'd wide trip count with no extra preheader code.
; CHECK lines are autogenerated — do not hand-edit. Note the function
; signature is split: its tail is on the 'i32 %irow, i32 %ilead' line after
; the CHECK block.
130 define void @guardedloop([0 x double]* %matrix, [0 x double]* %vector,
131 ; CHECK-LABEL: @guardedloop(
133 ; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 1, [[IROW:%.*]]
134 ; CHECK-NEXT: br i1 [[CMP]], label [[LOOP_PREHEADER:%.*]], label [[RETURN:%.*]]
135 ; CHECK: loop.preheader:
136 ; CHECK-NEXT: [[TMP0:%.*]] = sext i32 [[ILEAD:%.*]] to i64
137 ; CHECK-NEXT: [[WIDE_TRIP_COUNT:%.*]] = zext i32 [[IROW]] to i64
138 ; CHECK-NEXT: br label [[LOOP:%.*]]
140 ; CHECK-NEXT: [[INDVARS_IV2:%.*]] = phi i64 [ 0, [[LOOP_PREHEADER]] ], [ [[INDVARS_IV_NEXT3:%.*]], [[LOOP]] ]
141 ; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, [[LOOP_PREHEADER]] ], [ [[INDVARS_IV_NEXT:%.*]], [[LOOP]] ]
142 ; CHECK-NEXT: [[TMP1:%.*]] = add nsw i64 [[INDVARS_IV]], [[INDVARS_IV2]]
143 ; CHECK-NEXT: [[MATRIXP:%.*]] = getelementptr inbounds [0 x double], [0 x double]* [[MATRIX:%.*]], i32 0, i64 [[TMP1]]
144 ; CHECK-NEXT: [[V1:%.*]] = load double, double* [[MATRIXP]]
145 ; CHECK-NEXT: call void @use(double [[V1]])
146 ; CHECK-NEXT: [[VECTORP:%.*]] = getelementptr inbounds [0 x double], [0 x double]* [[VECTOR:%.*]], i32 0, i64 [[INDVARS_IV2]]
147 ; CHECK-NEXT: [[V2:%.*]] = load double, double* [[VECTORP]]
148 ; CHECK-NEXT: call void @use(double [[V2]])
149 ; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nsw i64 [[INDVARS_IV]], [[TMP0]]
150 ; CHECK-NEXT: [[INDVARS_IV_NEXT3]] = add nuw nsw i64 [[INDVARS_IV2]], 1
151 ; CHECK-NEXT: [[EXITCOND:%.*]] = icmp ne i64 [[INDVARS_IV_NEXT3]], [[WIDE_TRIP_COUNT]]
152 ; CHECK-NEXT: br i1 [[EXITCOND]], label [[LOOP]], label [[RETURN_LOOPEXIT:%.*]]
153 ; CHECK: return.loopexit:
154 ; CHECK-NEXT: br label [[RETURN]]
156 ; CHECK-NEXT: ret void
158 i32 %irow, i32 %ilead) nounwind {
; NOTE(review): entry:, loop:, return: labels and 'ret void' are missing
; from this excerpt of the input IR — confirm upstream.
160 %cmp = icmp slt i32 1, %irow
161 br i1 %cmp, label %loop, label %return
; Two coupled IVs: rowidx steps by %ilead, i steps by 1; both get widened.
164 %rowidx = phi i32 [ 0, %entry ], [ %row.inc, %loop ]
165 %i = phi i32 [ 0, %entry ], [ %i.inc, %loop ]
166 %diagidx = add nsw i32 %rowidx, %i
167 %diagidxw = sext i32 %diagidx to i64
168 %matrixp = getelementptr inbounds [0 x double], [0 x double]* %matrix, i32 0, i64 %diagidxw
169 %v1 = load double, double* %matrixp
170 call void @use(double %v1)
171 %iw = sext i32 %i to i64
172 %vectorp = getelementptr inbounds [0 x double], [0 x double]* %vector, i32 0, i64 %iw
173 %v2 = load double, double* %vectorp
174 call void @use(double %v2)
175 %row.inc = add nsw i32 %rowidx, %ilead
176 %i.inc = add nsw i32 %i, 1
177 %cmp196 = icmp slt i32 %i.inc, %irow
178 br i1 %cmp196, label %loop, label %return
184 ; Avoid generating extra code to materialize a trip count. Skip LFTR.
; Same loop as @guardedloop but with no guard before entry. The CHECK lines
; show indvars keeping the 'slt' exit test (LFTR is skipped, so no trip-count
; materialization code is generated) and the loop body reduced to just the
; widened IV update. CHECK lines are autogenerated — do not hand-edit.
; The signature tail is on the 'i32 %irow, i32 %ilead' line after the
; CHECK block.
185 define void @unguardedloop([0 x double]* %matrix, [0 x double]* %vector,
186 ; CHECK-LABEL: @unguardedloop(
188 ; CHECK-NEXT: [[TMP0:%.*]] = sext i32 [[IROW:%.*]] to i64
189 ; CHECK-NEXT: br label [[LOOP:%.*]]
191 ; CHECK-NEXT: [[INDVARS_IV2:%.*]] = phi i64 [ [[INDVARS_IV_NEXT3:%.*]], [[LOOP]] ], [ 0, [[ENTRY:%.*]] ]
192 ; CHECK-NEXT: [[INDVARS_IV_NEXT3]] = add nuw nsw i64 [[INDVARS_IV2]], 1
193 ; CHECK-NEXT: [[CMP196:%.*]] = icmp slt i64 [[INDVARS_IV_NEXT3]], [[TMP0]]
194 ; CHECK-NEXT: br i1 [[CMP196]], label [[LOOP]], label [[RETURN:%.*]]
196 ; CHECK-NEXT: ret void
198 i32 %irow, i32 %ilead) nounwind {
; NOTE(review): entry:, loop:, return: labels and 'ret void' are missing
; from this excerpt of the input IR — confirm upstream.
203 %rowidx = phi i32 [ 0, %entry ], [ %row.inc, %loop ]
204 %i = phi i32 [ 0, %entry ], [ %i.inc, %loop ]
205 %diagidx = add nsw i32 %rowidx, %i
206 %diagidxw = sext i32 %diagidx to i64
207 %matrixp = getelementptr inbounds [0 x double], [0 x double]* %matrix, i32 0, i64 %diagidxw
; Loads are dead here (no @use calls), so the CHECK output retains only the IV.
208 %v1 = load double, double* %matrixp
209 %iw = sext i32 %i to i64
210 %vectorp = getelementptr inbounds [0 x double], [0 x double]* %vector, i32 0, i64 %iw
211 %v2 = load double, double* %vectorp
212 %row.inc = add nsw i32 %rowidx, %ilead
213 %i.inc = add nsw i32 %i, 1
214 %cmp196 = icmp slt i32 %i.inc, %irow
215 br i1 %cmp196, label %loop, label %return
221 ; Remove %i which is only used by the exit test.
222 ; Verify that SCEV can still compute a backedge count from the sign
223 ; extended %n, used for pointer comparison by LFTR.
225 ; TODO: Fix for PR13371 currently makes this impossible. See
226 ; IndVarSimplify.cpp hasConcreteDef(). We may want to change to undef rules.
; LFTR with an integer IV starting at %x and a parallel pointer IV; the
; CHECK lines show the exit test rewritten to 'icmp ne' against %lim.
; CHECK lines are autogenerated — do not hand-edit.
227 define void @geplftr(i8* %base, i32 %x, i32 %y, i32 %n) nounwind {
228 ; CHECK-LABEL: @geplftr(
230 ; CHECK-NEXT: [[X_EXT:%.*]] = sext i32 [[X:%.*]] to i64
231 ; CHECK-NEXT: [[ADD_PTR:%.*]] = getelementptr inbounds i8, i8* [[BASE:%.*]], i64 [[X_EXT]]
232 ; CHECK-NEXT: [[Y_EXT:%.*]] = sext i32 [[Y:%.*]] to i64
233 ; CHECK-NEXT: [[ADD_PTR10:%.*]] = getelementptr inbounds i8, i8* [[ADD_PTR]], i64 [[Y_EXT]]
234 ; CHECK-NEXT: [[LIM:%.*]] = add i32 [[X]], [[N:%.*]]
235 ; CHECK-NEXT: [[CMP_PH:%.*]] = icmp ult i32 [[X]], [[LIM]]
236 ; CHECK-NEXT: br i1 [[CMP_PH]], label [[LOOP_PREHEADER:%.*]], label [[EXIT:%.*]]
237 ; CHECK: loop.preheader:
238 ; CHECK-NEXT: br label [[LOOP:%.*]]
240 ; CHECK-NEXT: [[I:%.*]] = phi i32 [ [[INC:%.*]], [[LOOP]] ], [ [[X]], [[LOOP_PREHEADER]] ]
241 ; CHECK-NEXT: [[APTR:%.*]] = phi i8* [ [[INCDEC_PTR:%.*]], [[LOOP]] ], [ [[ADD_PTR10]], [[LOOP_PREHEADER]] ]
242 ; CHECK-NEXT: [[INCDEC_PTR]] = getelementptr inbounds i8, i8* [[APTR]], i32 1
243 ; CHECK-NEXT: store i8 3, i8* [[APTR]]
244 ; CHECK-NEXT: [[INC]] = add nuw i32 [[I]], 1
245 ; CHECK-NEXT: [[EXITCOND:%.*]] = icmp ne i32 [[INC]], [[LIM]]
246 ; CHECK-NEXT: br i1 [[EXITCOND]], label [[LOOP]], label [[EXIT_LOOPEXIT:%.*]]
247 ; CHECK: exit.loopexit:
248 ; CHECK-NEXT: br label [[EXIT]]
250 ; CHECK-NEXT: ret void
; NOTE(review): entry:, loop:, exit: labels, the '%inc = add ...' increment
; (the numbering jumps 264 -> 266 yet %inc is used below), 'ret void', and
; the closing brace are missing from this excerpt — confirm upstream.
253 %x.ext = sext i32 %x to i64
254 %add.ptr = getelementptr inbounds i8, i8* %base, i64 %x.ext
255 %y.ext = sext i32 %y to i64
256 %add.ptr10 = getelementptr inbounds i8, i8* %add.ptr, i64 %y.ext
257 %lim = add i32 %x, %n
; Guard: enter the loop only when x < x+n (unsigned).
258 %cmp.ph = icmp ult i32 %x, %lim
259 br i1 %cmp.ph, label %loop, label %exit
261 %i = phi i32 [ %x, %entry ], [ %inc, %loop ]
262 %aptr = phi i8* [ %add.ptr10, %entry ], [ %incdec.ptr, %loop ]
263 %incdec.ptr = getelementptr inbounds i8, i8* %aptr, i32 1
264 store i8 3, i8* %aptr
266 %cmp = icmp ult i32 %inc, %lim
267 br i1 %cmp, label %loop, label %exit
273 ; Exercise backedge taken count verification with a never-taken loop.
; Never-taken backedge: i starts at 0, inc is 1, and 'inc <= 0' is false on
; the first iteration, so the CHECK lines show the branch folded to
; 'br i1 false'. Exercises backedge-taken-count verification.
; CHECK lines are autogenerated — do not hand-edit.
275 define void @nevertaken() nounwind uwtable ssp {
276 ; CHECK-LABEL: @nevertaken(
277 ; CHECK-NEXT: br label [[LOOP:%.*]]
279 ; CHECK-NEXT: br i1 false, label [[LOOP]], label [[EXIT:%.*]]
281 ; CHECK-NEXT: ret void
; NOTE(review): entry:, loop:, exit: labels, 'ret void', and the closing
; brace are missing from this excerpt — confirm upstream.
286 %i = phi i32 [ 0, %entry ], [ %inc, %loop ]
287 %inc = add nsw i32 %i, 1
288 %cmp = icmp sle i32 %inc, 0
289 br i1 %cmp, label %loop, label %exit
295 ; Test LFTR on an IV whose recurrence start is a non-unit pointer type.
; LFTR on an IV whose recurrence starts at a GEP into a non-unit pointer
; type ([256 x i8]*). The CHECK lines show SCEVExpander emitting an end
; pointer ([[SCEVGEP]] = &base[0][sext n]) in the preheader and rewriting
; the exit test to 'icmp ne' against it. CHECK lines are autogenerated —
; do not hand-edit.
297 define void @aryptriv([256 x i8]* %base, i32 %n) nounwind {
298 ; CHECK-LABEL: @aryptriv(
299 ; CHECK-NEXT: [[IVSTART:%.*]] = getelementptr inbounds [256 x i8], [256 x i8]* [[BASE:%.*]], i32 0, i32 0
300 ; CHECK-NEXT: [[IVEND:%.*]] = getelementptr inbounds [256 x i8], [256 x i8]* [[BASE]], i32 0, i32 [[N:%.*]]
301 ; CHECK-NEXT: [[CMP_PH:%.*]] = icmp ult i8* [[IVSTART]], [[IVEND]]
302 ; CHECK-NEXT: br i1 [[CMP_PH]], label [[LOOP_PREHEADER:%.*]], label [[EXIT:%.*]]
303 ; CHECK: loop.preheader:
304 ; CHECK-NEXT: [[TMP0:%.*]] = sext i32 [[N]] to i64
305 ; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr [256 x i8], [256 x i8]* [[BASE]], i64 0, i64 [[TMP0]]
306 ; CHECK-NEXT: br label [[LOOP:%.*]]
308 ; CHECK-NEXT: [[APTR:%.*]] = phi i8* [ [[INCDEC_PTR:%.*]], [[LOOP]] ], [ [[IVSTART]], [[LOOP_PREHEADER]] ]
309 ; CHECK-NEXT: [[INCDEC_PTR]] = getelementptr inbounds i8, i8* [[APTR]], i32 1
310 ; CHECK-NEXT: store i8 3, i8* [[APTR]]
311 ; CHECK-NEXT: [[EXITCOND:%.*]] = icmp ne i8* [[INCDEC_PTR]], [[SCEVGEP]]
312 ; CHECK-NEXT: br i1 [[EXITCOND]], label [[LOOP]], label [[EXIT_LOOPEXIT:%.*]]
313 ; CHECK: exit.loopexit:
314 ; CHECK-NEXT: br label [[EXIT]]
316 ; CHECK-NEXT: ret void
; NOTE(review): entry:, loop:, exit: labels and the function's closing
; brace are not visible in this excerpt — confirm upstream before editing.
319 %ivstart = getelementptr inbounds [256 x i8], [256 x i8]* %base, i32 0, i32 0
320 %ivend = getelementptr inbounds [256 x i8], [256 x i8]* %base, i32 0, i32 %n
321 %cmp.ph = icmp ult i8* %ivstart, %ivend
322 br i1 %cmp.ph, label %loop, label %exit
325 %aptr = phi i8* [ %ivstart, %entry ], [ %incdec.ptr, %loop ]
326 %incdec.ptr = getelementptr inbounds i8, i8* %aptr, i32 1
327 store i8 3, i8* %aptr
; Original 'ult' exit test — rewritten by LFTR to the 'ne' form checked above.
328 %cmp = icmp ult i8* %incdec.ptr, %ivend
329 br i1 %cmp, label %loop, label %exit