; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -passes=indvars -S | FileCheck %s

target datalayout = "e-m:e-i64:64-p:64:64:64-n8:16:32:64-S128"

; When widening the IV and its users, trunc and zext/sext are not needed
; if the original 32-bit user is known to be non-negative, whether
; the IV is considered signed or unsigned.
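;
; A rough C equivalent of @foo below (hand-written for illustration; not taken
; from the original source). The non-negative 32-bit user is the index
; expression i + 2:
;
;   void foo(int *A, int *B, int *C, int N) {
;     for (int i = 0; i < N; i++)
;       A[i] = B[i] + C[i + 2] + 5u / (unsigned)(i + 2);
;   }
;
; @foo1 is the same loop with the sext/zext index casts swapped.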
define void @foo(ptr %A, ptr %B, ptr %C, i32 %N) {
; CHECK-LABEL: @foo(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[CMP1:%.*]] = icmp slt i32 0, [[N:%.*]]
; CHECK-NEXT:    br i1 [[CMP1]], label [[FOR_BODY_LR_PH:%.*]], label [[FOR_END:%.*]]
; CHECK:       for.body.lr.ph:
; CHECK-NEXT:    [[WIDE_TRIP_COUNT:%.*]] = zext i32 [[N]] to i64
; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
; CHECK:       for.body:
; CHECK-NEXT:    [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], [[FOR_INC:%.*]] ], [ 0, [[FOR_BODY_LR_PH]] ]
; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[B:%.*]], i64 [[INDVARS_IV]]
; CHECK-NEXT:    [[TMP0:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; CHECK-NEXT:    [[TMP1:%.*]] = add nuw nsw i64 [[INDVARS_IV]], 2
; CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, ptr [[C:%.*]], i64 [[TMP1]]
; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr [[ARRAYIDX2]], align 4
; CHECK-NEXT:    [[ADD3:%.*]] = add nsw i32 [[TMP0]], [[TMP2]]
; CHECK-NEXT:    [[TMP3:%.*]] = trunc nuw nsw i64 [[TMP1]] to i32
; CHECK-NEXT:    [[DIV0:%.*]] = udiv i32 5, [[TMP3]]
; CHECK-NEXT:    [[ADD4:%.*]] = add nsw i32 [[ADD3]], [[DIV0]]
; CHECK-NEXT:    [[ARRAYIDX5:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[INDVARS_IV]]
; CHECK-NEXT:    store i32 [[ADD4]], ptr [[ARRAYIDX5]], align 4
; CHECK-NEXT:    br label [[FOR_INC]]
; CHECK:       for.inc:
; CHECK-NEXT:    [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
; CHECK-NEXT:    [[EXITCOND:%.*]] = icmp ne i64 [[INDVARS_IV_NEXT]], [[WIDE_TRIP_COUNT]]
; CHECK-NEXT:    br i1 [[EXITCOND]], label [[FOR_BODY]], label [[FOR_COND_FOR_END_CRIT_EDGE:%.*]]
; CHECK:       for.cond.for.end_crit_edge:
; CHECK-NEXT:    br label [[FOR_END]]
; CHECK:       for.end:
; CHECK-NEXT:    ret void
;
entry:
  %cmp1 = icmp slt i32 0, %N
  br i1 %cmp1, label %for.body.lr.ph, label %for.end

for.body.lr.ph:                                   ; preds = %entry
  br label %for.body

for.body:                                         ; preds = %for.body.lr.ph, %for.inc
  %i.02 = phi i32 [ 0, %for.body.lr.ph ], [ %inc, %for.inc ]
  %idxprom = sext i32 %i.02 to i64
  %arrayidx = getelementptr inbounds i32, ptr %B, i64 %idxprom
  %0 = load i32, ptr %arrayidx, align 4
  %add = add nsw i32 %i.02, 2
  %idxprom1 = zext i32 %add to i64
  %arrayidx2 = getelementptr inbounds i32, ptr %C, i64 %idxprom1
  %1 = load i32, ptr %arrayidx2, align 4
  %add3 = add nsw i32 %0, %1
  %div0 = udiv i32 5, %add
  %add4 = add nsw i32 %add3, %div0
  %idxprom4 = zext i32 %i.02 to i64
  %arrayidx5 = getelementptr inbounds i32, ptr %A, i64 %idxprom4
  store i32 %add4, ptr %arrayidx5, align 4
  br label %for.inc

for.inc:                                          ; preds = %for.body
  %inc = add nsw i32 %i.02, 1
  %cmp = icmp slt i32 %inc, %N
  br i1 %cmp, label %for.body, label %for.cond.for.end_crit_edge

for.cond.for.end_crit_edge:                       ; preds = %for.inc
  br label %for.end

for.end:                                          ; preds = %for.cond.for.end_crit_edge, %entry
  ret void
}

define void @foo1(ptr %A, ptr %B, ptr %C, i32 %N) {
; CHECK-LABEL: @foo1(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[CMP1:%.*]] = icmp slt i32 0, [[N:%.*]]
; CHECK-NEXT:    br i1 [[CMP1]], label [[FOR_BODY_LR_PH:%.*]], label [[FOR_END:%.*]]
; CHECK:       for.body.lr.ph:
; CHECK-NEXT:    [[WIDE_TRIP_COUNT:%.*]] = zext i32 [[N]] to i64
; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
; CHECK:       for.body:
; CHECK-NEXT:    [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], [[FOR_INC:%.*]] ], [ 0, [[FOR_BODY_LR_PH]] ]
; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[B:%.*]], i64 [[INDVARS_IV]]
; CHECK-NEXT:    [[TMP0:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; CHECK-NEXT:    [[TMP1:%.*]] = add nuw nsw i64 [[INDVARS_IV]], 2
; CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, ptr [[C:%.*]], i64 [[TMP1]]
; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr [[ARRAYIDX2]], align 4
; CHECK-NEXT:    [[ADD3:%.*]] = add nsw i32 [[TMP0]], [[TMP2]]
; CHECK-NEXT:    [[ARRAYIDX5:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[INDVARS_IV]]
; CHECK-NEXT:    store i32 [[ADD3]], ptr [[ARRAYIDX5]], align 4
; CHECK-NEXT:    br label [[FOR_INC]]
; CHECK:       for.inc:
; CHECK-NEXT:    [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
; CHECK-NEXT:    [[EXITCOND:%.*]] = icmp ne i64 [[INDVARS_IV_NEXT]], [[WIDE_TRIP_COUNT]]
; CHECK-NEXT:    br i1 [[EXITCOND]], label [[FOR_BODY]], label [[FOR_COND_FOR_END_CRIT_EDGE:%.*]]
; CHECK:       for.cond.for.end_crit_edge:
; CHECK-NEXT:    br label [[FOR_END]]
; CHECK:       for.end:
; CHECK-NEXT:    ret void
;
entry:
  %cmp1 = icmp slt i32 0, %N
  br i1 %cmp1, label %for.body.lr.ph, label %for.end

for.body.lr.ph:                                   ; preds = %entry
  br label %for.body

for.body:                                         ; preds = %for.body.lr.ph, %for.inc
  %i.02 = phi i32 [ 0, %for.body.lr.ph ], [ %inc, %for.inc ]
  %idxprom = zext i32 %i.02 to i64
  %arrayidx = getelementptr inbounds i32, ptr %B, i64 %idxprom
  %0 = load i32, ptr %arrayidx, align 4
  %add = add nsw i32 %i.02, 2
  %idxprom1 = sext i32 %add to i64
  %arrayidx2 = getelementptr inbounds i32, ptr %C, i64 %idxprom1
  %1 = load i32, ptr %arrayidx2, align 4
  %add3 = add nsw i32 %0, %1
  %idxprom4 = sext i32 %i.02 to i64
  %arrayidx5 = getelementptr inbounds i32, ptr %A, i64 %idxprom4
  store i32 %add3, ptr %arrayidx5, align 4
  br label %for.inc

for.inc:                                          ; preds = %for.body
  %inc = add nsw i32 %i.02, 1
  %cmp = icmp slt i32 %inc, %N
  br i1 %cmp, label %for.body, label %for.cond.for.end_crit_edge

for.cond.for.end_crit_edge:                       ; preds = %for.inc
  br label %for.end

for.end:                                          ; preds = %for.cond.for.end_crit_edge, %entry
  ret void
}

@a = common global [100 x i32] zeroinitializer, align 16
@b = common global [100 x i32] zeroinitializer, align 16

define i32 @foo2(i32 %M) {
; CHECK-LABEL: @foo2(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[CMP1:%.*]] = icmp slt i32 0, [[M:%.*]]
; CHECK-NEXT:    br i1 [[CMP1]], label [[FOR_BODY_LR_PH:%.*]], label [[FOR_END:%.*]]
; CHECK:       for.body.lr.ph:
; CHECK-NEXT:    [[TMP0:%.*]] = sext i32 [[M]] to i64
; CHECK-NEXT:    [[WIDE_TRIP_COUNT:%.*]] = zext i32 [[M]] to i64
; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
; CHECK:       for.body:
; CHECK-NEXT:    [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], [[FOR_INC:%.*]] ], [ 0, [[FOR_BODY_LR_PH]] ]
; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [100 x i32], ptr @a, i64 0, i64 [[INDVARS_IV]]
; CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [100 x i32], ptr @b, i64 0, i64 [[INDVARS_IV]]
; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr [[ARRAYIDX2]], align 4
; CHECK-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP1]], [[TMP2]]
; CHECK-NEXT:    [[TMP3:%.*]] = add nsw i64 [[INDVARS_IV]], [[TMP0]]
; CHECK-NEXT:    [[ARRAYIDX5:%.*]] = getelementptr inbounds [100 x i32], ptr @a, i64 0, i64 [[TMP3]]
; CHECK-NEXT:    store i32 [[ADD]], ptr [[ARRAYIDX5]], align 4
; CHECK-NEXT:    br label [[FOR_INC]]
; CHECK:       for.inc:
; CHECK-NEXT:    [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
; CHECK-NEXT:    [[EXITCOND:%.*]] = icmp ne i64 [[INDVARS_IV_NEXT]], [[WIDE_TRIP_COUNT]]
; CHECK-NEXT:    br i1 [[EXITCOND]], label [[FOR_BODY]], label [[FOR_COND_FOR_END_CRIT_EDGE:%.*]]
; CHECK:       for.cond.for.end_crit_edge:
; CHECK-NEXT:    br label [[FOR_END]]
; CHECK:       for.end:
; CHECK-NEXT:    [[CALL:%.*]] = call i32 @dummy(ptr @a, ptr @b)
; CHECK-NEXT:    ret i32 0
;
entry:
  %cmp1 = icmp slt i32 0, %M
  br i1 %cmp1, label %for.body.lr.ph, label %for.end

for.body.lr.ph:                                   ; preds = %entry
  br label %for.body

for.body:                                         ; preds = %for.body.lr.ph, %for.inc
  %i.02 = phi i32 [ 0, %for.body.lr.ph ], [ %inc, %for.inc ]
  %idxprom = zext i32 %i.02 to i64
  %arrayidx = getelementptr inbounds [100 x i32], ptr @a, i64 0, i64 %idxprom
  %0 = load i32, ptr %arrayidx, align 4
  %idxprom1 = sext i32 %i.02 to i64
  %arrayidx2 = getelementptr inbounds [100 x i32], ptr @b, i64 0, i64 %idxprom1
  %1 = load i32, ptr %arrayidx2, align 4
  %add = add nsw i32 %0, %1
  %add3 = add nsw i32 %i.02, %M
  %idxprom4 = sext i32 %add3 to i64
  %arrayidx5 = getelementptr inbounds [100 x i32], ptr @a, i64 0, i64 %idxprom4
  store i32 %add, ptr %arrayidx5, align 4
  br label %for.inc

for.inc:                                          ; preds = %for.body
  %inc = add nsw i32 %i.02, 1
  %cmp = icmp slt i32 %inc, %M
  br i1 %cmp, label %for.body, label %for.cond.for.end_crit_edge

for.cond.for.end_crit_edge:                       ; preds = %for.inc
  br label %for.end

for.end:                                          ; preds = %for.cond.for.end_crit_edge, %entry
  %call = call i32 @dummy(ptr @a, ptr @b)
  ret i32 0
}

declare i32 @dummy(ptr, ptr)

; A case where the zext should not be eliminated when its operand could only be
; extended by sext.
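;
; A rough C sketch of the difference from @foo2 (hand-written for illustration;
; not taken from the original source): @foo2 indexes with the signed value
; i + M, so its sext folds into the widened IV, while @foo3 below casts the
; same sum to unsigned before indexing, so the zext has to stay:
;
;   int foo3(int M) {
;     for (int i = 0; i < M; i++)
;       a[(unsigned)(i + M)] = a[i] + b[i];
;     dummy(a, b);
;     return 0;
;   }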
define i32 @foo3(i32 %M) {
; CHECK-LABEL: @foo3(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[CMP1:%.*]] = icmp slt i32 0, [[M:%.*]]
; CHECK-NEXT:    br i1 [[CMP1]], label [[FOR_BODY_LR_PH:%.*]], label [[FOR_END:%.*]]
; CHECK:       for.body.lr.ph:
; CHECK-NEXT:    [[TMP0:%.*]] = sext i32 [[M]] to i64
; CHECK-NEXT:    [[WIDE_TRIP_COUNT:%.*]] = zext i32 [[M]] to i64
; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
; CHECK:       for.body:
; CHECK-NEXT:    [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], [[FOR_INC:%.*]] ], [ 0, [[FOR_BODY_LR_PH]] ]
; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [100 x i32], ptr @a, i64 0, i64 [[INDVARS_IV]]
; CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [100 x i32], ptr @b, i64 0, i64 [[INDVARS_IV]]
; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr [[ARRAYIDX2]], align 4
; CHECK-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP1]], [[TMP2]]
; CHECK-NEXT:    [[TMP3:%.*]] = add nsw i64 [[INDVARS_IV]], [[TMP0]]
; CHECK-NEXT:    [[TMP4:%.*]] = trunc nsw i64 [[TMP3]] to i32
; CHECK-NEXT:    [[IDXPROM4:%.*]] = zext i32 [[TMP4]] to i64
; CHECK-NEXT:    [[ARRAYIDX5:%.*]] = getelementptr inbounds [100 x i32], ptr @a, i64 0, i64 [[IDXPROM4]]
; CHECK-NEXT:    store i32 [[ADD]], ptr [[ARRAYIDX5]], align 4
; CHECK-NEXT:    br label [[FOR_INC]]
; CHECK:       for.inc:
; CHECK-NEXT:    [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
; CHECK-NEXT:    [[EXITCOND:%.*]] = icmp ne i64 [[INDVARS_IV_NEXT]], [[WIDE_TRIP_COUNT]]
; CHECK-NEXT:    br i1 [[EXITCOND]], label [[FOR_BODY]], label [[FOR_COND_FOR_END_CRIT_EDGE:%.*]]
; CHECK:       for.cond.for.end_crit_edge:
; CHECK-NEXT:    br label [[FOR_END]]
; CHECK:       for.end:
; CHECK-NEXT:    [[CALL:%.*]] = call i32 @dummy(ptr @a, ptr @b)
; CHECK-NEXT:    ret i32 0
;
entry:
  %cmp1 = icmp slt i32 0, %M
  br i1 %cmp1, label %for.body.lr.ph, label %for.end

for.body.lr.ph:                                   ; preds = %entry
  br label %for.body

for.body:                                         ; preds = %for.body.lr.ph, %for.inc
  %i.02 = phi i32 [ 0, %for.body.lr.ph ], [ %inc, %for.inc ]
  %idxprom = sext i32 %i.02 to i64
  %arrayidx = getelementptr inbounds [100 x i32], ptr @a, i64 0, i64 %idxprom
  %0 = load i32, ptr %arrayidx, align 4
  %idxprom1 = sext i32 %i.02 to i64
  %arrayidx2 = getelementptr inbounds [100 x i32], ptr @b, i64 0, i64 %idxprom1
  %1 = load i32, ptr %arrayidx2, align 4
  %add = add nsw i32 %0, %1
  %add3 = add nsw i32 %i.02, %M
  %idxprom4 = zext i32 %add3 to i64
  %arrayidx5 = getelementptr inbounds [100 x i32], ptr @a, i64 0, i64 %idxprom4
  store i32 %add, ptr %arrayidx5, align 4
  br label %for.inc

for.inc:                                          ; preds = %for.body
  %inc = add nsw i32 %i.02, 1
  %cmp = icmp slt i32 %inc, %M
  br i1 %cmp, label %for.body, label %for.cond.for.end_crit_edge

for.cond.for.end_crit_edge:                       ; preds = %for.inc
  br label %for.end

for.end:                                          ; preds = %for.cond.for.end_crit_edge, %entry
  %call = call i32 @dummy(ptr @a, ptr @b)
  ret i32 0
}

%struct.image = type {i32, i32}

define i32 @foo4(ptr %input, i32 %length, ptr %in) {
; CHECK-LABEL: @foo4(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[STRIDE:%.*]] = getelementptr inbounds [[STRUCT_IMAGE:%.*]], ptr [[INPUT:%.*]], i64 0, i32 1
; CHECK-NEXT:    [[TMP0:%.*]] = load i32, ptr [[STRIDE]], align 4
; CHECK-NEXT:    [[CMP17:%.*]] = icmp sgt i32 [[LENGTH:%.*]], 1
; CHECK-NEXT:    br i1 [[CMP17]], label [[FOR_BODY_LR_PH:%.*]], label [[FOR_COND_CLEANUP:%.*]]
; CHECK:       for.body.lr.ph:
; CHECK-NEXT:    [[TMP1:%.*]] = sext i32 [[TMP0]] to i64
; CHECK-NEXT:    [[WIDE_TRIP_COUNT:%.*]] = zext i32 [[LENGTH]] to i64
; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
; CHECK:       for.cond.cleanup.loopexit:
; CHECK-NEXT:    [[TMP2:%.*]] = phi i32 [ [[TMP10:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT:    br label [[FOR_COND_CLEANUP]]
; CHECK:       for.cond.cleanup:
; CHECK-NEXT:    [[TMP3:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[TMP2]], [[FOR_COND_CLEANUP_LOOPEXIT:%.*]] ]
; CHECK-NEXT:    ret i32 [[TMP3]]
; CHECK:       for.body:
; CHECK-NEXT:    [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ], [ 1, [[FOR_BODY_LR_PH]] ]
; CHECK-NEXT:    [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
; CHECK-NEXT:    [[TMP4:%.*]] = load i32, ptr [[INPUT]], align 8
; CHECK-NEXT:    [[TMP5:%.*]] = sext i32 [[TMP4]] to i64
; CHECK-NEXT:    [[TMP6:%.*]] = mul nsw i64 [[TMP5]], [[INDVARS_IV_NEXT]]
; CHECK-NEXT:    [[ADD_PTR:%.*]] = getelementptr inbounds i32, ptr [[IN:%.*]], i64 [[TMP6]]
; CHECK-NEXT:    [[TMP7:%.*]] = load i32, ptr [[ADD_PTR]], align 4
; CHECK-NEXT:    [[TMP8:%.*]] = mul nsw i64 [[TMP1]], [[INDVARS_IV_NEXT]]
; CHECK-NEXT:    [[ADD_PTR1:%.*]] = getelementptr inbounds i32, ptr [[IN]], i64 [[TMP8]]
; CHECK-NEXT:    [[TMP9:%.*]] = load i32, ptr [[ADD_PTR1]], align 4
; CHECK-NEXT:    [[TMP10]] = add i32 [[TMP7]], [[TMP9]]
; CHECK-NEXT:    [[EXITCOND:%.*]] = icmp ne i64 [[INDVARS_IV_NEXT]], [[WIDE_TRIP_COUNT]]
; CHECK-NEXT:    br i1 [[EXITCOND]], label [[FOR_BODY]], label [[FOR_COND_CLEANUP_LOOPEXIT]]
;
entry:
  %stride = getelementptr inbounds %struct.image, ptr %input, i64 0, i32 1
  %0 = load i32, ptr %stride, align 4
  %cmp17 = icmp sgt i32 %length, 1
  br i1 %cmp17, label %for.body.lr.ph, label %for.cond.cleanup

for.body.lr.ph:                                   ; preds = %entry
  br label %for.body

for.cond.cleanup.loopexit:                        ; preds = %for.body
  %1 = phi i32 [ %6, %for.body ]
  br label %for.cond.cleanup

for.cond.cleanup:                                 ; preds = %for.cond.cleanup.loopexit, %entry
  %2 = phi i32 [ 0, %entry ], [ %1, %for.cond.cleanup.loopexit ]
  ret i32 %2

; The mul instruction below is widened instead of generating a truncate
; instruction for it, regardless of whether the load operand of the mul is
; inside or outside the loop (we have both cases here).
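;
; Roughly, in C terms (hand-written for illustration; not taken from the
; original source), the loop body computes
;
;   sum = in[(*(int *)input) * (x + 1)] + in[input->stride * (x + 1)];
;
; and after widening, both nsw multiplies are performed directly on the i64
; IV (see the CHECK lines above), so no trunc back to i32 remains.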
for.body:                                         ; preds = %for.body.lr.ph, %for.body
  %x.018 = phi i32 [ 1, %for.body.lr.ph ], [ %add, %for.body ]
  %add = add nuw nsw i32 %x.018, 1
  %3 = load i32, ptr %input, align 8
  %mul = mul nsw i32 %3, %add
  %idx.ext = sext i32 %mul to i64
  %add.ptr = getelementptr inbounds i32, ptr %in, i64 %idx.ext
  %4 = load i32, ptr %add.ptr, align 4
  %mul1 = mul nsw i32 %0, %add
  %idx.ext1 = sext i32 %mul1 to i64
  %add.ptr1 = getelementptr inbounds i32, ptr %in, i64 %idx.ext1
  %5 = load i32, ptr %add.ptr1, align 4
  %6 = add i32 %4, %5
  %cmp = icmp slt i32 %add, %length
  br i1 %cmp, label %for.body, label %for.cond.cleanup.loopexit
}

define i32 @foo5(ptr %input, i32 %length, ptr %in) {
; CHECK-LABEL: @foo5(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[STRIDE:%.*]] = getelementptr inbounds [[STRUCT_IMAGE:%.*]], ptr [[INPUT:%.*]], i64 0, i32 1
; CHECK-NEXT:    [[TMP0:%.*]] = load i32, ptr [[STRIDE]], align 4
; CHECK-NEXT:    [[CMP17:%.*]] = icmp sgt i32 [[LENGTH:%.*]], 1
; CHECK-NEXT:    br i1 [[CMP17]], label [[FOR_BODY_LR_PH:%.*]], label [[FOR_COND_CLEANUP:%.*]]
; CHECK:       for.body.lr.ph:
; CHECK-NEXT:    [[TMP1:%.*]] = sext i32 [[TMP0]] to i64
; CHECK-NEXT:    [[WIDE_TRIP_COUNT:%.*]] = zext i32 [[LENGTH]] to i64
; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
; CHECK:       for.cond.cleanup.loopexit:
; CHECK-NEXT:    [[TMP2:%.*]] = phi i32 [ [[TMP10:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT:    br label [[FOR_COND_CLEANUP]]
; CHECK:       for.cond.cleanup:
; CHECK-NEXT:    [[TMP3:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[TMP2]], [[FOR_COND_CLEANUP_LOOPEXIT:%.*]] ]
; CHECK-NEXT:    ret i32 [[TMP3]]
; CHECK:       for.body:
; CHECK-NEXT:    [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ], [ 1, [[FOR_BODY_LR_PH]] ]
; CHECK-NEXT:    [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
; CHECK-NEXT:    [[TMP4:%.*]] = load i32, ptr [[INPUT]], align 8
; CHECK-NEXT:    [[TMP5:%.*]] = trunc nuw nsw i64 [[INDVARS_IV_NEXT]] to i32
; CHECK-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP4]], [[TMP5]]
; CHECK-NEXT:    [[IDX_EXT:%.*]] = sext i32 [[MUL]] to i64
; CHECK-NEXT:    [[ADD_PTR:%.*]] = getelementptr inbounds i32, ptr [[IN:%.*]], i64 [[IDX_EXT]]
; CHECK-NEXT:    [[TMP6:%.*]] = load i32, ptr [[ADD_PTR]], align 4
; CHECK-NEXT:    [[TMP7:%.*]] = mul nsw i64 [[TMP1]], [[INDVARS_IV_NEXT]]
; CHECK-NEXT:    [[ADD_PTR1:%.*]] = getelementptr inbounds i32, ptr [[IN]], i64 [[TMP7]]
; CHECK-NEXT:    [[TMP8:%.*]] = load i32, ptr [[ADD_PTR1]], align 4
; CHECK-NEXT:    [[TMP9:%.*]] = add i32 [[TMP6]], [[TMP8]]
; CHECK-NEXT:    [[TMP10]] = add i32 [[TMP9]], [[MUL]]
; CHECK-NEXT:    [[EXITCOND:%.*]] = icmp ne i64 [[INDVARS_IV_NEXT]], [[WIDE_TRIP_COUNT]]
; CHECK-NEXT:    br i1 [[EXITCOND]], label [[FOR_BODY]], label [[FOR_COND_CLEANUP_LOOPEXIT]]
;
entry:
  %stride = getelementptr inbounds %struct.image, ptr %input, i64 0, i32 1
  %0 = load i32, ptr %stride, align 4
  %cmp17 = icmp sgt i32 %length, 1
  br i1 %cmp17, label %for.body.lr.ph, label %for.cond.cleanup

for.body.lr.ph:                                   ; preds = %entry
  br label %for.body

for.cond.cleanup.loopexit:                        ; preds = %for.body
  %1 = phi i32 [ %7, %for.body ]
  br label %for.cond.cleanup

for.cond.cleanup:                                 ; preds = %for.cond.cleanup.loopexit, %entry
  %2 = phi i32 [ 0, %entry ], [ %1, %for.cond.cleanup.loopexit ]
  ret i32 %2

; This example is the same as above except that the first mul is used in two
; places, which may result in two versions of the multiply: an i32 and an i64
; version. In this case, keep the truncate instructions to avoid this
; redundancy.
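;
; Roughly, in C terms (hand-written for illustration; not taken from the
; original source):
;
;   m   = (*(int *)input) * (x + 1);
;   sum = in[m] + in[input->stride * (x + 1)] + m;
;
; Because m also feeds the i32 sum, widening the first multiply would leave
; both an i32 and an i64 copy of it, so the trunc of the IV is kept and that
; multiply stays in i32.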
for.body:                                         ; preds = %for.body.lr.ph, %for.body
  %x.018 = phi i32 [ 1, %for.body.lr.ph ], [ %add, %for.body ]
  %add = add nuw nsw i32 %x.018, 1
  %3 = load i32, ptr %input, align 8
  %mul = mul nsw i32 %3, %add
  %idx.ext = sext i32 %mul to i64
  %add.ptr = getelementptr inbounds i32, ptr %in, i64 %idx.ext
  %4 = load i32, ptr %add.ptr, align 4
  %mul1 = mul nsw i32 %0, %add
  %idx.ext1 = sext i32 %mul1 to i64
  %add.ptr1 = getelementptr inbounds i32, ptr %in, i64 %idx.ext1
  %5 = load i32, ptr %add.ptr1, align 4
  %6 = add i32 %4, %5
  %7 = add i32 %6, %mul
  %cmp = icmp slt i32 %add, %length
  br i1 %cmp, label %for.body, label %for.cond.cleanup.loopexit
}

define i32 @foo6(ptr %input, i32 %length, ptr %in) {
; CHECK-LABEL: @foo6(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[STRIDE:%.*]] = getelementptr inbounds [[STRUCT_IMAGE:%.*]], ptr [[INPUT:%.*]], i64 0, i32 1
; CHECK-NEXT:    [[TMP0:%.*]] = load i32, ptr [[STRIDE]], align 4
; CHECK-NEXT:    [[CMP17:%.*]] = icmp sgt i32 [[LENGTH:%.*]], 1
; CHECK-NEXT:    br i1 [[CMP17]], label [[FOR_BODY_LR_PH:%.*]], label [[FOR_COND_CLEANUP:%.*]]
; CHECK:       for.body.lr.ph:
; CHECK-NEXT:    [[TMP1:%.*]] = zext i32 [[TMP0]] to i64
; CHECK-NEXT:    [[WIDE_TRIP_COUNT:%.*]] = zext i32 [[LENGTH]] to i64
; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
; CHECK:       for.cond.cleanup.loopexit:
; CHECK-NEXT:    [[TMP2:%.*]] = phi i32 [ [[TMP12:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT:    br label [[FOR_COND_CLEANUP]]
; CHECK:       for.cond.cleanup:
; CHECK-NEXT:    [[TMP3:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[TMP2]], [[FOR_COND_CLEANUP_LOOPEXIT:%.*]] ]
; CHECK-NEXT:    ret i32 [[TMP3]]
; CHECK:       for.body:
; CHECK-NEXT:    [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ], [ 1, [[FOR_BODY_LR_PH]] ]
; CHECK-NEXT:    [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
; CHECK-NEXT:    [[TMP4:%.*]] = and i32 [[LENGTH]], [[TMP0]]
; CHECK-NEXT:    [[TMP5:%.*]] = zext i32 [[TMP4]] to i64
; CHECK-NEXT:    [[TMP6:%.*]] = mul nuw i64 [[TMP5]], [[INDVARS_IV_NEXT]]
; CHECK-NEXT:    [[ADD_PTR:%.*]] = getelementptr inbounds i32, ptr [[IN:%.*]], i64 [[TMP6]]
; CHECK-NEXT:    [[TMP7:%.*]] = load i32, ptr [[ADD_PTR]], align 4
; CHECK-NEXT:    [[TMP8:%.*]] = mul nuw i64 [[TMP1]], [[INDVARS_IV_NEXT]]
; CHECK-NEXT:    [[ADD_PTR1:%.*]] = getelementptr inbounds i32, ptr [[IN]], i64 [[TMP8]]
; CHECK-NEXT:    [[TMP9:%.*]] = load i32, ptr [[ADD_PTR1]], align 4
; CHECK-NEXT:    [[OR:%.*]] = or i32 [[LENGTH]], [[TMP9]]
; CHECK-NEXT:    [[TMP10:%.*]] = zext i32 [[OR]] to i64
; CHECK-NEXT:    [[TMP11:%.*]] = sub nuw i64 [[TMP10]], [[INDVARS_IV_NEXT]]
; CHECK-NEXT:    [[PTR_OR:%.*]] = getelementptr inbounds i32, ptr [[IN]], i64 [[TMP11]]
; CHECK-NEXT:    [[VAL_OR:%.*]] = load i32, ptr [[PTR_OR]], align 4
; CHECK-NEXT:    [[TMP12]] = add i32 [[TMP7]], [[VAL_OR]]
; CHECK-NEXT:    [[EXITCOND:%.*]] = icmp ne i64 [[INDVARS_IV_NEXT]], [[WIDE_TRIP_COUNT]]
; CHECK-NEXT:    br i1 [[EXITCOND]], label [[FOR_BODY]], label [[FOR_COND_CLEANUP_LOOPEXIT]]
;
entry:
  %stride = getelementptr inbounds %struct.image, ptr %input, i64 0, i32 1
  %0 = load i32, ptr %stride, align 4
  %cmp17 = icmp sgt i32 %length, 1
  br i1 %cmp17, label %for.body.lr.ph, label %for.cond.cleanup

for.body.lr.ph:                                   ; preds = %entry
  br label %for.body

for.cond.cleanup.loopexit:                        ; preds = %for.body
  %1 = phi i32 [ %6, %for.body ]
  br label %for.cond.cleanup

for.cond.cleanup:                                 ; preds = %for.cond.cleanup.loopexit, %entry
  %2 = phi i32 [ 0, %entry ], [ %1, %for.cond.cleanup.loopexit ]
  ret i32 %2

; Extend foo4 so that loop variants (%3 and %or) that feed a mul/sub/add
; followed by an extend do not need a trunc instruction.
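;
; Roughly, in C terms (hand-written for illustration; not taken from the
; original source), with all values treated as unsigned:
;
;   t   = length & input->stride;                 /* %3  */
;   o   = length | in[input->stride * (x + 1)];   /* %or */
;   sum = in[t * (x + 1)] + in[o - (x + 1)];
;
; The nuw mul/sub feeding a zext are rewritten directly on the widened IV, so
; no trunc of the IV back to i32 is needed.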
for.body:                                         ; preds = %for.body.lr.ph, %for.body
  %x.018 = phi i32 [ 1, %for.body.lr.ph ], [ %add, %for.body ]
  %add = add nuw nsw i32 %x.018, 1
  %3 = and i32 %length, %0
  %mul = mul nuw i32 %3, %add
  %idx.ext = zext i32 %mul to i64
  %add.ptr = getelementptr inbounds i32, ptr %in, i64 %idx.ext
  %4 = load i32, ptr %add.ptr, align 4
  %mul1 = mul nuw i32 %0, %add
  %idx.ext1 = zext i32 %mul1 to i64
  %add.ptr1 = getelementptr inbounds i32, ptr %in, i64 %idx.ext1
  %5 = load i32, ptr %add.ptr1, align 4
  %or = or i32 %length, %5
  %sub.or = sub nuw i32 %or, %add
  %or.ext = zext i32 %sub.or to i64
  %ptr.or = getelementptr inbounds i32, ptr %in, i64 %or.ext
  %val.or = load i32, ptr %ptr.or
  %6 = add i32 %4, %val.or
  %cmp = icmp ult i32 %add, %length
  br i1 %cmp, label %for.body, label %for.cond.cleanup.loopexit
}

; Test that we can handle shl and disjoint or in getExtendedOperandRecurrence.
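;
; A rough C equivalent of @foo7 (hand-written for illustration; not taken from
; the original source): the store index 2*i + 1 is built from a shl plus a
; disjoint or, and the IV steps by x + 2:
;
;   void foo7(int n, int *a, int x) {
;     for (int i = 0; i < n; i += x + 2)
;       a[2 * i + 1] = i;
;   }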
define void @foo7(i32 %n, ptr %a, i32 %x) {
; CHECK-LABEL: @foo7(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[CMP6:%.*]] = icmp sgt i32 [[N:%.*]], 0
; CHECK-NEXT:    br i1 [[CMP6]], label [[FOR_BODY_LR_PH:%.*]], label [[FOR_COND_CLEANUP:%.*]]
; CHECK:       for.body.lr.ph:
; CHECK-NEXT:    [[ADD1:%.*]] = add nsw i32 [[X:%.*]], 2
; CHECK-NEXT:    [[TMP0:%.*]] = sext i32 [[ADD1]] to i64
; CHECK-NEXT:    [[TMP1:%.*]] = sext i32 [[N]] to i64
; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
; CHECK:       for.cond.cleanup.loopexit:
; CHECK-NEXT:    br label [[FOR_COND_CLEANUP]]
; CHECK:       for.cond.cleanup:
; CHECK-NEXT:    ret void
; CHECK:       for.body:
; CHECK-NEXT:    [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ], [ 0, [[FOR_BODY_LR_PH]] ]
; CHECK-NEXT:    [[TMP2:%.*]] = shl nsw i64 [[INDVARS_IV]], 1
; CHECK-NEXT:    [[TMP3:%.*]] = or disjoint i64 [[TMP2]], 1
; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[TMP3]]
; CHECK-NEXT:    [[TMP4:%.*]] = trunc nsw i64 [[INDVARS_IV]] to i32
; CHECK-NEXT:    store i32 [[TMP4]], ptr [[ARRAYIDX]], align 4
; CHECK-NEXT:    [[INDVARS_IV_NEXT]] = add nsw i64 [[INDVARS_IV]], [[TMP0]]
; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i64 [[INDVARS_IV_NEXT]], [[TMP1]]
; CHECK-NEXT:    br i1 [[CMP]], label [[FOR_BODY]], label [[FOR_COND_CLEANUP_LOOPEXIT:%.*]]
;
entry:
  %cmp6 = icmp sgt i32 %n, 0
  br i1 %cmp6, label %for.body.lr.ph, label %for.cond.cleanup

for.body.lr.ph:                                   ; preds = %entry
  %add1 = add nsw i32 %x, 2
  br label %for.body

for.cond.cleanup.loopexit:                        ; preds = %for.body
  br label %for.cond.cleanup

for.cond.cleanup:                                 ; preds = %for.cond.cleanup.loopexit, %entry
  ret void

for.body:                                         ; preds = %for.body.lr.ph, %for.body
  %i.07 = phi i32 [ 0, %for.body.lr.ph ], [ %add2, %for.body ]
  %mul = shl nsw i32 %i.07, 1
  %add = or disjoint i32 %mul, 1
  %idxprom = sext i32 %add to i64
  %arrayidx = getelementptr inbounds i32, ptr %a, i64 %idxprom
  store i32 %i.07, ptr %arrayidx, align 4
  %add2 = add nsw i32 %add1, %i.07
  %cmp = icmp slt i32 %add2, %n
  br i1 %cmp, label %for.body, label %for.cond.cleanup.loopexit
}