1 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
2 ; RUN: opt < %s -indvars -S | FileCheck %s
4 target datalayout = "e-m:e-i64:64-p:64:64:64-n8:16:32:64-S128"
6 ; When widening IV and its users, trunc and zext/sext are not needed
7 ; if the original 32-bit user is known to be non-negative, whether
8 ; the IV is considered signed or unsigned.
; @foo: the 32-bit IV %i.02 feeds both a sext (%idxprom) and zext
; (%idxprom1, %idxprom4) index promotion.  Since the IV is known
; non-negative, indvars widens it to i64 and folds both extension kinds
; into the widened IV; only the udiv operand retains a trunc, because
; udiv is not an address computation that can absorb the wide value.
; NOTE(review): this chunk has lines elided relative to the original
; test file (entry label, fall-through branches, ret/closing brace) —
; code below left byte-identical; confirm against the full file.
9 define void @foo(i32* %A, i32* %B, i32* %C, i32 %N) {
12 ; CHECK-NEXT: [[CMP1:%.*]] = icmp slt i32 0, [[N:%.*]]
13 ; CHECK-NEXT: br i1 [[CMP1]], label [[FOR_BODY_LR_PH:%.*]], label [[FOR_END:%.*]]
14 ; CHECK: for.body.lr.ph:
15 ; CHECK-NEXT: [[WIDE_TRIP_COUNT:%.*]] = zext i32 [[N]] to i64
16 ; CHECK-NEXT: br label [[FOR_BODY:%.*]]
18 ; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], [[FOR_INC:%.*]] ], [ 0, [[FOR_BODY_LR_PH]] ]
19 ; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[B:%.*]], i64 [[INDVARS_IV]]
20 ; CHECK-NEXT: [[TMP0:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
21 ; CHECK-NEXT: [[TMP1:%.*]] = add nuw nsw i64 [[INDVARS_IV]], 2
22 ; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, i32* [[C:%.*]], i64 [[TMP1]]
23 ; CHECK-NEXT: [[TMP2:%.*]] = load i32, i32* [[ARRAYIDX2]], align 4
24 ; CHECK-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP0]], [[TMP2]]
25 ; CHECK-NEXT: [[TMP3:%.*]] = trunc i64 [[TMP1]] to i32
26 ; CHECK-NEXT: [[DIV0:%.*]] = udiv i32 5, [[TMP3]]
27 ; CHECK-NEXT: [[ADD4:%.*]] = add nsw i32 [[ADD3]], [[DIV0]]
28 ; CHECK-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds i32, i32* [[A:%.*]], i64 [[INDVARS_IV]]
29 ; CHECK-NEXT: store i32 [[ADD4]], i32* [[ARRAYIDX5]], align 4
30 ; CHECK-NEXT: br label [[FOR_INC]]
32 ; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
33 ; CHECK-NEXT: [[EXITCOND:%.*]] = icmp ne i64 [[INDVARS_IV_NEXT]], [[WIDE_TRIP_COUNT]]
34 ; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_BODY]], label [[FOR_COND_FOR_END_CRIT_EDGE:%.*]]
35 ; CHECK: for.cond.for.end_crit_edge:
36 ; CHECK-NEXT: br label [[FOR_END]]
38 ; CHECK-NEXT: ret void
41 %cmp1 = icmp slt i32 0, %N
42 br i1 %cmp1, label %for.body.lr.ph, label %for.end
44 for.body.lr.ph: ; preds = %entry
47 for.body: ; preds = %for.body.lr.ph, %for.inc
48 %i.02 = phi i32 [ 0, %for.body.lr.ph ], [ %inc, %for.inc ]
49 %idxprom = sext i32 %i.02 to i64
50 %arrayidx = getelementptr inbounds i32, i32* %B, i64 %idxprom
51 %0 = load i32, i32* %arrayidx, align 4
52 %add = add nsw i32 %i.02, 2
53 %idxprom1 = zext i32 %add to i64
54 %arrayidx2 = getelementptr inbounds i32, i32* %C, i64 %idxprom1
55 %1 = load i32, i32* %arrayidx2, align 4
56 %add3 = add nsw i32 %0, %1
57 %div0 = udiv i32 5, %add
58 %add4 = add nsw i32 %add3, %div0
59 %idxprom4 = zext i32 %i.02 to i64
60 %arrayidx5 = getelementptr inbounds i32, i32* %A, i64 %idxprom4
61 store i32 %add4, i32* %arrayidx5, align 4
64 for.inc: ; preds = %for.body
65 %inc = add nsw i32 %i.02, 1
66 %cmp = icmp slt i32 %inc, %N
67 br i1 %cmp, label %for.body, label %for.cond.for.end_crit_edge
69 for.cond.for.end_crit_edge: ; preds = %for.inc
72 for.end: ; preds = %for.cond.for.end_crit_edge, %entry
; @foo1: mirror image of @foo — the extension kinds on the users are
; swapped (zext on %idxprom, sext on %idxprom1/%idxprom4) and there is
; no udiv user.  All ext/trunc pairs are expected to fold away into the
; widened i64 IV; the assertions show no trunc in the optimized body.
; NOTE(review): lines elided in this chunk (entry label, branches,
; ret/closing brace); code left byte-identical.
76 define void @foo1(i32* %A, i32* %B, i32* %C, i32 %N) {
79 ; CHECK-NEXT: [[CMP1:%.*]] = icmp slt i32 0, [[N:%.*]]
80 ; CHECK-NEXT: br i1 [[CMP1]], label [[FOR_BODY_LR_PH:%.*]], label [[FOR_END:%.*]]
81 ; CHECK: for.body.lr.ph:
82 ; CHECK-NEXT: [[WIDE_TRIP_COUNT:%.*]] = zext i32 [[N]] to i64
83 ; CHECK-NEXT: br label [[FOR_BODY:%.*]]
85 ; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], [[FOR_INC:%.*]] ], [ 0, [[FOR_BODY_LR_PH]] ]
86 ; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[B:%.*]], i64 [[INDVARS_IV]]
87 ; CHECK-NEXT: [[TMP0:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
88 ; CHECK-NEXT: [[TMP1:%.*]] = add nuw nsw i64 [[INDVARS_IV]], 2
89 ; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, i32* [[C:%.*]], i64 [[TMP1]]
90 ; CHECK-NEXT: [[TMP2:%.*]] = load i32, i32* [[ARRAYIDX2]], align 4
91 ; CHECK-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP0]], [[TMP2]]
92 ; CHECK-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds i32, i32* [[A:%.*]], i64 [[INDVARS_IV]]
93 ; CHECK-NEXT: store i32 [[ADD3]], i32* [[ARRAYIDX5]], align 4
94 ; CHECK-NEXT: br label [[FOR_INC]]
96 ; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
97 ; CHECK-NEXT: [[EXITCOND:%.*]] = icmp ne i64 [[INDVARS_IV_NEXT]], [[WIDE_TRIP_COUNT]]
98 ; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_BODY]], label [[FOR_COND_FOR_END_CRIT_EDGE:%.*]]
99 ; CHECK: for.cond.for.end_crit_edge:
100 ; CHECK-NEXT: br label [[FOR_END]]
102 ; CHECK-NEXT: ret void
105 %cmp1 = icmp slt i32 0, %N
106 br i1 %cmp1, label %for.body.lr.ph, label %for.end
108 for.body.lr.ph: ; preds = %entry
111 for.body: ; preds = %for.body.lr.ph, %for.inc
112 %i.02 = phi i32 [ 0, %for.body.lr.ph ], [ %inc, %for.inc ]
113 %idxprom = zext i32 %i.02 to i64
114 %arrayidx = getelementptr inbounds i32, i32* %B, i64 %idxprom
115 %0 = load i32, i32* %arrayidx, align 4
116 %add = add nsw i32 %i.02, 2
117 %idxprom1 = sext i32 %add to i64
118 %arrayidx2 = getelementptr inbounds i32, i32* %C, i64 %idxprom1
119 %1 = load i32, i32* %arrayidx2, align 4
120 %add3 = add nsw i32 %0, %1
121 %idxprom4 = sext i32 %i.02 to i64
122 %arrayidx5 = getelementptr inbounds i32, i32* %A, i64 %idxprom4
123 store i32 %add3, i32* %arrayidx5, align 4
126 for.inc: ; preds = %for.body
127 %inc = add nsw i32 %i.02, 1
128 %cmp = icmp slt i32 %inc, %N
129 br i1 %cmp, label %for.body, label %for.cond.for.end_crit_edge
131 for.cond.for.end_crit_edge: ; preds = %for.inc
134 for.end: ; preds = %for.cond.for.end_crit_edge, %entry
; Module-level arrays indexed by @foo2/@foo3 below; kept live across the
; loops by the @dummy call so the stores are not dead-code eliminated.
139 @a = common global [100 x i32] zeroinitializer, align 16
140 @b = common global [100 x i32] zeroinitializer, align 16
; @foo2: %add3 = %i.02 + %M can be negative (M is not known
; non-negative), yet its user is a sext, so the widened IV plus a
; sign-extended copy of M (TMP0) can compute the i64 index directly —
; the sext on %idxprom4 is folded into "add nsw i64 IV, sext(M)".
; NOTE(review): lines elided in this chunk (entry label, branches,
; ret/closing brace); code left byte-identical.
142 define i32 @foo2(i32 %M) {
143 ; CHECK-LABEL: @foo2(
145 ; CHECK-NEXT: [[CMP1:%.*]] = icmp slt i32 0, [[M:%.*]]
146 ; CHECK-NEXT: br i1 [[CMP1]], label [[FOR_BODY_LR_PH:%.*]], label [[FOR_END:%.*]]
147 ; CHECK: for.body.lr.ph:
148 ; CHECK-NEXT: [[TMP0:%.*]] = sext i32 [[M]] to i64
149 ; CHECK-NEXT: [[WIDE_TRIP_COUNT:%.*]] = zext i32 [[M]] to i64
150 ; CHECK-NEXT: br label [[FOR_BODY:%.*]]
152 ; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], [[FOR_INC:%.*]] ], [ 0, [[FOR_BODY_LR_PH]] ]
153 ; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [100 x i32], [100 x i32]* @a, i64 0, i64 [[INDVARS_IV]]
154 ; CHECK-NEXT: [[TMP1:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
155 ; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds [100 x i32], [100 x i32]* @b, i64 0, i64 [[INDVARS_IV]]
156 ; CHECK-NEXT: [[TMP2:%.*]] = load i32, i32* [[ARRAYIDX2]], align 4
157 ; CHECK-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], [[TMP2]]
158 ; CHECK-NEXT: [[TMP3:%.*]] = add nsw i64 [[INDVARS_IV]], [[TMP0]]
159 ; CHECK-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds [100 x i32], [100 x i32]* @a, i64 0, i64 [[TMP3]]
160 ; CHECK-NEXT: store i32 [[ADD]], i32* [[ARRAYIDX5]], align 4
161 ; CHECK-NEXT: br label [[FOR_INC]]
163 ; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
164 ; CHECK-NEXT: [[EXITCOND:%.*]] = icmp ne i64 [[INDVARS_IV_NEXT]], [[WIDE_TRIP_COUNT]]
165 ; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_BODY]], label [[FOR_COND_FOR_END_CRIT_EDGE:%.*]]
166 ; CHECK: for.cond.for.end_crit_edge:
167 ; CHECK-NEXT: br label [[FOR_END]]
169 ; CHECK-NEXT: [[CALL:%.*]] = call i32 @dummy(i32* getelementptr inbounds ([100 x i32], [100 x i32]* @a, i32 0, i32 0), i32* getelementptr inbounds ([100 x i32], [100 x i32]* @b, i32 0, i32 0))
170 ; CHECK-NEXT: ret i32 0
173 %cmp1 = icmp slt i32 0, %M
174 br i1 %cmp1, label %for.body.lr.ph, label %for.end
176 for.body.lr.ph: ; preds = %entry
179 for.body: ; preds = %for.body.lr.ph, %for.inc
180 %i.02 = phi i32 [ 0, %for.body.lr.ph ], [ %inc, %for.inc ]
181 %idxprom = zext i32 %i.02 to i64
182 %arrayidx = getelementptr inbounds [100 x i32], [100 x i32]* @a, i64 0, i64 %idxprom
183 %0 = load i32, i32* %arrayidx, align 4
184 %idxprom1 = sext i32 %i.02 to i64
185 %arrayidx2 = getelementptr inbounds [100 x i32], [100 x i32]* @b, i64 0, i64 %idxprom1
186 %1 = load i32, i32* %arrayidx2, align 4
187 %add = add nsw i32 %0, %1
188 %add3 = add nsw i32 %i.02, %M
189 %idxprom4 = sext i32 %add3 to i64
190 %arrayidx5 = getelementptr inbounds [100 x i32], [100 x i32]* @a, i64 0, i64 %idxprom4
191 store i32 %add, i32* %arrayidx5, align 4
194 for.inc: ; preds = %for.body
195 %inc = add nsw i32 %i.02, 1
196 %cmp = icmp slt i32 %inc, %M
197 br i1 %cmp, label %for.body, label %for.cond.for.end_crit_edge
199 for.cond.for.end_crit_edge: ; preds = %for.inc
202 for.end: ; preds = %for.cond.for.end_crit_edge, %entry
203 %call = call i32 @dummy(i32* getelementptr inbounds ([100 x i32], [100 x i32]* @a, i32 0, i32 0), i32* getelementptr inbounds ([100 x i32], [100 x i32]* @b, i32 0, i32 0))
207 declare i32 @dummy(i32*, i32*)
209 ; A case where zext should not be eliminated when its operands could only be extended by sext.
; @foo3: same as @foo2 except %idxprom4 is a zext of %add3.  Because
; %add3 = i + M may be negative, zext and sext disagree on it, so the
; widened computation must be truncated back to i32 and re-zext'ed —
; the trunc/zext pair is expected to remain in the output.
; NOTE(review): lines elided in this chunk (entry label, branches,
; ret/closing brace); code left byte-identical.
210 define i32 @foo3(i32 %M) {
211 ; CHECK-LABEL: @foo3(
213 ; CHECK-NEXT: [[CMP1:%.*]] = icmp slt i32 0, [[M:%.*]]
214 ; CHECK-NEXT: br i1 [[CMP1]], label [[FOR_BODY_LR_PH:%.*]], label [[FOR_END:%.*]]
215 ; CHECK: for.body.lr.ph:
216 ; CHECK-NEXT: [[TMP0:%.*]] = sext i32 [[M]] to i64
217 ; CHECK-NEXT: [[WIDE_TRIP_COUNT:%.*]] = zext i32 [[M]] to i64
218 ; CHECK-NEXT: br label [[FOR_BODY:%.*]]
220 ; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], [[FOR_INC:%.*]] ], [ 0, [[FOR_BODY_LR_PH]] ]
221 ; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [100 x i32], [100 x i32]* @a, i64 0, i64 [[INDVARS_IV]]
222 ; CHECK-NEXT: [[TMP1:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
223 ; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds [100 x i32], [100 x i32]* @b, i64 0, i64 [[INDVARS_IV]]
224 ; CHECK-NEXT: [[TMP2:%.*]] = load i32, i32* [[ARRAYIDX2]], align 4
225 ; CHECK-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], [[TMP2]]
226 ; CHECK-NEXT: [[TMP3:%.*]] = add nsw i64 [[INDVARS_IV]], [[TMP0]]
227 ; CHECK-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
228 ; CHECK-NEXT: [[IDXPROM4:%.*]] = zext i32 [[TMP4]] to i64
229 ; CHECK-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds [100 x i32], [100 x i32]* @a, i64 0, i64 [[IDXPROM4]]
230 ; CHECK-NEXT: store i32 [[ADD]], i32* [[ARRAYIDX5]], align 4
231 ; CHECK-NEXT: br label [[FOR_INC]]
233 ; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
234 ; CHECK-NEXT: [[EXITCOND:%.*]] = icmp ne i64 [[INDVARS_IV_NEXT]], [[WIDE_TRIP_COUNT]]
235 ; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_BODY]], label [[FOR_COND_FOR_END_CRIT_EDGE:%.*]]
236 ; CHECK: for.cond.for.end_crit_edge:
237 ; CHECK-NEXT: br label [[FOR_END]]
239 ; CHECK-NEXT: [[CALL:%.*]] = call i32 @dummy(i32* getelementptr inbounds ([100 x i32], [100 x i32]* @a, i32 0, i32 0), i32* getelementptr inbounds ([100 x i32], [100 x i32]* @b, i32 0, i32 0))
240 ; CHECK-NEXT: ret i32 0
243 %cmp1 = icmp slt i32 0, %M
244 br i1 %cmp1, label %for.body.lr.ph, label %for.end
246 for.body.lr.ph: ; preds = %entry
249 for.body: ; preds = %for.body.lr.ph, %for.inc
250 %i.02 = phi i32 [ 0, %for.body.lr.ph ], [ %inc, %for.inc ]
251 %idxprom = sext i32 %i.02 to i64
252 %arrayidx = getelementptr inbounds [100 x i32], [100 x i32]* @a, i64 0, i64 %idxprom
253 %0 = load i32, i32* %arrayidx, align 4
254 %idxprom1 = sext i32 %i.02 to i64
255 %arrayidx2 = getelementptr inbounds [100 x i32], [100 x i32]* @b, i64 0, i64 %idxprom1
256 %1 = load i32, i32* %arrayidx2, align 4
257 %add = add nsw i32 %0, %1
258 %add3 = add nsw i32 %i.02, %M
259 %idxprom4 = zext i32 %add3 to i64
260 %arrayidx5 = getelementptr inbounds [100 x i32], [100 x i32]* @a, i64 0, i64 %idxprom4
261 store i32 %add, i32* %arrayidx5, align 4
264 for.inc: ; preds = %for.body
265 %inc = add nsw i32 %i.02, 1
266 %cmp = icmp slt i32 %inc, %M
267 br i1 %cmp, label %for.body, label %for.cond.for.end_crit_edge
269 for.cond.for.end_crit_edge: ; preds = %for.inc
272 for.end: ; preds = %for.cond.for.end_crit_edge, %entry
273 %call = call i32 @dummy(i32* getelementptr inbounds ([100 x i32], [100 x i32]* @a, i32 0, i32 0), i32* getelementptr inbounds ([100 x i32], [100 x i32]* @b, i32 0, i32 0))
; %struct.image: two i32 fields; field 0 is "channel", field 1 is
; "stride" per the getelementptr uses in @foo4/@foo5 below.
277 %struct.image = type {i32, i32}
; @foo4: both muls feeding sext'ed pointer indices are widened to i64
; (one operand loaded inside the loop, one hoisted before it), so no
; trunc of the IV is needed in the optimized body.
; NOTE(review): lines elided in this chunk — the for.body terminator
; region referenced as %6 in the loop-exit phi is not visible here;
; code left byte-identical.
278 define i32 @foo4(%struct.image* %input, i32 %length, i32* %in) {
279 ; CHECK-LABEL: @foo4(
281 ; CHECK-NEXT: [[STRIDE:%.*]] = getelementptr inbounds [[STRUCT_IMAGE:%.*]], %struct.image* [[INPUT:%.*]], i64 0, i32 1
282 ; CHECK-NEXT: [[TMP0:%.*]] = load i32, i32* [[STRIDE]], align 4
283 ; CHECK-NEXT: [[CMP17:%.*]] = icmp sgt i32 [[LENGTH:%.*]], 1
284 ; CHECK-NEXT: br i1 [[CMP17]], label [[FOR_BODY_LR_PH:%.*]], label [[FOR_COND_CLEANUP:%.*]]
285 ; CHECK: for.body.lr.ph:
286 ; CHECK-NEXT: [[CHANNEL:%.*]] = getelementptr inbounds [[STRUCT_IMAGE]], %struct.image* [[INPUT]], i64 0, i32 0
287 ; CHECK-NEXT: [[TMP1:%.*]] = sext i32 [[TMP0]] to i64
288 ; CHECK-NEXT: [[WIDE_TRIP_COUNT:%.*]] = zext i32 [[LENGTH]] to i64
289 ; CHECK-NEXT: br label [[FOR_BODY:%.*]]
290 ; CHECK: for.cond.cleanup.loopexit:
291 ; CHECK-NEXT: [[TMP2:%.*]] = phi i32 [ [[TMP10:%.*]], [[FOR_BODY]] ]
292 ; CHECK-NEXT: br label [[FOR_COND_CLEANUP]]
293 ; CHECK: for.cond.cleanup:
294 ; CHECK-NEXT: [[TMP3:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[TMP2]], [[FOR_COND_CLEANUP_LOOPEXIT:%.*]] ]
295 ; CHECK-NEXT: ret i32 [[TMP3]]
297 ; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ], [ 1, [[FOR_BODY_LR_PH]] ]
298 ; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
299 ; CHECK-NEXT: [[TMP4:%.*]] = load i32, i32* [[CHANNEL]], align 8
300 ; CHECK-NEXT: [[TMP5:%.*]] = sext i32 [[TMP4]] to i64
301 ; CHECK-NEXT: [[TMP6:%.*]] = mul nsw i64 [[TMP5]], [[INDVARS_IV_NEXT]]
302 ; CHECK-NEXT: [[ADD_PTR:%.*]] = getelementptr inbounds i32, i32* [[IN:%.*]], i64 [[TMP6]]
303 ; CHECK-NEXT: [[TMP7:%.*]] = load i32, i32* [[ADD_PTR]], align 4
304 ; CHECK-NEXT: [[TMP8:%.*]] = mul nsw i64 [[TMP1]], [[INDVARS_IV_NEXT]]
305 ; CHECK-NEXT: [[ADD_PTR1:%.*]] = getelementptr inbounds i32, i32* [[IN]], i64 [[TMP8]]
306 ; CHECK-NEXT: [[TMP9:%.*]] = load i32, i32* [[ADD_PTR1]], align 4
307 ; CHECK-NEXT: [[TMP10]] = add i32 [[TMP7]], [[TMP9]]
308 ; CHECK-NEXT: [[EXITCOND:%.*]] = icmp ne i64 [[INDVARS_IV_NEXT]], [[WIDE_TRIP_COUNT]]
309 ; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_BODY]], label [[FOR_COND_CLEANUP_LOOPEXIT]]
312 %stride = getelementptr inbounds %struct.image, %struct.image* %input, i64 0, i32 1
313 %0 = load i32, i32* %stride, align 4
314 %cmp17 = icmp sgt i32 %length, 1
315 br i1 %cmp17, label %for.body.lr.ph, label %for.cond.cleanup
317 for.body.lr.ph: ; preds = %entry
318 %channel = getelementptr inbounds %struct.image, %struct.image* %input, i64 0, i32 0
321 for.cond.cleanup.loopexit: ; preds = %for.body
322 %1 = phi i32 [ %6, %for.body ]
323 br label %for.cond.cleanup
325 for.cond.cleanup: ; preds = %for.cond.cleanup.loopexit, %entry
326 %2 = phi i32 [ 0, %entry ], [ %1, %for.cond.cleanup.loopexit ]
329 ; mul instruction below is widened instead of generating a truncate instruction for it
330 ; regardless if Load operand of mul is inside or outside the loop (we have both cases).
331 for.body: ; preds = %for.body.lr.ph, %for.body
332 %x.018 = phi i32 [ 1, %for.body.lr.ph ], [ %add, %for.body ]
333 %add = add nuw nsw i32 %x.018, 1
334 %3 = load i32, i32* %channel, align 8
335 %mul = mul nsw i32 %3, %add
336 %idx.ext = sext i32 %mul to i64
337 %add.ptr = getelementptr inbounds i32, i32* %in, i64 %idx.ext
338 %4 = load i32, i32* %add.ptr, align 4
339 %mul1 = mul nsw i32 %0, %add
340 %idx.ext1 = sext i32 %mul1 to i64
341 %add.ptr1 = getelementptr inbounds i32, i32* %in, i64 %idx.ext1
342 %5 = load i32, i32* %add.ptr1, align 4
344 %cmp = icmp slt i32 %add, %length
345 br i1 %cmp, label %for.body, label %for.cond.cleanup.loopexit
; @foo5: variant of @foo4 where the first mul has a second (i32) use in
; the reduction, so widening it would duplicate the multiply in both
; i32 and i64 forms; the pass keeps the trunc/i32 mul instead.
; NOTE(review): lines elided in this chunk — %6 used by %7 below and
; the entry label are not visible; code left byte-identical.
349 define i32 @foo5(%struct.image* %input, i32 %length, i32* %in) {
350 ; CHECK-LABEL: @foo5(
352 ; CHECK-NEXT: [[STRIDE:%.*]] = getelementptr inbounds [[STRUCT_IMAGE:%.*]], %struct.image* [[INPUT:%.*]], i64 0, i32 1
353 ; CHECK-NEXT: [[TMP0:%.*]] = load i32, i32* [[STRIDE]], align 4
354 ; CHECK-NEXT: [[CMP17:%.*]] = icmp sgt i32 [[LENGTH:%.*]], 1
355 ; CHECK-NEXT: br i1 [[CMP17]], label [[FOR_BODY_LR_PH:%.*]], label [[FOR_COND_CLEANUP:%.*]]
356 ; CHECK: for.body.lr.ph:
357 ; CHECK-NEXT: [[CHANNEL:%.*]] = getelementptr inbounds [[STRUCT_IMAGE]], %struct.image* [[INPUT]], i64 0, i32 0
358 ; CHECK-NEXT: [[TMP1:%.*]] = sext i32 [[TMP0]] to i64
359 ; CHECK-NEXT: [[WIDE_TRIP_COUNT:%.*]] = zext i32 [[LENGTH]] to i64
360 ; CHECK-NEXT: br label [[FOR_BODY:%.*]]
361 ; CHECK: for.cond.cleanup.loopexit:
362 ; CHECK-NEXT: [[TMP2:%.*]] = phi i32 [ [[TMP10:%.*]], [[FOR_BODY]] ]
363 ; CHECK-NEXT: br label [[FOR_COND_CLEANUP]]
364 ; CHECK: for.cond.cleanup:
365 ; CHECK-NEXT: [[TMP3:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[TMP2]], [[FOR_COND_CLEANUP_LOOPEXIT:%.*]] ]
366 ; CHECK-NEXT: ret i32 [[TMP3]]
368 ; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ], [ 1, [[FOR_BODY_LR_PH]] ]
369 ; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
370 ; CHECK-NEXT: [[TMP4:%.*]] = load i32, i32* [[CHANNEL]], align 8
371 ; CHECK-NEXT: [[TMP5:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32
372 ; CHECK-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP4]], [[TMP5]]
373 ; CHECK-NEXT: [[IDX_EXT:%.*]] = sext i32 [[MUL]] to i64
374 ; CHECK-NEXT: [[ADD_PTR:%.*]] = getelementptr inbounds i32, i32* [[IN:%.*]], i64 [[IDX_EXT]]
375 ; CHECK-NEXT: [[TMP6:%.*]] = load i32, i32* [[ADD_PTR]], align 4
376 ; CHECK-NEXT: [[TMP7:%.*]] = mul nsw i64 [[TMP1]], [[INDVARS_IV_NEXT]]
377 ; CHECK-NEXT: [[ADD_PTR1:%.*]] = getelementptr inbounds i32, i32* [[IN]], i64 [[TMP7]]
378 ; CHECK-NEXT: [[TMP8:%.*]] = load i32, i32* [[ADD_PTR1]], align 4
379 ; CHECK-NEXT: [[TMP9:%.*]] = add i32 [[TMP6]], [[TMP8]]
380 ; CHECK-NEXT: [[TMP10]] = add i32 [[TMP9]], [[MUL]]
381 ; CHECK-NEXT: [[EXITCOND:%.*]] = icmp ne i64 [[INDVARS_IV_NEXT]], [[WIDE_TRIP_COUNT]]
382 ; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_BODY]], label [[FOR_COND_CLEANUP_LOOPEXIT]]
385 %stride = getelementptr inbounds %struct.image, %struct.image* %input, i64 0, i32 1
386 %0 = load i32, i32* %stride, align 4
387 %cmp17 = icmp sgt i32 %length, 1
388 br i1 %cmp17, label %for.body.lr.ph, label %for.cond.cleanup
390 for.body.lr.ph: ; preds = %entry
391 %channel = getelementptr inbounds %struct.image, %struct.image* %input, i64 0, i32 0
394 for.cond.cleanup.loopexit: ; preds = %for.body
395 %1 = phi i32 [ %7, %for.body ]
396 br label %for.cond.cleanup
398 for.cond.cleanup: ; preds = %for.cond.cleanup.loopexit, %entry
399 %2 = phi i32 [ 0, %entry ], [ %1, %for.cond.cleanup.loopexit ]
402 ; This example is the same as above except that the first mul is used in two places
403 ; and this may result in having two versions of the multiply: an i32 and i64 version.
404 ; In this case, keep the truncate instructions to avoid this redundancy.
405 for.body: ; preds = %for.body.lr.ph, %for.body
406 %x.018 = phi i32 [ 1, %for.body.lr.ph ], [ %add, %for.body ]
407 %add = add nuw nsw i32 %x.018, 1
408 %3 = load i32, i32* %channel, align 8
409 %mul = mul nsw i32 %3, %add
410 %idx.ext = sext i32 %mul to i64
411 %add.ptr = getelementptr inbounds i32, i32* %in, i64 %idx.ext
412 %4 = load i32, i32* %add.ptr, align 4
413 %mul1 = mul nsw i32 %0, %add
414 %idx.ext1 = sext i32 %mul1 to i64
415 %add.ptr1 = getelementptr inbounds i32, i32* %in, i64 %idx.ext1
416 %5 = load i32, i32* %add.ptr1, align 4
418 %7 = add i32 %6, %mul
419 %cmp = icmp slt i32 %add, %length
420 br i1 %cmp, label %for.body, label %for.cond.cleanup.loopexit