; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -loop-versioning -S < %s | FileCheck %s -check-prefix=LV

target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"

; For this loop:
;   unsigned index = 0;
;   for (int i = 0; i < n; i++) {
;     A[2 * index] = A[2 * index] + B[i];
;     index++;
;   }
;
; SCEV is unable to prove that A[2 * index] does not overflow.
;
; Analyzing the IR does not help us because the GEPs are not
; affine AddRecExprs. However, we can turn them into AddRecExprs
; using SCEV Predicates.
;
; Once we have an affine expression we need to add an additional NUSW
; predicate to check that the pointers don't wrap, since the GEPs are
; not inbounds.
;
; The expression for %mul_ext as analyzed by SCEV is
; (zext i32 {0,+,2}<%for.body> to i64)
; We have added the nusw flag to turn this expression into the SCEV expression:
; i64 {0,+,2}<%for.body>
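;
; For illustration only (not part of the test; the helper name f1_c and
; this C rendering are our own sketch): a standalone analogue of the loop
; above. If n is large enough for index to reach 2^31, the 32-bit product
; 2 * index wraps, and two distinct iterations touch the same element of
; A. That is exactly the case the generated overflow checks send to the
; unversioned loop.
;
;   #include <stdint.h>
;
;   void f1_c(int16_t *A, int16_t *B, int64_t n) {
;     uint32_t index = 0;
;     for (int64_t i = 0; i < n; i++) {
;       /* The product wraps modulo 2^32 and is only then widened,
;          mirroring the mul i32 + zext to i64 sequence in the IR below. */
;       uint64_t idx = (uint64_t)(2u * index);
;       A[idx] = A[idx] + B[i];
;       index++;
;     }
;   }
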
define void @f1(i16* noalias %a,
; LV-LABEL: @f1(
; LV-NEXT: for.body.lver.check:
; LV-NEXT: [[A5:%.*]] = bitcast i16* [[A:%.*]] to i8*
; LV-NEXT: [[TMP0:%.*]] = add i64 [[N:%.*]], -1
; LV-NEXT: [[TMP7:%.*]] = icmp ugt i64 [[TMP0]], 4294967295
; LV-NEXT: [[MUL2:%.*]] = call { i64, i1 } @llvm.umul.with.overflow.i64(i64 4, i64 [[TMP0]])
; LV-NEXT: [[MUL_RESULT3:%.*]] = extractvalue { i64, i1 } [[MUL2]], 0
; LV-NEXT: [[MUL_OVERFLOW4:%.*]] = extractvalue { i64, i1 } [[MUL2]], 1
; LV-NEXT: [[TMP11:%.*]] = sub i64 0, [[MUL_RESULT3]]
; LV-NEXT: [[TMP12:%.*]] = getelementptr i8, i8* [[A5]], i64 [[MUL_RESULT3]]
; LV-NEXT: [[TMP15:%.*]] = icmp ult i8* [[TMP12]], [[A5]]
; LV-NEXT: [[TMP17:%.*]] = or i1 [[TMP15]], [[MUL_OVERFLOW4]]
; LV-NEXT: [[TMP18:%.*]] = or i1 [[TMP7]], [[TMP17]]
; LV-NEXT: br i1 [[TMP18]], label [[FOR_BODY_PH_LVER_ORIG:%.*]], label [[FOR_BODY_PH:%.*]]
; LV: for.body.ph.lver.orig:
; LV-NEXT: br label [[FOR_BODY_LVER_ORIG:%.*]]
; LV: for.body.lver.orig:
; LV-NEXT: [[IND_LVER_ORIG:%.*]] = phi i64 [ 0, [[FOR_BODY_PH_LVER_ORIG]] ], [ [[INC_LVER_ORIG:%.*]], [[FOR_BODY_LVER_ORIG]] ]
; LV-NEXT: [[IND1_LVER_ORIG:%.*]] = phi i32 [ 0, [[FOR_BODY_PH_LVER_ORIG]] ], [ [[INC1_LVER_ORIG:%.*]], [[FOR_BODY_LVER_ORIG]] ]
; LV-NEXT: [[MUL_LVER_ORIG:%.*]] = mul i32 [[IND1_LVER_ORIG]], 2
; LV-NEXT: [[MUL_EXT_LVER_ORIG:%.*]] = zext i32 [[MUL_LVER_ORIG]] to i64
; LV-NEXT: [[ARRAYIDXA_LVER_ORIG:%.*]] = getelementptr i16, i16* [[A]], i64 [[MUL_EXT_LVER_ORIG]]
; LV-NEXT: [[LOADA_LVER_ORIG:%.*]] = load i16, i16* [[ARRAYIDXA_LVER_ORIG]], align 2
; LV-NEXT: [[ARRAYIDXB_LVER_ORIG:%.*]] = getelementptr i16, i16* [[B:%.*]], i64 [[IND_LVER_ORIG]]
; LV-NEXT: [[LOADB_LVER_ORIG:%.*]] = load i16, i16* [[ARRAYIDXB_LVER_ORIG]], align 2
; LV-NEXT: [[ADD_LVER_ORIG:%.*]] = mul i16 [[LOADA_LVER_ORIG]], [[LOADB_LVER_ORIG]]
; LV-NEXT: store i16 [[ADD_LVER_ORIG]], i16* [[ARRAYIDXA_LVER_ORIG]], align 2
; LV-NEXT: [[INC_LVER_ORIG]] = add nuw nsw i64 [[IND_LVER_ORIG]], 1
; LV-NEXT: [[INC1_LVER_ORIG]] = add i32 [[IND1_LVER_ORIG]], 1
; LV-NEXT: [[EXITCOND_LVER_ORIG:%.*]] = icmp eq i64 [[INC_LVER_ORIG]], [[N]]
; LV-NEXT: br i1 [[EXITCOND_LVER_ORIG]], label [[FOR_END_LOOPEXIT:%.*]], label [[FOR_BODY_LVER_ORIG]]
; LV: for.body.ph:
; LV-NEXT: br label [[FOR_BODY:%.*]]
; LV: for.body:
; LV-NEXT: [[IND:%.*]] = phi i64 [ 0, [[FOR_BODY_PH]] ], [ [[INC:%.*]], [[FOR_BODY]] ]
; LV-NEXT: [[IND1:%.*]] = phi i32 [ 0, [[FOR_BODY_PH]] ], [ [[INC1:%.*]], [[FOR_BODY]] ]
; LV-NEXT: [[MUL:%.*]] = mul i32 [[IND1]], 2
; LV-NEXT: [[MUL_EXT:%.*]] = zext i32 [[MUL]] to i64
; LV-NEXT: [[ARRAYIDXA:%.*]] = getelementptr i16, i16* [[A]], i64 [[MUL_EXT]]
; LV-NEXT: [[LOADA:%.*]] = load i16, i16* [[ARRAYIDXA]], align 2
; LV-NEXT: [[ARRAYIDXB:%.*]] = getelementptr i16, i16* [[B]], i64 [[IND]]
; LV-NEXT: [[LOADB:%.*]] = load i16, i16* [[ARRAYIDXB]], align 2
; LV-NEXT: [[ADD:%.*]] = mul i16 [[LOADA]], [[LOADB]]
; LV-NEXT: store i16 [[ADD]], i16* [[ARRAYIDXA]], align 2
; LV-NEXT: [[INC]] = add nuw nsw i64 [[IND]], 1
; LV-NEXT: [[INC1]] = add i32 [[IND1]], 1
; LV-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INC]], [[N]]
; LV-NEXT: br i1 [[EXITCOND]], label [[FOR_END_LOOPEXIT3:%.*]], label [[FOR_BODY]]
; LV: for.end.loopexit:
; LV-NEXT: br label [[FOR_END:%.*]]
; LV: for.end.loopexit3:
; LV-NEXT: br label [[FOR_END]]
; LV: for.end:
; LV-NEXT: ret void
;
                i16* noalias %b, i64 %N) {
entry:
  br label %for.body

for.body: ; preds = %for.body, %entry
  %ind = phi i64 [ 0, %entry ], [ %inc, %for.body ]
  %ind1 = phi i32 [ 0, %entry ], [ %inc1, %for.body ]

  %mul = mul i32 %ind1, 2
  %mul_ext = zext i32 %mul to i64

  %arrayidxA = getelementptr i16, i16* %a, i64 %mul_ext
  %loadA = load i16, i16* %arrayidxA, align 2

  %arrayidxB = getelementptr i16, i16* %b, i64 %ind
  %loadB = load i16, i16* %arrayidxB, align 2

  %add = mul i16 %loadA, %loadB

  store i16 %add, i16* %arrayidxA, align 2

  %inc = add nuw nsw i64 %ind, 1
  %inc1 = add i32 %ind1, 1

  %exitcond = icmp eq i64 %inc, %N
  br i1 %exitcond, label %for.end, label %for.body
for.end: ; preds = %for.body
  ret void
}

; For this loop:
;   unsigned index = n;
;   for (int i = 0; i < n; i++) {
;     A[2 * index] = A[2 * index] + B[i];
;     index--;
;   }
;
; the SCEV expression for 2 * index is not an AddRecExpr
; (and implicitly not affine). However, we are able to make assumptions
; that will turn the expression into an affine one and continue the
; analysis.
;
; Once we have an affine expression we need to add an additional NUSW
; predicate to check that the pointers don't wrap, since the GEPs are
; not inbounds.
;
; This loop has a negative stride for A, and the nusw flag is required in
; order to properly extend the increment from i32 -4 to i64 -4.
;
; The expression for %mul_ext as analyzed by SCEV is
; (zext i32 {(2 * (trunc i64 %N to i32)),+,-2}<%for.body> to i64)
; We have added the nusw flag to turn this expression into the following SCEV:
; i64 {zext i32 (2 * (trunc i64 %N to i32)) to i64,+,-2}<%for.body>
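;
; For illustration only (not part of the test; the name f2_c is our own
; sketch): a C analogue of the down-counting variant. Here 2 * index
; starts at 2 * (uint32_t)n and decreases by 2 per iteration, so without
; the nusw assumption the 32-bit value could wrap below zero before it is
; widened.
;
;   #include <stdint.h>
;
;   void f2_c(int16_t *A, int16_t *B, int64_t n) {
;     uint32_t index = (uint32_t)n;
;     for (int64_t i = 0; i < n; i++) {
;       /* The product is still computed in 32 bits and zero-extended,
;          even though the access walks downwards through A. */
;       uint64_t idx = (uint64_t)(2u * index);
;       A[idx] = A[idx] + B[i];
;       index--;
;     }
;   }
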
define void @f2(i16* noalias %a,
; LV-LABEL: @f2(
; LV-NEXT: for.body.lver.check:
; LV-NEXT: [[TRUNCN:%.*]] = trunc i64 [[N:%.*]] to i32
; LV-NEXT: [[TMP0:%.*]] = add i64 [[N]], -1
; LV-NEXT: [[TMP1:%.*]] = shl i32 [[TRUNCN]], 1
; LV-NEXT: [[TMP2:%.*]] = trunc i64 [[TMP0]] to i32
; LV-NEXT: [[MUL1:%.*]] = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 2, i32 [[TMP2]])
; LV-NEXT: [[MUL_RESULT:%.*]] = extractvalue { i32, i1 } [[MUL1]], 0
; LV-NEXT: [[MUL_OVERFLOW:%.*]] = extractvalue { i32, i1 } [[MUL1]], 1
; LV-NEXT: [[TMP4:%.*]] = sub i32 [[TMP1]], [[MUL_RESULT]]
; LV-NEXT: [[TMP5:%.*]] = icmp ugt i32 [[TMP4]], [[TMP1]]
; LV-NEXT: [[TMP9:%.*]] = or i1 [[TMP5]], [[MUL_OVERFLOW]]
; LV-NEXT: [[TMP8:%.*]] = icmp ugt i64 [[TMP0]], 4294967295
; LV-NEXT: [[TMP10:%.*]] = or i1 [[TMP9]], [[TMP8]]
; LV-NEXT: [[TMP12:%.*]] = trunc i64 [[N]] to i31
; LV-NEXT: [[TMP13:%.*]] = zext i31 [[TMP12]] to i64
; LV-NEXT: [[TMP14:%.*]] = shl nuw nsw i64 [[TMP13]], 1
; LV-NEXT: [[SCEVGEP:%.*]] = getelementptr i16, i16* [[A:%.*]], i64 [[TMP14]]
; LV-NEXT: [[MUL2:%.*]] = call { i64, i1 } @llvm.umul.with.overflow.i64(i64 4, i64 [[TMP0]])
; LV-NEXT: [[MUL_RESULT3:%.*]] = extractvalue { i64, i1 } [[MUL2]], 0
; LV-NEXT: [[MUL_OVERFLOW4:%.*]] = extractvalue { i64, i1 } [[MUL2]], 1
; LV-NEXT: [[SCEVGEP5:%.*]] = bitcast i16* [[SCEVGEP]] to i8*
; LV-NEXT: [[TMP15:%.*]] = sub i64 0, [[MUL_RESULT3]]
; LV-NEXT: [[TMP17:%.*]] = getelementptr i8, i8* [[SCEVGEP5]], i64 [[TMP15]]
; LV-NEXT: [[TMP18:%.*]] = icmp ugt i8* [[TMP17]], [[SCEVGEP5]]
; LV-NEXT: [[TMP21:%.*]] = or i1 [[TMP18]], [[MUL_OVERFLOW4]]
; LV-NEXT: [[TMP22:%.*]] = or i1 [[TMP10]], [[TMP21]]
; LV-NEXT: br i1 [[TMP22]], label [[FOR_BODY_PH_LVER_ORIG:%.*]], label [[FOR_BODY_PH:%.*]]
; LV: for.body.ph.lver.orig:
; LV-NEXT: br label [[FOR_BODY_LVER_ORIG:%.*]]
; LV: for.body.lver.orig:
; LV-NEXT: [[IND_LVER_ORIG:%.*]] = phi i64 [ 0, [[FOR_BODY_PH_LVER_ORIG]] ], [ [[INC_LVER_ORIG:%.*]], [[FOR_BODY_LVER_ORIG]] ]
; LV-NEXT: [[IND1_LVER_ORIG:%.*]] = phi i32 [ [[TRUNCN]], [[FOR_BODY_PH_LVER_ORIG]] ], [ [[DEC_LVER_ORIG:%.*]], [[FOR_BODY_LVER_ORIG]] ]
; LV-NEXT: [[MUL_LVER_ORIG:%.*]] = mul i32 [[IND1_LVER_ORIG]], 2
; LV-NEXT: [[MUL_EXT_LVER_ORIG:%.*]] = zext i32 [[MUL_LVER_ORIG]] to i64
; LV-NEXT: [[ARRAYIDXA_LVER_ORIG:%.*]] = getelementptr i16, i16* [[A]], i64 [[MUL_EXT_LVER_ORIG]]
; LV-NEXT: [[LOADA_LVER_ORIG:%.*]] = load i16, i16* [[ARRAYIDXA_LVER_ORIG]], align 2
; LV-NEXT: [[ARRAYIDXB_LVER_ORIG:%.*]] = getelementptr i16, i16* [[B:%.*]], i64 [[IND_LVER_ORIG]]
; LV-NEXT: [[LOADB_LVER_ORIG:%.*]] = load i16, i16* [[ARRAYIDXB_LVER_ORIG]], align 2
; LV-NEXT: [[ADD_LVER_ORIG:%.*]] = mul i16 [[LOADA_LVER_ORIG]], [[LOADB_LVER_ORIG]]
; LV-NEXT: store i16 [[ADD_LVER_ORIG]], i16* [[ARRAYIDXA_LVER_ORIG]], align 2
; LV-NEXT: [[INC_LVER_ORIG]] = add nuw nsw i64 [[IND_LVER_ORIG]], 1
; LV-NEXT: [[DEC_LVER_ORIG]] = sub i32 [[IND1_LVER_ORIG]], 1
; LV-NEXT: [[EXITCOND_LVER_ORIG:%.*]] = icmp eq i64 [[INC_LVER_ORIG]], [[N]]
; LV-NEXT: br i1 [[EXITCOND_LVER_ORIG]], label [[FOR_END_LOOPEXIT:%.*]], label [[FOR_BODY_LVER_ORIG]]
; LV: for.body.ph:
; LV-NEXT: br label [[FOR_BODY:%.*]]
; LV: for.body:
; LV-NEXT: [[IND:%.*]] = phi i64 [ 0, [[FOR_BODY_PH]] ], [ [[INC:%.*]], [[FOR_BODY]] ]
; LV-NEXT: [[IND1:%.*]] = phi i32 [ [[TRUNCN]], [[FOR_BODY_PH]] ], [ [[DEC:%.*]], [[FOR_BODY]] ]
; LV-NEXT: [[MUL:%.*]] = mul i32 [[IND1]], 2
; LV-NEXT: [[MUL_EXT:%.*]] = zext i32 [[MUL]] to i64
; LV-NEXT: [[ARRAYIDXA:%.*]] = getelementptr i16, i16* [[A]], i64 [[MUL_EXT]]
; LV-NEXT: [[LOADA:%.*]] = load i16, i16* [[ARRAYIDXA]], align 2
; LV-NEXT: [[ARRAYIDXB:%.*]] = getelementptr i16, i16* [[B]], i64 [[IND]]
; LV-NEXT: [[LOADB:%.*]] = load i16, i16* [[ARRAYIDXB]], align 2
; LV-NEXT: [[ADD:%.*]] = mul i16 [[LOADA]], [[LOADB]]
; LV-NEXT: store i16 [[ADD]], i16* [[ARRAYIDXA]], align 2
; LV-NEXT: [[INC]] = add nuw nsw i64 [[IND]], 1
; LV-NEXT: [[DEC]] = sub i32 [[IND1]], 1
; LV-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INC]], [[N]]
; LV-NEXT: br i1 [[EXITCOND]], label [[FOR_END_LOOPEXIT6:%.*]], label [[FOR_BODY]]
; LV: for.end.loopexit:
; LV-NEXT: br label [[FOR_END:%.*]]
; LV: for.end.loopexit6:
; LV-NEXT: br label [[FOR_END]]
; LV: for.end:
; LV-NEXT: ret void
;
                i16* noalias %b, i64 %N) {
entry:
  %TruncN = trunc i64 %N to i32
  br label %for.body

for.body: ; preds = %for.body, %entry
  %ind = phi i64 [ 0, %entry ], [ %inc, %for.body ]
  %ind1 = phi i32 [ %TruncN, %entry ], [ %dec, %for.body ]

  %mul = mul i32 %ind1, 2
  %mul_ext = zext i32 %mul to i64

  %arrayidxA = getelementptr i16, i16* %a, i64 %mul_ext
  %loadA = load i16, i16* %arrayidxA, align 2

  %arrayidxB = getelementptr i16, i16* %b, i64 %ind
  %loadB = load i16, i16* %arrayidxB, align 2

  %add = mul i16 %loadA, %loadB

  store i16 %add, i16* %arrayidxA, align 2

  %inc = add nuw nsw i64 %ind, 1
  %dec = sub i32 %ind1, 1

  %exitcond = icmp eq i64 %inc, %N
  br i1 %exitcond, label %for.end, label %for.body
for.end: ; preds = %for.body
  ret void
}

; We replicate the tests above, but this time sign extend 2 * index instead
; of zero extending it.
;
; The expression for %mul_ext as analyzed by SCEV is
; (sext i32 {0,+,2}<%for.body> to i64)
; We have added the nssw flag to turn this expression into the following SCEV:
; i64 {0,+,2}<%for.body>
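;
; For illustration only (not part of the test; the name f3_c is our own
; sketch): the same loop as in @f1, but the product is kept signed before
; widening, which is what the sext in the IR below models.
;
;   #include <stdint.h>
;
;   void f3_c(int16_t *A, int16_t *B, int64_t n) {
;     int32_t index = 0;
;     for (int64_t i = 0; i < n; i++) {
;       /* Signed 32-bit multiply, then sign extension to the 64-bit
;          index type; nssw is the assumption that the multiply never
;          wraps in the signed sense. */
;       int64_t idx = (int64_t)(2 * index);
;       A[idx] = A[idx] + B[i];
;       index++;
;     }
;   }
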
define void @f3(i16* noalias %a,
; LV-LABEL: @f3(
; LV-NEXT: for.body.lver.check:
; LV-NEXT: [[A5:%.*]] = bitcast i16* [[A:%.*]] to i8*
; LV-NEXT: [[TMP0:%.*]] = add i64 [[N:%.*]], -1
; LV-NEXT: [[TMP1:%.*]] = trunc i64 [[TMP0]] to i32
; LV-NEXT: [[MUL1:%.*]] = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 2, i32 [[TMP1]])
; LV-NEXT: [[MUL_RESULT:%.*]] = extractvalue { i32, i1 } [[MUL1]], 0
; LV-NEXT: [[MUL_OVERFLOW:%.*]] = extractvalue { i32, i1 } [[MUL1]], 1
; LV-NEXT: [[TMP5:%.*]] = icmp slt i32 [[MUL_RESULT]], 0
; LV-NEXT: [[TMP8:%.*]] = or i1 [[TMP5]], [[MUL_OVERFLOW]]
; LV-NEXT: [[TMP7:%.*]] = icmp ugt i64 [[TMP0]], 4294967295
; LV-NEXT: [[TMP9:%.*]] = or i1 [[TMP8]], [[TMP7]]
; LV-NEXT: [[MUL2:%.*]] = call { i64, i1 } @llvm.umul.with.overflow.i64(i64 4, i64 [[TMP0]])
; LV-NEXT: [[MUL_RESULT3:%.*]] = extractvalue { i64, i1 } [[MUL2]], 0
; LV-NEXT: [[MUL_OVERFLOW4:%.*]] = extractvalue { i64, i1 } [[MUL2]], 1
; LV-NEXT: [[TMP11:%.*]] = sub i64 0, [[MUL_RESULT3]]
; LV-NEXT: [[TMP12:%.*]] = getelementptr i8, i8* [[A5]], i64 [[MUL_RESULT3]]
; LV-NEXT: [[TMP15:%.*]] = icmp ult i8* [[TMP12]], [[A5]]
; LV-NEXT: [[TMP17:%.*]] = or i1 [[TMP15]], [[MUL_OVERFLOW4]]
; LV-NEXT: [[TMP18:%.*]] = or i1 [[TMP9]], [[TMP17]]
; LV-NEXT: br i1 [[TMP18]], label [[FOR_BODY_PH_LVER_ORIG:%.*]], label [[FOR_BODY_PH:%.*]]
; LV: for.body.ph.lver.orig:
; LV-NEXT: br label [[FOR_BODY_LVER_ORIG:%.*]]
; LV: for.body.lver.orig:
; LV-NEXT: [[IND_LVER_ORIG:%.*]] = phi i64 [ 0, [[FOR_BODY_PH_LVER_ORIG]] ], [ [[INC_LVER_ORIG:%.*]], [[FOR_BODY_LVER_ORIG]] ]
; LV-NEXT: [[IND1_LVER_ORIG:%.*]] = phi i32 [ 0, [[FOR_BODY_PH_LVER_ORIG]] ], [ [[INC1_LVER_ORIG:%.*]], [[FOR_BODY_LVER_ORIG]] ]
; LV-NEXT: [[MUL_LVER_ORIG:%.*]] = mul i32 [[IND1_LVER_ORIG]], 2
; LV-NEXT: [[MUL_EXT_LVER_ORIG:%.*]] = sext i32 [[MUL_LVER_ORIG]] to i64
; LV-NEXT: [[ARRAYIDXA_LVER_ORIG:%.*]] = getelementptr i16, i16* [[A]], i64 [[MUL_EXT_LVER_ORIG]]
; LV-NEXT: [[LOADA_LVER_ORIG:%.*]] = load i16, i16* [[ARRAYIDXA_LVER_ORIG]], align 2
; LV-NEXT: [[ARRAYIDXB_LVER_ORIG:%.*]] = getelementptr i16, i16* [[B:%.*]], i64 [[IND_LVER_ORIG]]
; LV-NEXT: [[LOADB_LVER_ORIG:%.*]] = load i16, i16* [[ARRAYIDXB_LVER_ORIG]], align 2
; LV-NEXT: [[ADD_LVER_ORIG:%.*]] = mul i16 [[LOADA_LVER_ORIG]], [[LOADB_LVER_ORIG]]
; LV-NEXT: store i16 [[ADD_LVER_ORIG]], i16* [[ARRAYIDXA_LVER_ORIG]], align 2
; LV-NEXT: [[INC_LVER_ORIG]] = add nuw nsw i64 [[IND_LVER_ORIG]], 1
; LV-NEXT: [[INC1_LVER_ORIG]] = add i32 [[IND1_LVER_ORIG]], 1
; LV-NEXT: [[EXITCOND_LVER_ORIG:%.*]] = icmp eq i64 [[INC_LVER_ORIG]], [[N]]
; LV-NEXT: br i1 [[EXITCOND_LVER_ORIG]], label [[FOR_END_LOOPEXIT:%.*]], label [[FOR_BODY_LVER_ORIG]]
; LV: for.body.ph:
; LV-NEXT: br label [[FOR_BODY:%.*]]
; LV: for.body:
; LV-NEXT: [[IND:%.*]] = phi i64 [ 0, [[FOR_BODY_PH]] ], [ [[INC:%.*]], [[FOR_BODY]] ]
; LV-NEXT: [[IND1:%.*]] = phi i32 [ 0, [[FOR_BODY_PH]] ], [ [[INC1:%.*]], [[FOR_BODY]] ]
; LV-NEXT: [[MUL:%.*]] = mul i32 [[IND1]], 2
; LV-NEXT: [[MUL_EXT:%.*]] = sext i32 [[MUL]] to i64
; LV-NEXT: [[ARRAYIDXA:%.*]] = getelementptr i16, i16* [[A]], i64 [[MUL_EXT]]
; LV-NEXT: [[LOADA:%.*]] = load i16, i16* [[ARRAYIDXA]], align 2
; LV-NEXT: [[ARRAYIDXB:%.*]] = getelementptr i16, i16* [[B]], i64 [[IND]]
; LV-NEXT: [[LOADB:%.*]] = load i16, i16* [[ARRAYIDXB]], align 2
; LV-NEXT: [[ADD:%.*]] = mul i16 [[LOADA]], [[LOADB]]
; LV-NEXT: store i16 [[ADD]], i16* [[ARRAYIDXA]], align 2
; LV-NEXT: [[INC]] = add nuw nsw i64 [[IND]], 1
; LV-NEXT: [[INC1]] = add i32 [[IND1]], 1
; LV-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INC]], [[N]]
; LV-NEXT: br i1 [[EXITCOND]], label [[FOR_END_LOOPEXIT6:%.*]], label [[FOR_BODY]]
; LV: for.end.loopexit:
; LV-NEXT: br label [[FOR_END:%.*]]
; LV: for.end.loopexit6:
; LV-NEXT: br label [[FOR_END]]
; LV: for.end:
; LV-NEXT: ret void
;
                i16* noalias %b, i64 %N) {
entry:
  br label %for.body

for.body: ; preds = %for.body, %entry
  %ind = phi i64 [ 0, %entry ], [ %inc, %for.body ]
  %ind1 = phi i32 [ 0, %entry ], [ %inc1, %for.body ]

  %mul = mul i32 %ind1, 2
  %mul_ext = sext i32 %mul to i64

  %arrayidxA = getelementptr i16, i16* %a, i64 %mul_ext
  %loadA = load i16, i16* %arrayidxA, align 2

  %arrayidxB = getelementptr i16, i16* %b, i64 %ind
  %loadB = load i16, i16* %arrayidxB, align 2

  %add = mul i16 %loadA, %loadB

  store i16 %add, i16* %arrayidxA, align 2

  %inc = add nuw nsw i64 %ind, 1
  %inc1 = add i32 %ind1, 1

  %exitcond = icmp eq i64 %inc, %N
  br i1 %exitcond, label %for.end, label %for.body
for.end: ; preds = %for.body
  ret void
}
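
; @f4 replicates @f2 (a negative stride through A), but sign extends
; 2 * index to i64 instead of zero extending it.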
define void @f4(i16* noalias %a,
; LV-LABEL: @f4(
; LV-NEXT: for.body.lver.check:
; LV-NEXT: [[TRUNCN:%.*]] = trunc i64 [[N:%.*]] to i32
; LV-NEXT: [[TMP0:%.*]] = add i64 [[N]], -1
; LV-NEXT: [[TMP1:%.*]] = shl i32 [[TRUNCN]], 1
; LV-NEXT: [[TMP2:%.*]] = trunc i64 [[TMP0]] to i32
; LV-NEXT: [[MUL1:%.*]] = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 2, i32 [[TMP2]])
; LV-NEXT: [[MUL_RESULT:%.*]] = extractvalue { i32, i1 } [[MUL1]], 0
; LV-NEXT: [[MUL_OVERFLOW:%.*]] = extractvalue { i32, i1 } [[MUL1]], 1
; LV-NEXT: [[TMP4:%.*]] = sub i32 [[TMP1]], [[MUL_RESULT]]
; LV-NEXT: [[TMP5:%.*]] = icmp sgt i32 [[TMP4]], [[TMP1]]
; LV-NEXT: [[TMP9:%.*]] = or i1 [[TMP5]], [[MUL_OVERFLOW]]
; LV-NEXT: [[TMP8:%.*]] = icmp ugt i64 [[TMP0]], 4294967295
; LV-NEXT: [[TMP10:%.*]] = or i1 [[TMP9]], [[TMP8]]
; LV-NEXT: [[TMP12:%.*]] = sext i32 [[TMP1]] to i64
; LV-NEXT: [[SCEVGEP:%.*]] = getelementptr i16, i16* [[A:%.*]], i64 [[TMP12]]
; LV-NEXT: [[MUL2:%.*]] = call { i64, i1 } @llvm.umul.with.overflow.i64(i64 4, i64 [[TMP0]])
; LV-NEXT: [[MUL_RESULT3:%.*]] = extractvalue { i64, i1 } [[MUL2]], 0
; LV-NEXT: [[MUL_OVERFLOW4:%.*]] = extractvalue { i64, i1 } [[MUL2]], 1
; LV-NEXT: [[SCEVGEP5:%.*]] = bitcast i16* [[SCEVGEP]] to i8*
; LV-NEXT: [[TMP13:%.*]] = sub i64 0, [[MUL_RESULT3]]
; LV-NEXT: [[TMP15:%.*]] = getelementptr i8, i8* [[SCEVGEP5]], i64 [[TMP13]]
; LV-NEXT: [[TMP16:%.*]] = icmp ugt i8* [[TMP15]], [[SCEVGEP5]]
; LV-NEXT: [[TMP19:%.*]] = or i1 [[TMP16]], [[MUL_OVERFLOW4]]
; LV-NEXT: [[TMP20:%.*]] = or i1 [[TMP10]], [[TMP19]]
; LV-NEXT: br i1 [[TMP20]], label [[FOR_BODY_PH_LVER_ORIG:%.*]], label [[FOR_BODY_PH:%.*]]
; LV: for.body.ph.lver.orig:
; LV-NEXT: br label [[FOR_BODY_LVER_ORIG:%.*]]
; LV: for.body.lver.orig:
; LV-NEXT: [[IND_LVER_ORIG:%.*]] = phi i64 [ 0, [[FOR_BODY_PH_LVER_ORIG]] ], [ [[INC_LVER_ORIG:%.*]], [[FOR_BODY_LVER_ORIG]] ]
; LV-NEXT: [[IND1_LVER_ORIG:%.*]] = phi i32 [ [[TRUNCN]], [[FOR_BODY_PH_LVER_ORIG]] ], [ [[DEC_LVER_ORIG:%.*]], [[FOR_BODY_LVER_ORIG]] ]
; LV-NEXT: [[MUL_LVER_ORIG:%.*]] = mul i32 [[IND1_LVER_ORIG]], 2
; LV-NEXT: [[MUL_EXT_LVER_ORIG:%.*]] = sext i32 [[MUL_LVER_ORIG]] to i64
; LV-NEXT: [[ARRAYIDXA_LVER_ORIG:%.*]] = getelementptr i16, i16* [[A]], i64 [[MUL_EXT_LVER_ORIG]]
; LV-NEXT: [[LOADA_LVER_ORIG:%.*]] = load i16, i16* [[ARRAYIDXA_LVER_ORIG]], align 2
; LV-NEXT: [[ARRAYIDXB_LVER_ORIG:%.*]] = getelementptr i16, i16* [[B:%.*]], i64 [[IND_LVER_ORIG]]
; LV-NEXT: [[LOADB_LVER_ORIG:%.*]] = load i16, i16* [[ARRAYIDXB_LVER_ORIG]], align 2
; LV-NEXT: [[ADD_LVER_ORIG:%.*]] = mul i16 [[LOADA_LVER_ORIG]], [[LOADB_LVER_ORIG]]
; LV-NEXT: store i16 [[ADD_LVER_ORIG]], i16* [[ARRAYIDXA_LVER_ORIG]], align 2
; LV-NEXT: [[INC_LVER_ORIG]] = add nuw nsw i64 [[IND_LVER_ORIG]], 1
; LV-NEXT: [[DEC_LVER_ORIG]] = sub i32 [[IND1_LVER_ORIG]], 1
; LV-NEXT: [[EXITCOND_LVER_ORIG:%.*]] = icmp eq i64 [[INC_LVER_ORIG]], [[N]]
; LV-NEXT: br i1 [[EXITCOND_LVER_ORIG]], label [[FOR_END_LOOPEXIT:%.*]], label [[FOR_BODY_LVER_ORIG]]
; LV: for.body.ph:
; LV-NEXT: br label [[FOR_BODY:%.*]]
; LV: for.body:
; LV-NEXT: [[IND:%.*]] = phi i64 [ 0, [[FOR_BODY_PH]] ], [ [[INC:%.*]], [[FOR_BODY]] ]
; LV-NEXT: [[IND1:%.*]] = phi i32 [ [[TRUNCN]], [[FOR_BODY_PH]] ], [ [[DEC:%.*]], [[FOR_BODY]] ]
; LV-NEXT: [[MUL:%.*]] = mul i32 [[IND1]], 2
; LV-NEXT: [[MUL_EXT:%.*]] = sext i32 [[MUL]] to i64
; LV-NEXT: [[ARRAYIDXA:%.*]] = getelementptr i16, i16* [[A]], i64 [[MUL_EXT]]
; LV-NEXT: [[LOADA:%.*]] = load i16, i16* [[ARRAYIDXA]], align 2
; LV-NEXT: [[ARRAYIDXB:%.*]] = getelementptr i16, i16* [[B]], i64 [[IND]]
; LV-NEXT: [[LOADB:%.*]] = load i16, i16* [[ARRAYIDXB]], align 2
; LV-NEXT: [[ADD:%.*]] = mul i16 [[LOADA]], [[LOADB]]
; LV-NEXT: store i16 [[ADD]], i16* [[ARRAYIDXA]], align 2
; LV-NEXT: [[INC]] = add nuw nsw i64 [[IND]], 1
; LV-NEXT: [[DEC]] = sub i32 [[IND1]], 1
; LV-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INC]], [[N]]
; LV-NEXT: br i1 [[EXITCOND]], label [[FOR_END_LOOPEXIT6:%.*]], label [[FOR_BODY]]
; LV: for.end.loopexit:
; LV-NEXT: br label [[FOR_END:%.*]]
; LV: for.end.loopexit6:
; LV-NEXT: br label [[FOR_END]]
; LV: for.end:
; LV-NEXT: ret void
;
                i16* noalias %b, i64 %N) {
entry:
  %TruncN = trunc i64 %N to i32
  br label %for.body

for.body: ; preds = %for.body, %entry
  %ind = phi i64 [ 0, %entry ], [ %inc, %for.body ]
  %ind1 = phi i32 [ %TruncN, %entry ], [ %dec, %for.body ]

  %mul = mul i32 %ind1, 2
  %mul_ext = sext i32 %mul to i64

  %arrayidxA = getelementptr i16, i16* %a, i64 %mul_ext
  %loadA = load i16, i16* %arrayidxA, align 2

  %arrayidxB = getelementptr i16, i16* %b, i64 %ind
  %loadB = load i16, i16* %arrayidxB, align 2

  %add = mul i16 %loadA, %loadB

  store i16 %add, i16* %arrayidxA, align 2

  %inc = add nuw nsw i64 %ind, 1
  %dec = sub i32 %ind1, 1

  %exitcond = icmp eq i64 %inc, %N
  br i1 %exitcond, label %for.end, label %for.body
for.end: ; preds = %for.body
  ret void
}

; The following function is similar to the one above, but has the GEP
; to pointer %A inbounds. The index %mul doesn't have the nsw flag.
; This means that the SCEV expression for %mul can wrap and we need
; a SCEV predicate to continue analysis.
;
; We can still analyze this by adding the required no-wrap SCEV predicates.
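;
; For illustration only (not part of the test; the name f5_c is our own
; sketch): unlike the earlier functions there is no explicit widening
; cast here; the 32-bit index feeds the inbounds GEP directly, so the
; wrap predicate on %mul is what lets SCEV treat the access as affine.
;
;   #include <stdint.h>
;
;   void f5_c(int16_t *A, int16_t *B, int64_t n) {
;     int32_t index = (int32_t)n;
;     for (int64_t i = 0; i < n; i++) {
;       /* The 32-bit index is used directly; any wrap in 2 * index
;          would make the address expression non-affine. */
;       A[2 * index] = A[2 * index] + B[i];
;       index--;
;     }
;   }
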
define void @f5(i16* noalias %a,
; LV-LABEL: @f5(
; LV-NEXT: for.body.lver.check:
; LV-NEXT: [[TRUNCN:%.*]] = trunc i64 [[N:%.*]] to i32
; LV-NEXT: [[TMP0:%.*]] = add i64 [[N]], -1
; LV-NEXT: [[TMP1:%.*]] = shl i32 [[TRUNCN]], 1
; LV-NEXT: [[TMP2:%.*]] = trunc i64 [[TMP0]] to i32
; LV-NEXT: [[MUL1:%.*]] = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 2, i32 [[TMP2]])
; LV-NEXT: [[MUL_RESULT:%.*]] = extractvalue { i32, i1 } [[MUL1]], 0
; LV-NEXT: [[MUL_OVERFLOW:%.*]] = extractvalue { i32, i1 } [[MUL1]], 1
; LV-NEXT: [[TMP4:%.*]] = sub i32 [[TMP1]], [[MUL_RESULT]]
; LV-NEXT: [[TMP5:%.*]] = icmp sgt i32 [[TMP4]], [[TMP1]]
; LV-NEXT: [[TMP9:%.*]] = or i1 [[TMP5]], [[MUL_OVERFLOW]]
; LV-NEXT: [[TMP8:%.*]] = icmp ugt i64 [[TMP0]], 4294967295
; LV-NEXT: [[TMP10:%.*]] = or i1 [[TMP9]], [[TMP8]]
; LV-NEXT: [[TMP12:%.*]] = sext i32 [[TMP1]] to i64
; LV-NEXT: [[SCEVGEP:%.*]] = getelementptr i16, i16* [[A:%.*]], i64 [[TMP12]]
; LV-NEXT: [[MUL2:%.*]] = call { i64, i1 } @llvm.umul.with.overflow.i64(i64 4, i64 [[TMP0]])
; LV-NEXT: [[MUL_RESULT3:%.*]] = extractvalue { i64, i1 } [[MUL2]], 0
; LV-NEXT: [[MUL_OVERFLOW4:%.*]] = extractvalue { i64, i1 } [[MUL2]], 1
; LV-NEXT: [[SCEVGEP5:%.*]] = bitcast i16* [[SCEVGEP]] to i8*
; LV-NEXT: [[TMP13:%.*]] = sub i64 0, [[MUL_RESULT3]]
; LV-NEXT: [[TMP15:%.*]] = getelementptr i8, i8* [[SCEVGEP5]], i64 [[TMP13]]
; LV-NEXT: [[TMP16:%.*]] = icmp ugt i8* [[TMP15]], [[SCEVGEP5]]
; LV-NEXT: [[TMP19:%.*]] = or i1 [[TMP16]], [[MUL_OVERFLOW4]]
; LV-NEXT: [[TMP20:%.*]] = or i1 [[TMP10]], [[TMP19]]
; LV-NEXT: br i1 [[TMP20]], label [[FOR_BODY_PH_LVER_ORIG:%.*]], label [[FOR_BODY_PH:%.*]]
; LV: for.body.ph.lver.orig:
; LV-NEXT: br label [[FOR_BODY_LVER_ORIG:%.*]]
; LV: for.body.lver.orig:
; LV-NEXT: [[IND_LVER_ORIG:%.*]] = phi i64 [ 0, [[FOR_BODY_PH_LVER_ORIG]] ], [ [[INC_LVER_ORIG:%.*]], [[FOR_BODY_LVER_ORIG]] ]
; LV-NEXT: [[IND1_LVER_ORIG:%.*]] = phi i32 [ [[TRUNCN]], [[FOR_BODY_PH_LVER_ORIG]] ], [ [[DEC_LVER_ORIG:%.*]], [[FOR_BODY_LVER_ORIG]] ]
; LV-NEXT: [[MUL_LVER_ORIG:%.*]] = mul i32 [[IND1_LVER_ORIG]], 2
; LV-NEXT: [[ARRAYIDXA_LVER_ORIG:%.*]] = getelementptr inbounds i16, i16* [[A]], i32 [[MUL_LVER_ORIG]]
; LV-NEXT: [[LOADA_LVER_ORIG:%.*]] = load i16, i16* [[ARRAYIDXA_LVER_ORIG]], align 2
; LV-NEXT: [[ARRAYIDXB_LVER_ORIG:%.*]] = getelementptr inbounds i16, i16* [[B:%.*]], i64 [[IND_LVER_ORIG]]
; LV-NEXT: [[LOADB_LVER_ORIG:%.*]] = load i16, i16* [[ARRAYIDXB_LVER_ORIG]], align 2
; LV-NEXT: [[ADD_LVER_ORIG:%.*]] = mul i16 [[LOADA_LVER_ORIG]], [[LOADB_LVER_ORIG]]
; LV-NEXT: store i16 [[ADD_LVER_ORIG]], i16* [[ARRAYIDXA_LVER_ORIG]], align 2
; LV-NEXT: [[INC_LVER_ORIG]] = add nuw nsw i64 [[IND_LVER_ORIG]], 1
; LV-NEXT: [[DEC_LVER_ORIG]] = sub i32 [[IND1_LVER_ORIG]], 1
; LV-NEXT: [[EXITCOND_LVER_ORIG:%.*]] = icmp eq i64 [[INC_LVER_ORIG]], [[N]]
; LV-NEXT: br i1 [[EXITCOND_LVER_ORIG]], label [[FOR_END_LOOPEXIT:%.*]], label [[FOR_BODY_LVER_ORIG]]
; LV: for.body.ph:
; LV-NEXT: br label [[FOR_BODY:%.*]]
; LV: for.body:
; LV-NEXT: [[IND:%.*]] = phi i64 [ 0, [[FOR_BODY_PH]] ], [ [[INC:%.*]], [[FOR_BODY]] ]
; LV-NEXT: [[IND1:%.*]] = phi i32 [ [[TRUNCN]], [[FOR_BODY_PH]] ], [ [[DEC:%.*]], [[FOR_BODY]] ]
; LV-NEXT: [[MUL:%.*]] = mul i32 [[IND1]], 2
; LV-NEXT: [[ARRAYIDXA:%.*]] = getelementptr inbounds i16, i16* [[A]], i32 [[MUL]]
; LV-NEXT: [[LOADA:%.*]] = load i16, i16* [[ARRAYIDXA]], align 2
; LV-NEXT: [[ARRAYIDXB:%.*]] = getelementptr inbounds i16, i16* [[B]], i64 [[IND]]
; LV-NEXT: [[LOADB:%.*]] = load i16, i16* [[ARRAYIDXB]], align 2
; LV-NEXT: [[ADD:%.*]] = mul i16 [[LOADA]], [[LOADB]]
; LV-NEXT: store i16 [[ADD]], i16* [[ARRAYIDXA]], align 2
; LV-NEXT: [[INC]] = add nuw nsw i64 [[IND]], 1
; LV-NEXT: [[DEC]] = sub i32 [[IND1]], 1
; LV-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INC]], [[N]]
; LV-NEXT: br i1 [[EXITCOND]], label [[FOR_END_LOOPEXIT6:%.*]], label [[FOR_BODY]]
; LV: for.end.loopexit:
; LV-NEXT: br label [[FOR_END:%.*]]
; LV: for.end.loopexit6:
; LV-NEXT: br label [[FOR_END]]
; LV: for.end:
; LV-NEXT: ret void
;
                i16* noalias %b, i64 %N) {
entry:
  %TruncN = trunc i64 %N to i32
  br label %for.body

for.body: ; preds = %for.body, %entry
  %ind = phi i64 [ 0, %entry ], [ %inc, %for.body ]
  %ind1 = phi i32 [ %TruncN, %entry ], [ %dec, %for.body ]

  %mul = mul i32 %ind1, 2

  %arrayidxA = getelementptr inbounds i16, i16* %a, i32 %mul
  %loadA = load i16, i16* %arrayidxA, align 2

  %arrayidxB = getelementptr inbounds i16, i16* %b, i64 %ind
  %loadB = load i16, i16* %arrayidxB, align 2

  %add = mul i16 %loadA, %loadB

  store i16 %add, i16* %arrayidxA, align 2

  %inc = add nuw nsw i64 %ind, 1
  %dec = sub i32 %ind1, 1

  %exitcond = icmp eq i64 %inc, %N
  br i1 %exitcond, label %for.end, label %for.body
for.end: ; preds = %for.body
  ret void
}