1 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
2 ; RUN: opt -basic-aa -loop-idiom < %s -S | FileCheck %s
3 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
6 ; CHECK: @.memset_pattern = private unnamed_addr constant [4 x i32] [i32 1, i32 1, i32 1, i32 1]
9 ; CHECK: @.memset_pattern.1 = private unnamed_addr constant [2 x i32*] [i32* @G, i32* @G]
11 target triple = "x86_64-apple-darwin10.0.0"
13 define void @test1(i8* %Base, i64 %Size) nounwind ssp {
14 ; CHECK-LABEL: @test1(
16 ; CHECK-NEXT: call void @llvm.memset.p0i8.i64(i8* align 1 [[BASE:%.*]], i8 0, i64 [[SIZE:%.*]], i1 false)
17 ; CHECK-NEXT: br label [[FOR_BODY:%.*]]
19 ; CHECK-NEXT: [[INDVAR:%.*]] = phi i64 [ 0, [[BB_NPH:%.*]] ], [ [[INDVAR_NEXT:%.*]], [[FOR_BODY]] ]
20 ; CHECK-NEXT: [[I_0_014:%.*]] = getelementptr i8, i8* [[BASE]], i64 [[INDVAR]]
21 ; CHECK-NEXT: [[INDVAR_NEXT]] = add i64 [[INDVAR]], 1
22 ; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVAR_NEXT]], [[SIZE]]
23 ; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_END:%.*]], label [[FOR_BODY]]
25 ; CHECK-NEXT: ret void
27 bb.nph: ; preds = %entry
30 for.body: ; preds = %bb.nph, %for.body
31 %indvar = phi i64 [ 0, %bb.nph ], [ %indvar.next, %for.body ]
32 %I.0.014 = getelementptr i8, i8* %Base, i64 %indvar
33 store i8 0, i8* %I.0.014, align 1
34 %indvar.next = add i64 %indvar, 1
35 %exitcond = icmp eq i64 %indvar.next, %Size
36 br i1 %exitcond, label %for.end, label %for.body
38 for.end: ; preds = %for.body, %entry
42 ; Make sure memset is formed for larger than 1 byte stores, and that the
43 ; alignment of the store is preserved
44 define void @test1_i16(i16* align 2 %Base, i64 %Size) nounwind ssp {
45 ; CHECK-LABEL: @test1_i16(
47 ; CHECK-NEXT: [[BASE1:%.*]] = bitcast i16* [[BASE:%.*]] to i8*
48 ; CHECK-NEXT: [[TMP0:%.*]] = shl nuw i64 [[SIZE:%.*]], 1
49 ; CHECK-NEXT: call void @llvm.memset.p0i8.i64(i8* align 2 [[BASE1]], i8 0, i64 [[TMP0]], i1 false)
50 ; CHECK-NEXT: br label [[FOR_BODY:%.*]]
52 ; CHECK-NEXT: [[INDVAR:%.*]] = phi i64 [ 0, [[BB_NPH:%.*]] ], [ [[INDVAR_NEXT:%.*]], [[FOR_BODY]] ]
53 ; CHECK-NEXT: [[I_0_014:%.*]] = getelementptr i16, i16* [[BASE]], i64 [[INDVAR]]
54 ; CHECK-NEXT: [[INDVAR_NEXT]] = add i64 [[INDVAR]], 1
55 ; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVAR_NEXT]], [[SIZE]]
56 ; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_END:%.*]], label [[FOR_BODY]]
58 ; CHECK-NEXT: ret void
60 bb.nph: ; preds = %entry
63 for.body: ; preds = %bb.nph, %for.body
64 %indvar = phi i64 [ 0, %bb.nph ], [ %indvar.next, %for.body ]
65 %I.0.014 = getelementptr i16, i16* %Base, i64 %indvar
66 store i16 0, i16* %I.0.014, align 2
67 %indvar.next = add i64 %indvar, 1
68 %exitcond = icmp eq i64 %indvar.next, %Size
69 br i1 %exitcond, label %for.end, label %for.body
71 for.end: ; preds = %for.body, %entry
75 ; This is a loop that was rotated but where the blocks weren't merged. This
76 ; shouldn't perturb us.
77 define void @test1a(i8* %Base, i64 %Size) nounwind ssp {
78 ; CHECK-LABEL: @test1a(
80 ; CHECK-NEXT: call void @llvm.memset.p0i8.i64(i8* align 1 [[BASE:%.*]], i8 0, i64 [[SIZE:%.*]], i1 false)
81 ; CHECK-NEXT: br label [[FOR_BODY:%.*]]
83 ; CHECK-NEXT: [[INDVAR:%.*]] = phi i64 [ 0, [[BB_NPH:%.*]] ], [ [[INDVAR_NEXT:%.*]], [[FOR_BODY_CONT:%.*]] ]
84 ; CHECK-NEXT: [[I_0_014:%.*]] = getelementptr i8, i8* [[BASE]], i64 [[INDVAR]]
85 ; CHECK-NEXT: [[INDVAR_NEXT]] = add i64 [[INDVAR]], 1
86 ; CHECK-NEXT: br label [[FOR_BODY_CONT]]
87 ; CHECK: for.body.cont:
88 ; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVAR_NEXT]], [[SIZE]]
89 ; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_END:%.*]], label [[FOR_BODY]]
91 ; CHECK-NEXT: ret void
93 bb.nph: ; preds = %entry
96 for.body: ; preds = %bb.nph, %for.body
97 %indvar = phi i64 [ 0, %bb.nph ], [ %indvar.next, %for.body.cont ]
98 %I.0.014 = getelementptr i8, i8* %Base, i64 %indvar
99 store i8 0, i8* %I.0.014, align 1
100 %indvar.next = add i64 %indvar, 1
101 br label %for.body.cont
103 %exitcond = icmp eq i64 %indvar.next, %Size
104 br i1 %exitcond, label %for.end, label %for.body
106 for.end: ; preds = %for.body, %entry
111 define void @test2(i32* %Base, i64 %Size) nounwind ssp {
112 ; CHECK-LABEL: @test2(
114 ; CHECK-NEXT: [[BASE1:%.*]] = bitcast i32* [[BASE:%.*]] to i8*
115 ; CHECK-NEXT: [[CMP10:%.*]] = icmp eq i64 [[SIZE:%.*]], 0
116 ; CHECK-NEXT: br i1 [[CMP10]], label [[FOR_END:%.*]], label [[FOR_BODY_PREHEADER:%.*]]
117 ; CHECK: for.body.preheader:
118 ; CHECK-NEXT: [[TMP0:%.*]] = shl nuw i64 [[SIZE]], 2
119 ; CHECK-NEXT: call void @llvm.memset.p0i8.i64(i8* align 4 [[BASE1]], i8 1, i64 [[TMP0]], i1 false)
120 ; CHECK-NEXT: br label [[FOR_BODY:%.*]]
122 ; CHECK-NEXT: [[I_011:%.*]] = phi i64 [ [[INC:%.*]], [[FOR_BODY]] ], [ 0, [[FOR_BODY_PREHEADER]] ]
123 ; CHECK-NEXT: [[ADD_PTR_I:%.*]] = getelementptr i32, i32* [[BASE]], i64 [[I_011]]
124 ; CHECK-NEXT: [[INC]] = add nsw i64 [[I_011]], 1
125 ; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INC]], [[SIZE]]
126 ; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_END_LOOPEXIT:%.*]], label [[FOR_BODY]]
127 ; CHECK: for.end.loopexit:
128 ; CHECK-NEXT: br label [[FOR_END]]
130 ; CHECK-NEXT: ret void
133 %cmp10 = icmp eq i64 %Size, 0
134 br i1 %cmp10, label %for.end, label %for.body
136 for.body: ; preds = %entry, %for.body
137 %i.011 = phi i64 [ %inc, %for.body ], [ 0, %entry ]
138 %add.ptr.i = getelementptr i32, i32* %Base, i64 %i.011
139 store i32 16843009, i32* %add.ptr.i, align 4
140 %inc = add nsw i64 %i.011, 1
141 %exitcond = icmp eq i64 %inc, %Size
142 br i1 %exitcond, label %for.end, label %for.body
144 for.end: ; preds = %for.body, %entry
148 ; This is a case where there is an extra may-aliased store in the loop, so we can't
149 ; promote the memset.
150 define void @test3(i32* %Base, i64 %Size, i8 *%MayAlias) nounwind ssp {
151 ; CHECK-LABEL: @test3(
153 ; CHECK-NEXT: br label [[FOR_BODY:%.*]]
155 ; CHECK-NEXT: [[I_011:%.*]] = phi i64 [ [[INC:%.*]], [[FOR_BODY]] ], [ 0, [[ENTRY:%.*]] ]
156 ; CHECK-NEXT: [[ADD_PTR_I:%.*]] = getelementptr i32, i32* [[BASE:%.*]], i64 [[I_011]]
157 ; CHECK-NEXT: store i32 16843009, i32* [[ADD_PTR_I]], align 4
158 ; CHECK-NEXT: store i8 42, i8* [[MAYALIAS:%.*]], align 1
159 ; CHECK-NEXT: [[INC]] = add nsw i64 [[I_011]], 1
160 ; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INC]], [[SIZE:%.*]]
161 ; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_END:%.*]], label [[FOR_BODY]]
163 ; CHECK-NEXT: ret void
168 for.body: ; preds = %entry, %for.body
169 %i.011 = phi i64 [ %inc, %for.body ], [ 0, %entry ]
170 %add.ptr.i = getelementptr i32, i32* %Base, i64 %i.011
171 store i32 16843009, i32* %add.ptr.i, align 4
173 store i8 42, i8* %MayAlias
174 %inc = add nsw i64 %i.011, 1
175 %exitcond = icmp eq i64 %inc, %Size
176 br i1 %exitcond, label %for.end, label %for.body
178 for.end: ; preds = %entry
182 ; Make sure the first store in the loop is turned into a memset.
183 define void @test4(i8* %Base) nounwind ssp {
184 ; CHECK-LABEL: @test4(
185 ; CHECK-NEXT: bb.nph:
186 ; CHECK-NEXT: [[BASE100:%.*]] = getelementptr i8, i8* [[BASE:%.*]], i64 1000
187 ; CHECK-NEXT: call void @llvm.memset.p0i8.i64(i8* align 1 [[BASE]], i8 0, i64 100, i1 false)
188 ; CHECK-NEXT: br label [[FOR_BODY:%.*]]
190 ; CHECK-NEXT: [[INDVAR:%.*]] = phi i64 [ 0, [[BB_NPH:%.*]] ], [ [[INDVAR_NEXT:%.*]], [[FOR_BODY]] ]
191 ; CHECK-NEXT: [[I_0_014:%.*]] = getelementptr i8, i8* [[BASE]], i64 [[INDVAR]]
192 ; CHECK-NEXT: store i8 42, i8* [[BASE100]], align 1
193 ; CHECK-NEXT: [[INDVAR_NEXT]] = add i64 [[INDVAR]], 1
194 ; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVAR_NEXT]], 100
195 ; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_END:%.*]], label [[FOR_BODY]]
197 ; CHECK-NEXT: ret void
199 bb.nph: ; preds = %entry
200 %Base100 = getelementptr i8, i8* %Base, i64 1000
203 for.body: ; preds = %bb.nph, %for.body
204 %indvar = phi i64 [ 0, %bb.nph ], [ %indvar.next, %for.body ]
205 %I.0.014 = getelementptr i8, i8* %Base, i64 %indvar
206 store i8 0, i8* %I.0.014, align 1
208 ;; Store beyond the range memset, should be safe to promote.
209 store i8 42, i8* %Base100
211 %indvar.next = add i64 %indvar, 1
212 %exitcond = icmp eq i64 %indvar.next, 100
213 br i1 %exitcond, label %for.end, label %for.body
215 for.end: ; preds = %for.body, %entry
219 ; This can't be promoted: the memset is a store of a loop variant value.
220 define void @test5(i8* %Base, i64 %Size) nounwind ssp {
221 ; CHECK-LABEL: @test5(
222 ; CHECK-NEXT: bb.nph:
223 ; CHECK-NEXT: br label [[FOR_BODY:%.*]]
225 ; CHECK-NEXT: [[INDVAR:%.*]] = phi i64 [ 0, [[BB_NPH:%.*]] ], [ [[INDVAR_NEXT:%.*]], [[FOR_BODY]] ]
226 ; CHECK-NEXT: [[I_0_014:%.*]] = getelementptr i8, i8* [[BASE:%.*]], i64 [[INDVAR]]
227 ; CHECK-NEXT: [[V:%.*]] = trunc i64 [[INDVAR]] to i8
228 ; CHECK-NEXT: store i8 [[V]], i8* [[I_0_014]], align 1
229 ; CHECK-NEXT: [[INDVAR_NEXT]] = add i64 [[INDVAR]], 1
230 ; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVAR_NEXT]], [[SIZE:%.*]]
231 ; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_END:%.*]], label [[FOR_BODY]]
233 ; CHECK-NEXT: ret void
235 bb.nph: ; preds = %entry
238 for.body: ; preds = %bb.nph, %for.body
239 %indvar = phi i64 [ 0, %bb.nph ], [ %indvar.next, %for.body ]
240 %I.0.014 = getelementptr i8, i8* %Base, i64 %indvar
242 %V = trunc i64 %indvar to i8
243 store i8 %V, i8* %I.0.014, align 1
244 %indvar.next = add i64 %indvar, 1
245 %exitcond = icmp eq i64 %indvar.next, %Size
246 br i1 %exitcond, label %for.end, label %for.body
248 for.end: ; preds = %for.body, %entry
254 define void @test6(i64 %Size) nounwind ssp {
255 ; CHECK-LABEL: @test6(
256 ; CHECK-NEXT: bb.nph:
257 ; CHECK-NEXT: [[BASE:%.*]] = alloca i8, i32 10000, align 1
258 ; CHECK-NEXT: [[DEST:%.*]] = alloca i8, i32 10000, align 1
259 ; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 1 [[DEST]], i8* align 1 [[BASE]], i64 [[SIZE:%.*]], i1 false)
260 ; CHECK-NEXT: br label [[FOR_BODY:%.*]]
262 ; CHECK-NEXT: [[INDVAR:%.*]] = phi i64 [ 0, [[BB_NPH:%.*]] ], [ [[INDVAR_NEXT:%.*]], [[FOR_BODY]] ]
263 ; CHECK-NEXT: [[I_0_014:%.*]] = getelementptr i8, i8* [[BASE]], i64 [[INDVAR]]
264 ; CHECK-NEXT: [[DESTI:%.*]] = getelementptr i8, i8* [[DEST]], i64 [[INDVAR]]
265 ; CHECK-NEXT: [[V:%.*]] = load i8, i8* [[I_0_014]], align 1
266 ; CHECK-NEXT: [[INDVAR_NEXT]] = add i64 [[INDVAR]], 1
267 ; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVAR_NEXT]], [[SIZE]]
268 ; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_END:%.*]], label [[FOR_BODY]]
270 ; CHECK-NEXT: ret void
273 %Base = alloca i8, i32 10000
274 %Dest = alloca i8, i32 10000
277 for.body: ; preds = %bb.nph, %for.body
278 %indvar = phi i64 [ 0, %bb.nph ], [ %indvar.next, %for.body ]
279 %I.0.014 = getelementptr i8, i8* %Base, i64 %indvar
280 %DestI = getelementptr i8, i8* %Dest, i64 %indvar
281 %V = load i8, i8* %I.0.014, align 1
282 store i8 %V, i8* %DestI, align 1
283 %indvar.next = add i64 %indvar, 1
284 %exitcond = icmp eq i64 %indvar.next, %Size
285 br i1 %exitcond, label %for.end, label %for.body
287 for.end: ; preds = %for.body, %entry
291 ;; memcpy formation, check alignment
292 define void @test6_dest_align(i32* noalias align 1 %Base, i32* noalias align 4 %Dest, i64 %Size) nounwind ssp {
293 ; CHECK-LABEL: @test6_dest_align(
294 ; CHECK-NEXT: bb.nph:
295 ; CHECK-NEXT: [[DEST1:%.*]] = bitcast i32* [[DEST:%.*]] to i8*
296 ; CHECK-NEXT: [[BASE2:%.*]] = bitcast i32* [[BASE:%.*]] to i8*
297 ; CHECK-NEXT: [[TMP0:%.*]] = shl nuw i64 [[SIZE:%.*]], 2
298 ; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[DEST1]], i8* align 1 [[BASE2]], i64 [[TMP0]], i1 false)
299 ; CHECK-NEXT: br label [[FOR_BODY:%.*]]
301 ; CHECK-NEXT: [[INDVAR:%.*]] = phi i64 [ 0, [[BB_NPH:%.*]] ], [ [[INDVAR_NEXT:%.*]], [[FOR_BODY]] ]
302 ; CHECK-NEXT: [[I_0_014:%.*]] = getelementptr i32, i32* [[BASE]], i64 [[INDVAR]]
303 ; CHECK-NEXT: [[DESTI:%.*]] = getelementptr i32, i32* [[DEST]], i64 [[INDVAR]]
304 ; CHECK-NEXT: [[V:%.*]] = load i32, i32* [[I_0_014]], align 1
305 ; CHECK-NEXT: [[INDVAR_NEXT]] = add i64 [[INDVAR]], 1
306 ; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVAR_NEXT]], [[SIZE]]
307 ; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_END:%.*]], label [[FOR_BODY]]
309 ; CHECK-NEXT: ret void
314 for.body: ; preds = %bb.nph, %for.body
315 %indvar = phi i64 [ 0, %bb.nph ], [ %indvar.next, %for.body ]
316 %I.0.014 = getelementptr i32, i32* %Base, i64 %indvar
317 %DestI = getelementptr i32, i32* %Dest, i64 %indvar
318 %V = load i32, i32* %I.0.014, align 1
319 store i32 %V, i32* %DestI, align 4
320 %indvar.next = add i64 %indvar, 1
321 %exitcond = icmp eq i64 %indvar.next, %Size
322 br i1 %exitcond, label %for.end, label %for.body
324 for.end: ; preds = %for.body, %entry
328 ;; memcpy formation, check alignment
329 define void @test6_src_align(i32* noalias align 4 %Base, i32* noalias align 1 %Dest, i64 %Size) nounwind ssp {
330 ; CHECK-LABEL: @test6_src_align(
331 ; CHECK-NEXT: bb.nph:
332 ; CHECK-NEXT: [[DEST1:%.*]] = bitcast i32* [[DEST:%.*]] to i8*
333 ; CHECK-NEXT: [[BASE2:%.*]] = bitcast i32* [[BASE:%.*]] to i8*
334 ; CHECK-NEXT: [[TMP0:%.*]] = shl nuw i64 [[SIZE:%.*]], 2
335 ; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 1 [[DEST1]], i8* align 4 [[BASE2]], i64 [[TMP0]], i1 false)
336 ; CHECK-NEXT: br label [[FOR_BODY:%.*]]
338 ; CHECK-NEXT: [[INDVAR:%.*]] = phi i64 [ 0, [[BB_NPH:%.*]] ], [ [[INDVAR_NEXT:%.*]], [[FOR_BODY]] ]
339 ; CHECK-NEXT: [[I_0_014:%.*]] = getelementptr i32, i32* [[BASE]], i64 [[INDVAR]]
340 ; CHECK-NEXT: [[DESTI:%.*]] = getelementptr i32, i32* [[DEST]], i64 [[INDVAR]]
341 ; CHECK-NEXT: [[V:%.*]] = load i32, i32* [[I_0_014]], align 4
342 ; CHECK-NEXT: [[INDVAR_NEXT]] = add i64 [[INDVAR]], 1
343 ; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVAR_NEXT]], [[SIZE]]
344 ; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_END:%.*]], label [[FOR_BODY]]
346 ; CHECK-NEXT: ret void
351 for.body: ; preds = %bb.nph, %for.body
352 %indvar = phi i64 [ 0, %bb.nph ], [ %indvar.next, %for.body ]
353 %I.0.014 = getelementptr i32, i32* %Base, i64 %indvar
354 %DestI = getelementptr i32, i32* %Dest, i64 %indvar
355 %V = load i32, i32* %I.0.014, align 4
356 store i32 %V, i32* %DestI, align 1
357 %indvar.next = add i64 %indvar, 1
358 %exitcond = icmp eq i64 %indvar.next, %Size
359 br i1 %exitcond, label %for.end, label %for.body
361 for.end: ; preds = %for.body, %entry
366 ; This is a loop that was rotated but where the blocks weren't merged. This
367 ; shouldn't perturb us.
368 define void @test7(i8* %Base, i64 %Size) nounwind ssp {
369 ; CHECK-LABEL: @test7(
370 ; CHECK-NEXT: bb.nph:
371 ; CHECK-NEXT: call void @llvm.memset.p0i8.i64(i8* align 1 [[BASE:%.*]], i8 0, i64 [[SIZE:%.*]], i1 false)
372 ; CHECK-NEXT: br label [[FOR_BODY:%.*]]
374 ; CHECK-NEXT: [[INDVAR:%.*]] = phi i64 [ 0, [[BB_NPH:%.*]] ], [ [[INDVAR_NEXT:%.*]], [[FOR_BODY_CONT:%.*]] ]
375 ; CHECK-NEXT: br label [[FOR_BODY_CONT]]
376 ; CHECK: for.body.cont:
377 ; CHECK-NEXT: [[I_0_014:%.*]] = getelementptr i8, i8* [[BASE]], i64 [[INDVAR]]
378 ; CHECK-NEXT: [[INDVAR_NEXT]] = add i64 [[INDVAR]], 1
379 ; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVAR_NEXT]], [[SIZE]]
380 ; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_END:%.*]], label [[FOR_BODY]]
382 ; CHECK-NEXT: ret void
384 bb.nph: ; preds = %entry
387 for.body: ; preds = %bb.nph, %for.body
388 %indvar = phi i64 [ 0, %bb.nph ], [ %indvar.next, %for.body.cont ]
389 br label %for.body.cont
391 %I.0.014 = getelementptr i8, i8* %Base, i64 %indvar
392 store i8 0, i8* %I.0.014, align 1
393 %indvar.next = add i64 %indvar, 1
394 %exitcond = icmp eq i64 %indvar.next, %Size
395 br i1 %exitcond, label %for.end, label %for.body
397 for.end: ; preds = %for.body, %entry
401 ; This loop should not be transformed; it only executes one iteration.
402 define void @test8(i64* %Ptr, i64 %Size) nounwind ssp {
403 ; CHECK-LABEL: @test8(
404 ; CHECK-NEXT: bb.nph:
405 ; CHECK-NEXT: br label [[FOR_BODY:%.*]]
407 ; CHECK-NEXT: [[INDVAR:%.*]] = phi i64 [ 0, [[BB_NPH:%.*]] ], [ [[INDVAR_NEXT:%.*]], [[FOR_BODY]] ]
408 ; CHECK-NEXT: [[PI:%.*]] = getelementptr i64, i64* [[PTR:%.*]], i64 [[INDVAR]]
409 ; CHECK-NEXT: store i64 0, i64* [[PI]], align 8
410 ; CHECK-NEXT: [[INDVAR_NEXT]] = add i64 [[INDVAR]], 1
411 ; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVAR_NEXT]], 1
412 ; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_END:%.*]], label [[FOR_BODY]]
414 ; CHECK-NEXT: ret void
416 bb.nph: ; preds = %entry
419 for.body: ; preds = %bb.nph, %for.body
420 %indvar = phi i64 [ 0, %bb.nph ], [ %indvar.next, %for.body ]
421 %PI = getelementptr i64, i64* %Ptr, i64 %indvar
422 store i64 0, i64 *%PI
423 %indvar.next = add i64 %indvar, 1
424 %exitcond = icmp eq i64 %indvar.next, 1
425 br i1 %exitcond, label %for.end, label %for.body
427 for.end: ; preds = %for.body, %entry
431 declare i8* @external(i8*)
433 ;; This cannot be transformed into a memcpy, because the read-from location is
434 ;; mutated by the loop.
435 define void @test9(i64 %Size) nounwind ssp {
436 ; CHECK-LABEL: @test9(
437 ; CHECK-NEXT: bb.nph:
438 ; CHECK-NEXT: [[BASE:%.*]] = alloca i8, i32 10000, align 1
439 ; CHECK-NEXT: [[DEST:%.*]] = alloca i8, i32 10000, align 1
440 ; CHECK-NEXT: [[BASEALIAS:%.*]] = call i8* @external(i8* [[BASE]])
441 ; CHECK-NEXT: br label [[FOR_BODY:%.*]]
443 ; CHECK-NEXT: [[INDVAR:%.*]] = phi i64 [ 0, [[BB_NPH:%.*]] ], [ [[INDVAR_NEXT:%.*]], [[FOR_BODY]] ]
444 ; CHECK-NEXT: [[I_0_014:%.*]] = getelementptr i8, i8* [[BASE]], i64 [[INDVAR]]
445 ; CHECK-NEXT: [[DESTI:%.*]] = getelementptr i8, i8* [[DEST]], i64 [[INDVAR]]
446 ; CHECK-NEXT: [[V:%.*]] = load i8, i8* [[I_0_014]], align 1
447 ; CHECK-NEXT: store i8 [[V]], i8* [[DESTI]], align 1
448 ; CHECK-NEXT: store i8 4, i8* [[BASEALIAS]], align 1
449 ; CHECK-NEXT: [[INDVAR_NEXT]] = add i64 [[INDVAR]], 1
450 ; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVAR_NEXT]], [[SIZE:%.*]]
451 ; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_END:%.*]], label [[FOR_BODY]]
453 ; CHECK-NEXT: ret void
456 %Base = alloca i8, i32 10000
457 %Dest = alloca i8, i32 10000
459 %BaseAlias = call i8* @external(i8* %Base)
462 for.body: ; preds = %bb.nph, %for.body
463 %indvar = phi i64 [ 0, %bb.nph ], [ %indvar.next, %for.body ]
464 %I.0.014 = getelementptr i8, i8* %Base, i64 %indvar
465 %DestI = getelementptr i8, i8* %Dest, i64 %indvar
466 %V = load i8, i8* %I.0.014, align 1
467 store i8 %V, i8* %DestI, align 1
469 ;; This store can clobber the input.
470 store i8 4, i8* %BaseAlias
472 %indvar.next = add i64 %indvar, 1
473 %exitcond = icmp eq i64 %indvar.next, %Size
474 br i1 %exitcond, label %for.end, label %for.body
476 for.end: ; preds = %for.body, %entry
480 ; Two dimensional nested loop should be promoted to one big memset.
481 define void @test10(i8* %X) nounwind ssp {
482 ; CHECK-LABEL: @test10(
484 ; CHECK-NEXT: call void @llvm.memset.p0i8.i64(i8* align 1 [[X:%.*]], i8 0, i64 10000, i1 false)
485 ; CHECK-NEXT: br label [[BB_NPH:%.*]]
487 ; CHECK-NEXT: [[INDVAR:%.*]] = phi i64 [ [[INDVAR_NEXT:%.*]], [[FOR_INC10:%.*]] ], [ 0, [[ENTRY:%.*]] ]
488 ; CHECK-NEXT: [[I_04:%.*]] = phi i32 [ 0, [[ENTRY]] ], [ [[INC12:%.*]], [[FOR_INC10]] ]
489 ; CHECK-NEXT: [[TMP0:%.*]] = mul nuw nsw i64 [[INDVAR]], 100
490 ; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, i8* [[X]], i64 [[TMP0]]
491 ; CHECK-NEXT: br label [[FOR_BODY5:%.*]]
493 ; CHECK-NEXT: [[J_02:%.*]] = phi i32 [ 0, [[BB_NPH]] ], [ [[INC:%.*]], [[FOR_BODY5]] ]
494 ; CHECK-NEXT: [[MUL:%.*]] = mul nsw i32 [[I_04]], 100
495 ; CHECK-NEXT: [[ADD:%.*]] = add nsw i32 [[J_02]], [[MUL]]
496 ; CHECK-NEXT: [[IDXPROM:%.*]] = sext i32 [[ADD]] to i64
497 ; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, i8* [[X]], i64 [[IDXPROM]]
498 ; CHECK-NEXT: [[INC]] = add nsw i32 [[J_02]], 1
499 ; CHECK-NEXT: [[CMP4:%.*]] = icmp eq i32 [[INC]], 100
500 ; CHECK-NEXT: br i1 [[CMP4]], label [[FOR_INC10]], label [[FOR_BODY5]]
502 ; CHECK-NEXT: [[INC12]] = add nsw i32 [[I_04]], 1
503 ; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[INC12]], 100
504 ; CHECK-NEXT: [[INDVAR_NEXT]] = add i64 [[INDVAR]], 1
505 ; CHECK-NEXT: br i1 [[CMP]], label [[FOR_END13:%.*]], label [[BB_NPH]]
507 ; CHECK-NEXT: ret void
512 bb.nph: ; preds = %entry, %for.inc10
513 %i.04 = phi i32 [ 0, %entry ], [ %inc12, %for.inc10 ]
516 for.body5: ; preds = %for.body5, %bb.nph
517 %j.02 = phi i32 [ 0, %bb.nph ], [ %inc, %for.body5 ]
518 %mul = mul nsw i32 %i.04, 100
519 %add = add nsw i32 %j.02, %mul
520 %idxprom = sext i32 %add to i64
521 %arrayidx = getelementptr inbounds i8, i8* %X, i64 %idxprom
522 store i8 0, i8* %arrayidx, align 1
523 %inc = add nsw i32 %j.02, 1
524 %cmp4 = icmp eq i32 %inc, 100
525 br i1 %cmp4, label %for.inc10, label %for.body5
527 for.inc10: ; preds = %for.body5
528 %inc12 = add nsw i32 %i.04, 1
529 %cmp = icmp eq i32 %inc12, 100
530 br i1 %cmp, label %for.end13, label %bb.nph
532 for.end13: ; preds = %for.inc10
536 ; On darwin10 (which is the triple in this .ll file) this loop can be turned
537 ; into a memset_pattern call.
539 define void @test11_pattern(i32* nocapture %P) nounwind ssp {
540 ; CHECK-LABEL: @test11_pattern(
542 ; CHECK-NEXT: [[P1:%.*]] = bitcast i32* [[P:%.*]] to i8*
543 ; CHECK-NEXT: call void @memset_pattern16(i8* [[P1]], i8* bitcast ([4 x i32]* @.memset_pattern to i8*), i64 40000)
544 ; CHECK-NEXT: br label [[FOR_BODY:%.*]]
546 ; CHECK-NEXT: [[INDVAR:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDVAR_NEXT:%.*]], [[FOR_BODY]] ]
547 ; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr i32, i32* [[P]], i64 [[INDVAR]]
548 ; CHECK-NEXT: [[INDVAR_NEXT]] = add i64 [[INDVAR]], 1
549 ; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVAR_NEXT]], 10000
550 ; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_END:%.*]], label [[FOR_BODY]]
552 ; CHECK-NEXT: ret void
557 for.body: ; preds = %entry, %for.body
558 %indvar = phi i64 [ 0, %entry ], [ %indvar.next, %for.body ]
559 %arrayidx = getelementptr i32, i32* %P, i64 %indvar
560 store i32 1, i32* %arrayidx, align 4
561 %indvar.next = add i64 %indvar, 1
562 %exitcond = icmp eq i64 %indvar.next, 10000
563 br i1 %exitcond, label %for.end, label %for.body
565 for.end: ; preds = %for.body
569 ; Store of null should turn into memset of zero.
570 define void @test12(i32** nocapture %P) nounwind ssp {
571 ; CHECK-LABEL: @test12(
573 ; CHECK-NEXT: [[P1:%.*]] = bitcast i32** [[P:%.*]] to i8*
574 ; CHECK-NEXT: call void @llvm.memset.p0i8.i64(i8* align 4 [[P1]], i8 0, i64 80000, i1 false)
575 ; CHECK-NEXT: br label [[FOR_BODY:%.*]]
577 ; CHECK-NEXT: [[INDVAR:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDVAR_NEXT:%.*]], [[FOR_BODY]] ]
578 ; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr i32*, i32** [[P]], i64 [[INDVAR]]
579 ; CHECK-NEXT: [[INDVAR_NEXT]] = add i64 [[INDVAR]], 1
580 ; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVAR_NEXT]], 10000
581 ; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_END:%.*]], label [[FOR_BODY]]
583 ; CHECK-NEXT: ret void
588 for.body: ; preds = %entry, %for.body
589 %indvar = phi i64 [ 0, %entry ], [ %indvar.next, %for.body ]
590 %arrayidx = getelementptr i32*, i32** %P, i64 %indvar
591 store i32* null, i32** %arrayidx, align 4
592 %indvar.next = add i64 %indvar, 1
593 %exitcond = icmp eq i64 %indvar.next, 10000
594 br i1 %exitcond, label %for.end, label %for.body
596 for.end: ; preds = %for.body
602 ; This store-of-address loop can be turned into a memset_pattern call.
604 define void @test13_pattern(i32** nocapture %P) nounwind ssp {
605 ; CHECK-LABEL: @test13_pattern(
607 ; CHECK-NEXT: [[P1:%.*]] = bitcast i32** [[P:%.*]] to i8*
608 ; CHECK-NEXT: call void @memset_pattern16(i8* [[P1]], i8* bitcast ([2 x i32*]* @.memset_pattern.1 to i8*), i64 80000)
609 ; CHECK-NEXT: br label [[FOR_BODY:%.*]]
611 ; CHECK-NEXT: [[INDVAR:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDVAR_NEXT:%.*]], [[FOR_BODY]] ]
612 ; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr i32*, i32** [[P]], i64 [[INDVAR]]
613 ; CHECK-NEXT: [[INDVAR_NEXT]] = add i64 [[INDVAR]], 1
614 ; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVAR_NEXT]], 10000
615 ; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_END:%.*]], label [[FOR_BODY]]
617 ; CHECK-NEXT: ret void
622 for.body: ; preds = %entry, %for.body
623 %indvar = phi i64 [ 0, %entry ], [ %indvar.next, %for.body ]
624 %arrayidx = getelementptr i32*, i32** %P, i64 %indvar
625 store i32* @G, i32** %arrayidx, align 4
626 %indvar.next = add i64 %indvar, 1
627 %exitcond = icmp eq i64 %indvar.next, 10000
628 br i1 %exitcond, label %for.end, label %for.body
630 for.end: ; preds = %for.body
636 ; PR9815 - This is a partial overlap case that cannot be safely transformed
638 @g_50 = global [7 x i32] [i32 0, i32 0, i32 0, i32 0, i32 1, i32 0, i32 0], align 16
640 define i32 @test14() nounwind {
641 ; CHECK-LABEL: @test14(
643 ; CHECK-NEXT: br label [[FOR_BODY:%.*]]
645 ; CHECK-NEXT: [[T5:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ 0, [[ENTRY:%.*]] ]
646 ; CHECK-NEXT: [[ADD:%.*]] = add nsw i32 [[T5]], 4
647 ; CHECK-NEXT: [[IDXPROM:%.*]] = sext i32 [[ADD]] to i64
648 ; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [7 x i32], [7 x i32]* @g_50, i32 0, i64 [[IDXPROM]]
649 ; CHECK-NEXT: [[T2:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
650 ; CHECK-NEXT: [[ADD4:%.*]] = add nsw i32 [[T5]], 5
651 ; CHECK-NEXT: [[IDXPROM5:%.*]] = sext i32 [[ADD4]] to i64
652 ; CHECK-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds [7 x i32], [7 x i32]* @g_50, i32 0, i64 [[IDXPROM5]]
653 ; CHECK-NEXT: store i32 [[T2]], i32* [[ARRAYIDX6]], align 4
654 ; CHECK-NEXT: [[INC]] = add nsw i32 [[T5]], 1
655 ; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 [[INC]], 2
656 ; CHECK-NEXT: br i1 [[CMP]], label [[FOR_BODY]], label [[FOR_END:%.*]]
658 ; CHECK-NEXT: [[T8:%.*]] = load i32, i32* getelementptr inbounds ([7 x i32], [7 x i32]* @g_50, i32 0, i64 6), align 4
659 ; CHECK-NEXT: ret i32 [[T8]]
664 for.body: ; preds = %for.inc, %for.body.lr.ph
665 %t5 = phi i32 [ %inc, %for.body ], [ 0, %entry ]
666 %add = add nsw i32 %t5, 4
667 %idxprom = sext i32 %add to i64
668 %arrayidx = getelementptr inbounds [7 x i32], [7 x i32]* @g_50, i32 0, i64 %idxprom
669 %t2 = load i32, i32* %arrayidx, align 4
670 %add4 = add nsw i32 %t5, 5
671 %idxprom5 = sext i32 %add4 to i64
672 %arrayidx6 = getelementptr inbounds [7 x i32], [7 x i32]* @g_50, i32 0, i64 %idxprom5
673 store i32 %t2, i32* %arrayidx6, align 4
674 %inc = add nsw i32 %t5, 1
675 %cmp = icmp slt i32 %inc, 2
676 br i1 %cmp, label %for.body, label %for.end
678 for.end: ; preds = %for.inc
679 %t8 = load i32, i32* getelementptr inbounds ([7 x i32], [7 x i32]* @g_50, i32 0, i64 6), align 4
684 define void @PR14241(i32* %s, i64 %size) {
685 ; Ensure that we don't form a memcpy for strided loops. Briefly, when we taught
686 ; LoopIdiom about memmove and strided loops, this got miscompiled into a memcpy
687 ; instead of a memmove. If we get the memmove transform back, this will catch
690 ; CHECK-LABEL: @PR14241(
692 ; CHECK-NEXT: [[S1:%.*]] = bitcast i32* [[S:%.*]] to i8*
693 ; CHECK-NEXT: [[END_IDX:%.*]] = add i64 [[SIZE:%.*]], -1
694 ; CHECK-NEXT: [[END_PTR:%.*]] = getelementptr inbounds i32, i32* [[S]], i64 [[END_IDX]]
695 ; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i32, i32* [[S]], i64 1
696 ; CHECK-NEXT: [[SCEVGEP2:%.*]] = bitcast i32* [[SCEVGEP]] to i8*
697 ; CHECK-NEXT: [[TMP0:%.*]] = shl i64 [[SIZE]], 2
698 ; CHECK-NEXT: [[TMP1:%.*]] = add i64 [[TMP0]], -8
699 ; CHECK-NEXT: [[TMP2:%.*]] = lshr i64 [[TMP1]], 2
700 ; CHECK-NEXT: [[TMP3:%.*]] = shl nuw i64 [[TMP2]], 2
701 ; CHECK-NEXT: [[TMP4:%.*]] = add i64 [[TMP3]], 4
702 ; CHECK-NEXT: call void @llvm.memmove.p0i8.p0i8.i64(i8* align 4 [[S1]], i8* align 4 [[SCEVGEP2]], i64 [[TMP4]], i1 false)
703 ; CHECK-NEXT: br label [[WHILE_BODY:%.*]]
705 ; CHECK-NEXT: [[PHI_PTR:%.*]] = phi i32* [ [[S]], [[ENTRY:%.*]] ], [ [[NEXT_PTR:%.*]], [[WHILE_BODY]] ]
706 ; CHECK-NEXT: [[SRC_PTR:%.*]] = getelementptr inbounds i32, i32* [[PHI_PTR]], i64 1
707 ; CHECK-NEXT: [[VAL:%.*]] = load i32, i32* [[SRC_PTR]], align 4
708 ; CHECK-NEXT: [[DST_PTR:%.*]] = getelementptr inbounds i32, i32* [[PHI_PTR]], i64 0
709 ; CHECK-NEXT: [[NEXT_PTR]] = getelementptr inbounds i32, i32* [[PHI_PTR]], i64 1
710 ; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32* [[NEXT_PTR]], [[END_PTR]]
711 ; CHECK-NEXT: br i1 [[CMP]], label [[EXIT:%.*]], label [[WHILE_BODY]]
713 ; CHECK-NEXT: ret void
717 %end.idx = add i64 %size, -1
718 %end.ptr = getelementptr inbounds i32, i32* %s, i64 %end.idx
722 %phi.ptr = phi i32* [ %s, %entry ], [ %next.ptr, %while.body ]
723 %src.ptr = getelementptr inbounds i32, i32* %phi.ptr, i64 1
724 %val = load i32, i32* %src.ptr, align 4
725 %dst.ptr = getelementptr inbounds i32, i32* %phi.ptr, i64 0
726 store i32 %val, i32* %dst.ptr, align 4
727 %next.ptr = getelementptr inbounds i32, i32* %phi.ptr, i64 1
728 %cmp = icmp eq i32* %next.ptr, %end.ptr
729 br i1 %cmp, label %exit, label %while.body
735 ; Recognize loops with a negative stride.
736 define void @test15(i32* nocapture %f) {
737 ; CHECK-LABEL: @test15(
739 ; CHECK-NEXT: [[F1:%.*]] = bitcast i32* [[F:%.*]] to i8*
740 ; CHECK-NEXT: call void @llvm.memset.p0i8.i64(i8* align 4 [[F1]], i8 0, i64 262148, i1 false)
741 ; CHECK-NEXT: br label [[FOR_BODY:%.*]]
743 ; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 65536, [[ENTRY:%.*]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
744 ; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[F]], i64 [[INDVARS_IV]]
745 ; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nsw i64 [[INDVARS_IV]], -1
746 ; CHECK-NEXT: [[CMP:%.*]] = icmp sgt i64 [[INDVARS_IV]], 0
747 ; CHECK-NEXT: br i1 [[CMP]], label [[FOR_BODY]], label [[FOR_COND_CLEANUP:%.*]]
748 ; CHECK: for.cond.cleanup:
749 ; CHECK-NEXT: ret void
755 %indvars.iv = phi i64 [ 65536, %entry ], [ %indvars.iv.next, %for.body ]
756 %arrayidx = getelementptr inbounds i32, i32* %f, i64 %indvars.iv
757 store i32 0, i32* %arrayidx, align 4
758 %indvars.iv.next = add nsw i64 %indvars.iv, -1
759 %cmp = icmp sgt i64 %indvars.iv, 0
760 br i1 %cmp, label %for.body, label %for.cond.cleanup
766 ; Loop with a negative stride. Verify an aliasing write to f[65536] prevents
767 ; the creation of a memset.
768 define void @test16(i32* nocapture %f) {
769 ; CHECK-LABEL: @test16(
771 ; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i32, i32* [[F:%.*]], i64 65536
772 ; CHECK-NEXT: br label [[FOR_BODY:%.*]]
774 ; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 65536, [[ENTRY:%.*]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
775 ; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[F]], i64 [[INDVARS_IV]]
776 ; CHECK-NEXT: store i32 0, i32* [[ARRAYIDX]], align 4
777 ; CHECK-NEXT: store i32 1, i32* [[ARRAYIDX1]], align 4
778 ; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nsw i64 [[INDVARS_IV]], -1
779 ; CHECK-NEXT: [[CMP:%.*]] = icmp sgt i64 [[INDVARS_IV]], 0
780 ; CHECK-NEXT: br i1 [[CMP]], label [[FOR_BODY]], label [[FOR_COND_CLEANUP:%.*]]
781 ; CHECK: for.cond.cleanup:
782 ; CHECK-NEXT: ret void
785 %arrayidx1 = getelementptr inbounds i32, i32* %f, i64 65536
788 for.body: ; preds = %entry, %for.body
789 %indvars.iv = phi i64 [ 65536, %entry ], [ %indvars.iv.next, %for.body ]
790 %arrayidx = getelementptr inbounds i32, i32* %f, i64 %indvars.iv
791 store i32 0, i32* %arrayidx, align 4
792 store i32 1, i32* %arrayidx1, align 4
793 %indvars.iv.next = add nsw i64 %indvars.iv, -1
794 %cmp = icmp sgt i64 %indvars.iv, 0
795 br i1 %cmp, label %for.body, label %for.cond.cleanup
797 for.cond.cleanup: ; preds = %for.body
801 ; Handle memcpy-able loops with negative stride.
; NOTE(review): the CHECK lines show the store removed from the rewritten loop
; body and a single memcpy emitted in while.body.preheader, sized from %c.
802 define noalias i32* @test17(i32* nocapture readonly %a, i32 %c) {
803 ; CHECK-LABEL: @test17(
805 ; CHECK-NEXT: [[CONV:%.*]] = sext i32 [[C:%.*]] to i64
806 ; CHECK-NEXT: [[MUL:%.*]] = shl nsw i64 [[CONV]], 2
807 ; CHECK-NEXT: [[CALL:%.*]] = tail call noalias i8* @malloc(i64 [[MUL]])
808 ; CHECK-NEXT: [[TMP0:%.*]] = bitcast i8* [[CALL]] to i32*
809 ; CHECK-NEXT: [[TOBOOL_9:%.*]] = icmp eq i32 [[C]], 0
810 ; CHECK-NEXT: br i1 [[TOBOOL_9]], label [[WHILE_END:%.*]], label [[WHILE_BODY_PREHEADER:%.*]]
811 ; CHECK: while.body.preheader:
812 ; CHECK-NEXT: [[TMP1:%.*]] = add i32 [[C]], -1
813 ; CHECK-NEXT: [[TMP2:%.*]] = sext i32 [[TMP1]] to i64
814 ; CHECK-NEXT: [[TMP3:%.*]] = shl nsw i64 [[TMP2]], 2
815 ; CHECK-NEXT: [[TMP4:%.*]] = zext i32 [[TMP1]] to i64
816 ; CHECK-NEXT: [[TMP5:%.*]] = shl nuw nsw i64 [[TMP4]], 2
817 ; CHECK-NEXT: [[TMP6:%.*]] = sub i64 [[TMP3]], [[TMP5]]
818 ; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, i8* [[CALL]], i64 [[TMP6]]
819 ; CHECK-NEXT: [[TMP7:%.*]] = sub i64 [[TMP2]], [[TMP4]]
820 ; CHECK-NEXT: [[SCEVGEP1:%.*]] = getelementptr i32, i32* [[A:%.*]], i64 [[TMP7]]
821 ; CHECK-NEXT: [[SCEVGEP12:%.*]] = bitcast i32* [[SCEVGEP1]] to i8*
822 ; CHECK-NEXT: [[TMP8:%.*]] = zext i32 [[C]] to i64
823 ; CHECK-NEXT: [[TMP9:%.*]] = shl nuw nsw i64 [[TMP8]], 2
824 ; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[SCEVGEP]], i8* align 4 [[SCEVGEP12]], i64 [[TMP9]], i1 false)
825 ; CHECK-NEXT: br label [[WHILE_BODY:%.*]]
827 ; CHECK-NEXT: [[DEC10_IN:%.*]] = phi i32 [ [[DEC10:%.*]], [[WHILE_BODY]] ], [ [[C]], [[WHILE_BODY_PREHEADER]] ]
828 ; CHECK-NEXT: [[DEC10]] = add nsw i32 [[DEC10_IN]], -1
829 ; CHECK-NEXT: [[IDXPROM:%.*]] = sext i32 [[DEC10]] to i64
830 ; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[IDXPROM]]
831 ; CHECK-NEXT: [[TMP10:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
832 ; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, i32* [[TMP0]], i64 [[IDXPROM]]
833 ; CHECK-NEXT: [[TOBOOL:%.*]] = icmp eq i32 [[DEC10]], 0
834 ; CHECK-NEXT: br i1 [[TOBOOL]], label [[WHILE_END_LOOPEXIT:%.*]], label [[WHILE_BODY]]
835 ; CHECK: while.end.loopexit:
836 ; CHECK-NEXT: br label [[WHILE_END]]
838 ; CHECK-NEXT: ret i32* [[TMP0]]
841 %conv = sext i32 %c to i64
842 %mul = shl nsw i64 %conv, 2
843 %call = tail call noalias i8* @malloc(i64 %mul)
844 %0 = bitcast i8* %call to i32*
845 %tobool.9 = icmp eq i32 %c, 0
846 br i1 %tobool.9, label %while.end, label %while.body.preheader
848 while.body.preheader: ; preds = %entry
851 while.body: ; preds = %while.body.preheader, %while.body
852 %dec10.in = phi i32 [ %dec10, %while.body ], [ %c, %while.body.preheader ]
853 %dec10 = add nsw i32 %dec10.in, -1
854 %idxprom = sext i32 %dec10 to i64
855 %arrayidx = getelementptr inbounds i32, i32* %a, i64 %idxprom
856 %1 = load i32, i32* %arrayidx, align 4
857 %arrayidx2 = getelementptr inbounds i32, i32* %0, i64 %idxprom
858 store i32 %1, i32* %arrayidx2, align 4
859 %tobool = icmp eq i32 %dec10, 0
860 br i1 %tobool, label %while.end.loopexit, label %while.body
862 while.end.loopexit: ; preds = %while.body
865 while.end: ; preds = %while.end.loopexit, %entry
869 declare noalias i8* @malloc(i64)
871 ; Handle memcpy-able loops with negative stride.
872 ; void test18(unsigned *__restrict__ a, unsigned *__restrict__ b) {
873 ;   for (int i = 2047; i >= 0; --i) {
; NOTE(review): constant trip count (2048 i32 elements), so the CHECK lines
; expect a constant-size memcpy of 8192 bytes hoisted to the entry block.
877 define void @test18(i32* noalias nocapture %a, i32* noalias nocapture readonly %b) #0 {
878 ; CHECK-LABEL: @test18(
880 ; CHECK-NEXT: [[A1:%.*]] = bitcast i32* [[A:%.*]] to i8*
881 ; CHECK-NEXT: [[B2:%.*]] = bitcast i32* [[B:%.*]] to i8*
882 ; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[A1]], i8* align 4 [[B2]], i64 8192, i1 false)
883 ; CHECK-NEXT: br label [[FOR_BODY:%.*]]
885 ; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 2047, [[ENTRY:%.*]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
886 ; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[B]], i64 [[INDVARS_IV]]
887 ; CHECK-NEXT: [[TMP0:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
888 ; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[INDVARS_IV]]
889 ; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nsw i64 [[INDVARS_IV]], -1
890 ; CHECK-NEXT: [[CMP:%.*]] = icmp sgt i64 [[INDVARS_IV]], 0
891 ; CHECK-NEXT: br i1 [[CMP]], label [[FOR_BODY]], label [[FOR_COND_CLEANUP:%.*]]
892 ; CHECK: for.cond.cleanup:
893 ; CHECK-NEXT: ret void
898 for.body: ; preds = %entry, %for.body
899 %indvars.iv = phi i64 [ 2047, %entry ], [ %indvars.iv.next, %for.body ]
900 %arrayidx = getelementptr inbounds i32, i32* %b, i64 %indvars.iv
901 %0 = load i32, i32* %arrayidx, align 4
902 %arrayidx2 = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
903 store i32 %0, i32* %arrayidx2, align 4
904 %indvars.iv.next = add nsw i64 %indvars.iv, -1
905 %cmp = icmp sgt i64 %indvars.iv, 0
906 br i1 %cmp, label %for.body, label %for.cond.cleanup
908 for.cond.cleanup: ; preds = %for.body
912 ; Two dimensional nested loop with negative stride should be promoted to one big memset.
; NOTE(review): 100x100 nested zeroing loops collapse into a single
; 10000-byte memset in the entry block; only the stores are removed.
913 define void @test19(i8* nocapture %X) {
914 ; CHECK-LABEL: @test19(
916 ; CHECK-NEXT: call void @llvm.memset.p0i8.i64(i8* align 1 [[X:%.*]], i8 0, i64 10000, i1 false)
917 ; CHECK-NEXT: br label [[FOR_COND1_PREHEADER:%.*]]
918 ; CHECK: for.cond1.preheader:
919 ; CHECK-NEXT: [[INDVAR:%.*]] = phi i64 [ [[INDVAR_NEXT:%.*]], [[FOR_INC4:%.*]] ], [ 0, [[ENTRY:%.*]] ]
920 ; CHECK-NEXT: [[I_06:%.*]] = phi i32 [ 99, [[ENTRY]] ], [ [[DEC5:%.*]], [[FOR_INC4]] ]
921 ; CHECK-NEXT: [[TMP0:%.*]] = mul nsw i64 [[INDVAR]], -100
922 ; CHECK-NEXT: [[TMP1:%.*]] = add i64 [[TMP0]], 9900
923 ; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, i8* [[X]], i64 [[TMP1]]
924 ; CHECK-NEXT: [[MUL:%.*]] = mul nsw i32 [[I_06]], 100
925 ; CHECK-NEXT: br label [[FOR_BODY3:%.*]]
927 ; CHECK-NEXT: [[J_05:%.*]] = phi i32 [ 99, [[FOR_COND1_PREHEADER]] ], [ [[DEC:%.*]], [[FOR_BODY3]] ]
928 ; CHECK-NEXT: [[ADD:%.*]] = add nsw i32 [[J_05]], [[MUL]]
929 ; CHECK-NEXT: [[IDXPROM:%.*]] = sext i32 [[ADD]] to i64
930 ; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, i8* [[X]], i64 [[IDXPROM]]
931 ; CHECK-NEXT: [[DEC]] = add nsw i32 [[J_05]], -1
932 ; CHECK-NEXT: [[CMP2:%.*]] = icmp sgt i32 [[J_05]], 0
933 ; CHECK-NEXT: br i1 [[CMP2]], label [[FOR_BODY3]], label [[FOR_INC4]]
935 ; CHECK-NEXT: [[DEC5]] = add nsw i32 [[I_06]], -1
936 ; CHECK-NEXT: [[CMP:%.*]] = icmp sgt i32 [[I_06]], 0
937 ; CHECK-NEXT: [[INDVAR_NEXT]] = add i64 [[INDVAR]], 1
938 ; CHECK-NEXT: br i1 [[CMP]], label [[FOR_COND1_PREHEADER]], label [[FOR_END6:%.*]]
940 ; CHECK-NEXT: ret void
943 br label %for.cond1.preheader
945 for.cond1.preheader: ; preds = %entry, %for.inc4
946 %i.06 = phi i32 [ 99, %entry ], [ %dec5, %for.inc4 ]
947 %mul = mul nsw i32 %i.06, 100
950 for.body3: ; preds = %for.cond1.preheader, %for.body3
951 %j.05 = phi i32 [ 99, %for.cond1.preheader ], [ %dec, %for.body3 ]
952 %add = add nsw i32 %j.05, %mul
953 %idxprom = sext i32 %add to i64
954 %arrayidx = getelementptr inbounds i8, i8* %X, i64 %idxprom
955 store i8 0, i8* %arrayidx, align 1
956 %dec = add nsw i32 %j.05, -1
957 %cmp2 = icmp sgt i32 %j.05, 0
958 br i1 %cmp2, label %for.body3, label %for.inc4
960 for.inc4: ; preds = %for.body3
961 %dec5 = add nsw i32 %i.06, -1
962 %cmp = icmp sgt i32 %i.06, 0
963 br i1 %cmp, label %for.cond1.preheader, label %for.end6
965 for.end6: ; preds = %for.inc4
969 ; Handle loops where the trip count is a narrow integer that needs to be
; NOTE(review): i32 trip count %size is zext'ed to i64 and scaled by 8
; (i64 stores) before the memset call, as the CHECK lines require.
971 define void @form_memset_narrow_size(i64* %ptr, i32 %size) {
972 ; CHECK-LABEL: @form_memset_narrow_size(
974 ; CHECK-NEXT: [[PTR1:%.*]] = bitcast i64* [[PTR:%.*]] to i8*
975 ; CHECK-NEXT: [[CMP1:%.*]] = icmp sgt i32 [[SIZE:%.*]], 0
976 ; CHECK-NEXT: br i1 [[CMP1]], label [[LOOP_PH:%.*]], label [[EXIT:%.*]]
978 ; CHECK-NEXT: [[TMP0:%.*]] = zext i32 [[SIZE]] to i64
979 ; CHECK-NEXT: [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 3
980 ; CHECK-NEXT: call void @llvm.memset.p0i8.i64(i8* align 8 [[PTR1]], i8 0, i64 [[TMP1]], i1 false)
981 ; CHECK-NEXT: br label [[LOOP_BODY:%.*]]
983 ; CHECK-NEXT: [[STOREMERGE4:%.*]] = phi i32 [ 0, [[LOOP_PH]] ], [ [[INC:%.*]], [[LOOP_BODY]] ]
984 ; CHECK-NEXT: [[IDXPROM:%.*]] = sext i32 [[STOREMERGE4]] to i64
985 ; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, i64* [[PTR]], i64 [[IDXPROM]]
986 ; CHECK-NEXT: [[INC]] = add nsw i32 [[STOREMERGE4]], 1
987 ; CHECK-NEXT: [[CMP2:%.*]] = icmp slt i32 [[INC]], [[SIZE]]
988 ; CHECK-NEXT: br i1 [[CMP2]], label [[LOOP_BODY]], label [[LOOP_EXIT:%.*]]
990 ; CHECK-NEXT: br label [[EXIT]]
992 ; CHECK-NEXT: ret void
995 %cmp1 = icmp sgt i32 %size, 0
996 br i1 %cmp1, label %loop.ph, label %exit
1002 %storemerge4 = phi i32 [ 0, %loop.ph ], [ %inc, %loop.body ]
1003 %idxprom = sext i32 %storemerge4 to i64
1004 %arrayidx = getelementptr inbounds i64, i64* %ptr, i64 %idxprom
1005 store i64 0, i64* %arrayidx, align 8
1006 %inc = add nsw i32 %storemerge4, 1
1007 %cmp2 = icmp slt i32 %inc, %size
1008 br i1 %cmp2, label %loop.body, label %loop.exit
; NOTE(review): memcpy counterpart of form_memset_narrow_size — same narrow
; (i32) trip count widened to i64 and scaled by the i64 element size.
1017 define void @form_memcpy_narrow_size(i64* noalias %dst, i64* noalias %src, i32 %size) {
1018 ; CHECK-LABEL: @form_memcpy_narrow_size(
1019 ; CHECK-NEXT: entry:
1020 ; CHECK-NEXT: [[DST1:%.*]] = bitcast i64* [[DST:%.*]] to i8*
1021 ; CHECK-NEXT: [[SRC2:%.*]] = bitcast i64* [[SRC:%.*]] to i8*
1022 ; CHECK-NEXT: [[CMP1:%.*]] = icmp sgt i32 [[SIZE:%.*]], 0
1023 ; CHECK-NEXT: br i1 [[CMP1]], label [[LOOP_PH:%.*]], label [[EXIT:%.*]]
1025 ; CHECK-NEXT: [[TMP0:%.*]] = zext i32 [[SIZE]] to i64
1026 ; CHECK-NEXT: [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 3
1027 ; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[DST1]], i8* align 8 [[SRC2]], i64 [[TMP1]], i1 false)
1028 ; CHECK-NEXT: br label [[LOOP_BODY:%.*]]
1030 ; CHECK-NEXT: [[STOREMERGE4:%.*]] = phi i32 [ 0, [[LOOP_PH]] ], [ [[INC:%.*]], [[LOOP_BODY]] ]
1031 ; CHECK-NEXT: [[IDXPROM1:%.*]] = sext i32 [[STOREMERGE4]] to i64
1032 ; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i64, i64* [[SRC]], i64 [[IDXPROM1]]
1033 ; CHECK-NEXT: [[V:%.*]] = load i64, i64* [[ARRAYIDX1]], align 8
1034 ; CHECK-NEXT: [[IDXPROM2:%.*]] = sext i32 [[STOREMERGE4]] to i64
1035 ; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i64, i64* [[DST]], i64 [[IDXPROM2]]
1036 ; CHECK-NEXT: [[INC]] = add nsw i32 [[STOREMERGE4]], 1
1037 ; CHECK-NEXT: [[CMP2:%.*]] = icmp slt i32 [[INC]], [[SIZE]]
1038 ; CHECK-NEXT: br i1 [[CMP2]], label [[LOOP_BODY]], label [[LOOP_EXIT:%.*]]
1040 ; CHECK-NEXT: br label [[EXIT]]
1042 ; CHECK-NEXT: ret void
1045 %cmp1 = icmp sgt i32 %size, 0
1046 br i1 %cmp1, label %loop.ph, label %exit
1052 %storemerge4 = phi i32 [ 0, %loop.ph ], [ %inc, %loop.body ]
1053 %idxprom1 = sext i32 %storemerge4 to i64
1054 %arrayidx1 = getelementptr inbounds i64, i64* %src, i64 %idxprom1
1055 %v = load i64, i64* %arrayidx1, align 8
1056 %idxprom2 = sext i32 %storemerge4 to i64
1057 %arrayidx2 = getelementptr inbounds i64, i64* %dst, i64 %idxprom2
1058 store i64 %v, i64* %arrayidx2, align 8
1059 %inc = add nsw i32 %storemerge4, 1
1060 %cmp2 = icmp slt i32 %inc, %size
1061 br i1 %cmp2, label %loop.body, label %loop.exit
1070 ;; Memmove formation.
; NOTE(review): src and dest overlap (both derived from %Src, one byte apart),
; so a memmove — not memcpy — is formed; the loop's store is removed.
1071 define void @PR46179_positive_stride(i8* %Src, i64 %Size) {
1072 ; CHECK-LABEL: @PR46179_positive_stride(
1073 ; CHECK-NEXT: bb.nph:
1074 ; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, i8* [[SRC:%.*]], i64 1
1075 ; CHECK-NEXT: call void @llvm.memmove.p0i8.p0i8.i64(i8* align 1 [[SRC]], i8* align 1 [[SCEVGEP]], i64 [[SIZE:%.*]], i1 false)
1076 ; CHECK-NEXT: br label [[FOR_BODY:%.*]]
1078 ; CHECK-NEXT: [[INDVAR:%.*]] = phi i64 [ 0, [[BB_NPH:%.*]] ], [ [[INDVAR_NEXT:%.*]], [[FOR_BODY]] ]
1079 ; CHECK-NEXT: [[STEP:%.*]] = add nuw nsw i64 [[INDVAR]], 1
1080 ; CHECK-NEXT: [[SRCI:%.*]] = getelementptr i8, i8* [[SRC]], i64 [[STEP]]
1081 ; CHECK-NEXT: [[DESTI:%.*]] = getelementptr i8, i8* [[SRC]], i64 [[INDVAR]]
1082 ; CHECK-NEXT: [[V:%.*]] = load i8, i8* [[SRCI]], align 1
1083 ; CHECK-NEXT: [[INDVAR_NEXT]] = add i64 [[INDVAR]], 1
1084 ; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVAR_NEXT]], [[SIZE]]
1085 ; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_END:%.*]], label [[FOR_BODY]]
1087 ; CHECK-NEXT: ret void
1092 for.body: ; preds = %bb.nph, %for.body
1093 %indvar = phi i64 [ 0, %bb.nph ], [ %indvar.next, %for.body ]
1094 %Step = add nuw nsw i64 %indvar, 1
1095 %SrcI = getelementptr i8, i8* %Src, i64 %Step
1096 %DestI = getelementptr i8, i8* %Src, i64 %indvar
1097 %V = load i8, i8* %SrcI, align 1
1098 store i8 %V, i8* %DestI, align 1
1099 %indvar.next = add i64 %indvar, 1
1100 %exitcond = icmp eq i64 %indvar.next, %Size
1101 br i1 %exitcond, label %for.end, label %for.body
1103 for.end: ; preds = %for.body, %entry
1107 declare void @llvm.memcpy.p0i8.p0i8.i64(i8* noalias nocapture writeonly, i8* noalias nocapture readonly, i64, i1 immarg)
1109 ;; Memmove formation. We expect exactly same memmove result like in PR46179_positive_stride output.
; NOTE(review): fixed FileCheck usage — SIZE is bound on the memmove line
; (1114), so the exit-condition line must reference [[SIZE]] rather than
; redefining it with [[SIZE:%.*]]; this matches PR46179_positive_stride above.
1110 define void @loop_with_memcpy_PR46179_positive_stride(i8* %Src, i64 %Size) {
1111 ; CHECK-LABEL: @loop_with_memcpy_PR46179_positive_stride(
1112 ; CHECK-NEXT: bb.nph:
1113 ; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, i8* [[SRC:%.*]], i64 1
1114 ; CHECK-NEXT: call void @llvm.memmove.p0i8.p0i8.i64(i8* align 1 [[SRC]], i8* align 1 [[SCEVGEP]], i64 [[SIZE:%.*]], i1 false)
1115 ; CHECK-NEXT: br label [[FOR_BODY:%.*]]
1117 ; CHECK-NEXT: [[INDVAR:%.*]] = phi i64 [ 0, [[BB_NPH:%.*]] ], [ [[INDVAR_NEXT:%.*]], [[FOR_BODY]] ]
1118 ; CHECK-NEXT: [[STEP:%.*]] = add nuw nsw i64 [[INDVAR]], 1
1119 ; CHECK-NEXT: [[SRCI:%.*]] = getelementptr i8, i8* [[SRC]], i64 [[STEP]]
1120 ; CHECK-NEXT: [[DESTI:%.*]] = getelementptr i8, i8* [[SRC]], i64 [[INDVAR]]
1121 ; CHECK-NEXT: [[INDVAR_NEXT]] = add i64 [[INDVAR]], 1
1122 ; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVAR_NEXT]], [[SIZE]]
1123 ; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_END:%.*]], label [[FOR_BODY]]
1125 ; CHECK-NEXT: ret void
1130 for.body: ; preds = %bb.nph, %for.body
1131 %indvar = phi i64 [ 0, %bb.nph ], [ %indvar.next, %for.body ]
1132 %Step = add nuw nsw i64 %indvar, 1
1133 %SrcI = getelementptr i8, i8* %Src, i64 %Step
1134 %DestI = getelementptr i8, i8* %Src, i64 %indvar
1135 call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 1 %DestI, i8* align 1 %SrcI, i64 1, i1 false)
1136 %indvar.next = add i64 %indvar, 1
1137 %exitcond = icmp eq i64 %indvar.next, %Size
1138 br i1 %exitcond, label %for.end, label %for.body
1140 for.end: ; preds = %for.body, %entry
1144 ;; Memmove formation.
; NOTE(review): negative-stride overlapping copy; the memmove lands in the
; guarded for.body.preheader (created because the loop is bottom-tested).
1145 define void @PR46179_negative_stride(i8* %Src, i64 %Size) {
1146 ; CHECK-LABEL: @PR46179_negative_stride(
1147 ; CHECK-NEXT: bb.nph:
1148 ; CHECK-NEXT: [[CMP1:%.*]] = icmp sgt i64 [[SIZE:%.*]], 0
1149 ; CHECK-NEXT: br i1 [[CMP1]], label [[FOR_BODY_PREHEADER:%.*]], label [[FOR_END:%.*]]
1150 ; CHECK: for.body.preheader:
1151 ; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, i8* [[SRC:%.*]], i64 1
1152 ; CHECK-NEXT: call void @llvm.memmove.p0i8.p0i8.i64(i8* align 1 [[SCEVGEP]], i8* align 1 [[SRC]], i64 [[SIZE]], i1 false)
1153 ; CHECK-NEXT: br label [[FOR_BODY:%.*]]
1155 ; CHECK-NEXT: [[INDVAR:%.*]] = phi i64 [ [[STEP:%.*]], [[FOR_BODY]] ], [ [[SIZE]], [[FOR_BODY_PREHEADER]] ]
1156 ; CHECK-NEXT: [[STEP]] = add nsw i64 [[INDVAR]], -1
1157 ; CHECK-NEXT: [[SRCI:%.*]] = getelementptr inbounds i8, i8* [[SRC]], i64 [[STEP]]
1158 ; CHECK-NEXT: [[V:%.*]] = load i8, i8* [[SRCI]], align 1
1159 ; CHECK-NEXT: [[DESTI:%.*]] = getelementptr inbounds i8, i8* [[SRC]], i64 [[INDVAR]]
1160 ; CHECK-NEXT: [[EXITCOND:%.*]] = icmp sgt i64 [[INDVAR]], 1
1161 ; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_BODY]], label [[FOR_END_LOOPEXIT:%.*]]
1162 ; CHECK: for.end.loopexit:
1163 ; CHECK-NEXT: br label [[FOR_END]]
1165 ; CHECK-NEXT: ret void
1168 %cmp1 = icmp sgt i64 %Size, 0
1169 br i1 %cmp1, label %for.body, label %for.end
1171 for.body: ; preds = %bb.nph, %.for.body
1172 %indvar = phi i64 [ %Step, %for.body ], [ %Size, %bb.nph ]
1173 %Step = add nsw i64 %indvar, -1
1174 %SrcI = getelementptr inbounds i8, i8* %Src, i64 %Step
1175 %V = load i8, i8* %SrcI, align 1
1176 %DestI = getelementptr inbounds i8, i8* %Src, i64 %indvar
1177 store i8 %V, i8* %DestI, align 1
1178 %exitcond = icmp sgt i64 %indvar, 1
1179 br i1 %exitcond, label %for.body, label %for.end
1181 for.end: ; preds = %.for.body, %bb.nph
1185 ;; Memmove formation. We expect exactly same memmove result like in PR46179_negative_stride output.
; NOTE(review): fixed FileCheck variables — [[STEP]] was used in the phi
; (line 1196) before its [[STEP:%.*]] definition (line 1197), which FileCheck
; rejects as an undefined variable; the definition now sits at the first
; textual occurrence, matching PR46179_negative_stride above. Also, SRC is
; already bound on line 1192, so line 1198 uses [[SRC]] instead of redefining.
1186 define void @loop_with_memcpy_PR46179_negative_stride(i8* %Src, i64 %Size) {
1187 ; CHECK-LABEL: @loop_with_memcpy_PR46179_negative_stride(
1188 ; CHECK-NEXT: bb.nph:
1189 ; CHECK-NEXT: [[CMP1:%.*]] = icmp sgt i64 [[SIZE:%.*]], 0
1190 ; CHECK-NEXT: br i1 [[CMP1]], label [[FOR_BODY_PREHEADER:%.*]], label [[FOR_END:%.*]]
1191 ; CHECK: for.body.preheader:
1192 ; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, i8* [[SRC:%.*]], i64 1
1193 ; CHECK-NEXT: call void @llvm.memmove.p0i8.p0i8.i64(i8* align 1 [[SCEVGEP]], i8* align 1 [[SRC]], i64 [[SIZE]], i1 false)
1194 ; CHECK-NEXT: br label [[FOR_BODY:%.*]]
1196 ; CHECK-NEXT: [[INDVAR:%.*]] = phi i64 [ [[STEP:%.*]], [[FOR_BODY]] ], [ [[SIZE]], [[FOR_BODY_PREHEADER]] ]
1197 ; CHECK-NEXT: [[STEP]] = add nsw i64 [[INDVAR]], -1
1198 ; CHECK-NEXT: [[SRCI:%.*]] = getelementptr inbounds i8, i8* [[SRC]], i64 [[STEP]]
1199 ; CHECK-NEXT: [[DESTI:%.*]] = getelementptr inbounds i8, i8* [[SRC]], i64 [[INDVAR]]
1200 ; CHECK-NEXT: [[EXITCOND:%.*]] = icmp sgt i64 [[INDVAR]], 1
1201 ; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_BODY]], label [[FOR_END]]
1203 ; CHECK-NEXT: ret void
1206 %cmp1 = icmp sgt i64 %Size, 0
1207 br i1 %cmp1, label %for.body, label %for.end
1209 for.body: ; preds = %bb.nph, %.for.body
1210 %indvar = phi i64 [ %Step, %for.body ], [ %Size, %bb.nph ]
1211 %Step = add nsw i64 %indvar, -1
1212 %SrcI = getelementptr inbounds i8, i8* %Src, i64 %Step
1213 %DestI = getelementptr inbounds i8, i8* %Src, i64 %indvar
1214 call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 1 %DestI, i8* align 1 %SrcI, i64 1, i1 false)
1215 %exitcond = icmp sgt i64 %indvar, 1
1216 br i1 %exitcond, label %for.body, label %for.end
1218 for.end: ; preds = %.for.body, %bb.nph
1222 ;; Memmove formation.
; NOTE(review): fixed FileCheck variables — [[STEP]] was used in the phi
; (line 1235) before its [[STEP:%.*]] definition (line 1236); the definition
; now sits at the first textual occurrence. SIZE is already bound on the
; smax line (1227), so line 1239 uses [[SIZE]] instead of redefining it.
1223 define void @loop_with_memcpy_stride16(i8* %Src, i64 %Size) {
1224 ; CHECK-LABEL: @loop_with_memcpy_stride16(
1225 ; CHECK-NEXT: bb.nph:
1226 ; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, i8* [[SRC:%.*]], i64 16
1227 ; CHECK-NEXT: [[SMAX:%.*]] = call i64 @llvm.smax.i64(i64 [[SIZE:%.*]], i64 16)
1228 ; CHECK-NEXT: [[TMP0:%.*]] = add nsw i64 [[SMAX]], -1
1229 ; CHECK-NEXT: [[TMP1:%.*]] = lshr i64 [[TMP0]], 4
1230 ; CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 4
1231 ; CHECK-NEXT: [[TMP3:%.*]] = add nuw i64 [[TMP2]], 16
1232 ; CHECK-NEXT: call void @llvm.memmove.p0i8.p0i8.i64(i8* align 1 [[SRC]], i8* align 1 [[SCEVGEP]], i64 [[TMP3]], i1 false)
1233 ; CHECK-NEXT: br label [[FOR_BODY:%.*]]
1235 ; CHECK-NEXT: [[INDVAR:%.*]] = phi i64 [ [[STEP:%.*]], [[FOR_BODY]] ], [ 0, [[BB_NPH:%.*]] ]
1236 ; CHECK-NEXT: [[STEP]] = add nuw nsw i64 [[INDVAR]], 16
1237 ; CHECK-NEXT: [[SRCI:%.*]] = getelementptr inbounds i8, i8* [[SRC]], i64 [[STEP]]
1238 ; CHECK-NEXT: [[DESTI:%.*]] = getelementptr inbounds i8, i8* [[SRC]], i64 [[INDVAR]]
1239 ; CHECK-NEXT: [[EXITCOND:%.*]] = icmp slt i64 [[STEP]], [[SIZE]]
1240 ; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_BODY]], label [[FOR_END:%.*]]
1242 ; CHECK-NEXT: ret void
1247 for.body: ; preds = %for.body, %bb.nph
1248 %indvar = phi i64 [ %Step, %for.body ], [ 0, %bb.nph ]
1249 %Step = add nuw nsw i64 %indvar, 16
1250 %SrcI = getelementptr inbounds i8, i8* %Src, i64 %Step
1251 %DestI = getelementptr inbounds i8, i8* %Src, i64 %indvar
1252 call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 1 %DestI, i8* align 1 %SrcI, i64 16, i1 false)
1253 %exitcond = icmp slt i64 %Step, %Size
1254 br i1 %exitcond, label %for.body, label %for.end
1256 for.end: ; preds = %for.body
1260 ;; Do not form memmove from previous load when stride is positive.
; NOTE(review): negative case — CHECK lines require the load/store loop to
; survive untouched (no memmove/memcpy call may appear).
1261 define void @do_not_form_memmove1(i8* %Src, i64 %Size) {
1262 ; CHECK-LABEL: @do_not_form_memmove1(
1263 ; CHECK-NEXT: bb.nph:
1264 ; CHECK-NEXT: br label [[FOR_BODY:%.*]]
1266 ; CHECK-NEXT: [[INDVAR:%.*]] = phi i64 [ 1, [[BB_NPH:%.*]] ], [ [[INDVAR_NEXT:%.*]], [[FOR_BODY]] ]
1267 ; CHECK-NEXT: [[STEP:%.*]] = add nuw nsw i64 [[INDVAR]], -1
1268 ; CHECK-NEXT: [[SRCI:%.*]] = getelementptr i8, i8* [[SRC:%.*]], i64 [[STEP]]
1269 ; CHECK-NEXT: [[DESTI:%.*]] = getelementptr i8, i8* [[SRC]], i64 [[INDVAR]]
1270 ; CHECK-NEXT: [[V:%.*]] = load i8, i8* [[SRCI]], align 1
1271 ; CHECK-NEXT: store i8 [[V]], i8* [[DESTI]], align 1
1272 ; CHECK-NEXT: [[INDVAR_NEXT]] = add i64 [[INDVAR]], 1
1273 ; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVAR_NEXT]], [[SIZE:%.*]]
1274 ; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_END:%.*]], label [[FOR_BODY]]
1276 ; CHECK-NEXT: ret void
1281 for.body: ; preds = %bb.nph, %for.body
1282 %indvar = phi i64 [ 1, %bb.nph ], [ %indvar.next, %for.body ]
1283 %Step = add nuw nsw i64 %indvar, -1
1284 %SrcI = getelementptr i8, i8* %Src, i64 %Step
1285 %DestI = getelementptr i8, i8* %Src, i64 %indvar
1286 %V = load i8, i8* %SrcI, align 1
1287 store i8 %V, i8* %DestI, align 1
1288 %indvar.next = add i64 %indvar, 1
1289 %exitcond = icmp eq i64 %indvar.next, %Size
1290 br i1 %exitcond, label %for.end, label %for.body
1292 for.end: ; preds = %for.body, %entry
1296 ;; Do not form memmove from previous load in memcpy when stride is positive.
; NOTE(review): memcpy-in-loop variant of do_not_form_memmove1 — the
; per-iteration 1-byte memcpy must remain (no widened memmove).
1297 define void @do_not_form_memmove2(i8* %Src, i64 %Size) {
1298 ; CHECK-LABEL: @do_not_form_memmove2(
1299 ; CHECK-NEXT: bb.nph:
1300 ; CHECK-NEXT: br label [[FOR_BODY:%.*]]
1302 ; CHECK-NEXT: [[INDVAR:%.*]] = phi i64 [ 1, [[BB_NPH:%.*]] ], [ [[INDVAR_NEXT:%.*]], [[FOR_BODY]] ]
1303 ; CHECK-NEXT: [[STEP:%.*]] = add nuw nsw i64 [[INDVAR]], -1
1304 ; CHECK-NEXT: [[SRCI:%.*]] = getelementptr i8, i8* [[SRC:%.*]], i64 [[STEP]]
1305 ; CHECK-NEXT: [[DESTI:%.*]] = getelementptr i8, i8* [[SRC]], i64 [[INDVAR]]
1306 ; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 1 [[DESTI]], i8* align 1 [[SRCI]], i64 1, i1 false)
1307 ; CHECK-NEXT: [[INDVAR_NEXT]] = add i64 [[INDVAR]], 1
1308 ; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVAR_NEXT]], [[SIZE:%.*]]
1309 ; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_END:%.*]], label [[FOR_BODY]]
1311 ; CHECK-NEXT: ret void
1316 for.body: ; preds = %bb.nph, %for.body
1317 %indvar = phi i64 [ 1, %bb.nph ], [ %indvar.next, %for.body ]
1318 %Step = add nuw nsw i64 %indvar, -1
1319 %SrcI = getelementptr i8, i8* %Src, i64 %Step
1320 %DestI = getelementptr i8, i8* %Src, i64 %indvar
1321 call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 1 %DestI, i8* align 1 %SrcI, i64 1, i1 false)
1322 %indvar.next = add i64 %indvar, 1
1323 %exitcond = icmp eq i64 %indvar.next, %Size
1324 br i1 %exitcond, label %for.end, label %for.body
1326 for.end: ; preds = %for.body, %entry
1330 ;; Do not form memmove from next load when stride is negative.
; NOTE(review): fixed FileCheck usage — [[INDVAR_NEXT]] was used in the phi
; (line 1339) before its [[INDVAR_NEXT:%.*]] definition (line 1345), which
; FileCheck rejects as an undefined variable; the definition now sits at the
; first textual occurrence, consistent with the other autogenerated checks.
1331 define void @do_not_form_memmove3(i8* %Src, i64 %Size) {
1332 ; CHECK-LABEL: @do_not_form_memmove3(
1333 ; CHECK-NEXT: bb.nph:
1334 ; CHECK-NEXT: [[CMP1:%.*]] = icmp sgt i64 [[SIZE:%.*]], 0
1335 ; CHECK-NEXT: br i1 [[CMP1]], label [[FOR_BODY_PREHEADER:%.*]], label [[FOR_END:%.*]]
1336 ; CHECK: for.body.preheader:
1337 ; CHECK-NEXT: br label [[FOR_BODY:%.*]]
1339 ; CHECK-NEXT: [[INDVAR:%.*]] = phi i64 [ [[INDVAR_NEXT:%.*]], [[FOR_BODY]] ], [ [[SIZE]], [[FOR_BODY_PREHEADER]] ]
1340 ; CHECK-NEXT: [[STEP:%.*]] = add nuw nsw i64 [[INDVAR]], 1
1341 ; CHECK-NEXT: [[SRCI:%.*]] = getelementptr inbounds i8, i8* [[SRC:%.*]], i64 [[STEP]]
1342 ; CHECK-NEXT: [[V:%.*]] = load i8, i8* [[SRCI]], align 1
1343 ; CHECK-NEXT: [[DESTI:%.*]] = getelementptr inbounds i8, i8* [[SRC]], i64 [[INDVAR]]
1344 ; CHECK-NEXT: store i8 [[V]], i8* [[DESTI]], align 1
1345 ; CHECK-NEXT: [[INDVAR_NEXT]] = add nsw i64 [[INDVAR]], -1
1346 ; CHECK-NEXT: [[EXITCOND:%.*]] = icmp sgt i64 [[INDVAR]], 1
1347 ; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_BODY]], label [[FOR_END]]
1349 ; CHECK-NEXT: ret void
1352 %cmp1 = icmp sgt i64 %Size, 0
1353 br i1 %cmp1, label %for.body, label %for.end
1355 for.body: ; preds = %bb.nph, %.for.body
1356 %indvar = phi i64 [ %indvar.next, %for.body ], [ %Size, %bb.nph ]
1357 %Step = add nuw nsw i64 %indvar, 1
1358 %SrcI = getelementptr inbounds i8, i8* %Src, i64 %Step
1359 %V = load i8, i8* %SrcI, align 1
1360 %DestI = getelementptr inbounds i8, i8* %Src, i64 %indvar
1361 store i8 %V, i8* %DestI, align 1
1362 %indvar.next = add nsw i64 %indvar, -1
1363 %exitcond = icmp sgt i64 %indvar, 1
1364 br i1 %exitcond, label %for.body, label %for.end
1366 for.end: ; preds = %.for.body, %bb.nph
1370 ;; Do not form memmove from next load in memcpy when stride is negative.
; NOTE(review): fixed FileCheck usage — [[INDVAR_NEXT]] was used in the phi
; (line 1379) before its [[INDVAR_NEXT:%.*]] definition (line 1384); the
; definition now sits at the first textual occurrence, as FileCheck requires.
1371 define void @do_not_form_memmove4(i8* %Src, i64 %Size) {
1372 ; CHECK-LABEL: @do_not_form_memmove4(
1373 ; CHECK-NEXT: bb.nph:
1374 ; CHECK-NEXT: [[CMP1:%.*]] = icmp sgt i64 [[SIZE:%.*]], 0
1375 ; CHECK-NEXT: br i1 [[CMP1]], label [[FOR_BODY_PREHEADER:%.*]], label [[FOR_END:%.*]]
1376 ; CHECK: for.body.preheader:
1377 ; CHECK-NEXT: br label [[FOR_BODY:%.*]]
1379 ; CHECK-NEXT: [[INDVAR:%.*]] = phi i64 [ [[INDVAR_NEXT:%.*]], [[FOR_BODY]] ], [ [[SIZE]], [[FOR_BODY_PREHEADER]] ]
1380 ; CHECK-NEXT: [[STEP:%.*]] = add nuw nsw i64 [[INDVAR]], 1
1381 ; CHECK-NEXT: [[SRCI:%.*]] = getelementptr inbounds i8, i8* [[SRC:%.*]], i64 [[STEP]]
1382 ; CHECK-NEXT: [[DESTI:%.*]] = getelementptr inbounds i8, i8* [[SRC]], i64 [[INDVAR]]
1383 ; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 1 [[DESTI]], i8* align 1 [[SRCI]], i64 1, i1 false)
1384 ; CHECK-NEXT: [[INDVAR_NEXT]] = add nsw i64 [[INDVAR]], -1
1385 ; CHECK-NEXT: [[EXITCOND:%.*]] = icmp sgt i64 [[INDVAR]], 1
1386 ; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_BODY]], label [[FOR_END]]
1388 ; CHECK-NEXT: ret void
1391 %cmp1 = icmp sgt i64 %Size, 0
1392 br i1 %cmp1, label %for.body, label %for.end
1394 for.body: ; preds = %bb.nph, %.for.body
1395 %indvar = phi i64 [ %indvar.next, %for.body ], [ %Size, %bb.nph ]
1396 %Step = add nuw nsw i64 %indvar, 1
1397 %SrcI = getelementptr inbounds i8, i8* %Src, i64 %Step
1398 %DestI = getelementptr inbounds i8, i8* %Src, i64 %indvar
1399 call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 1 %DestI, i8* align 1 %SrcI, i64 1, i1 false)
1400 %indvar.next = add nsw i64 %indvar, -1
1401 %exitcond = icmp sgt i64 %indvar, 1
1402 br i1 %exitcond, label %for.body, label %for.end
1404 for.end: ; preds = %.for.body, %bb.nph
1408 ;; Do not form memmove when underaligned load is overlapped with store.
; NOTE(review): the align-2 i32 load 2 bytes past the store overlaps it;
; CHECK lines require the loop to survive with no memmove formed.
1409 define void @do_not_form_memmove5(i32* %s, i64 %size) {
1410 ; CHECK-LABEL: @do_not_form_memmove5(
1411 ; CHECK-NEXT: entry:
1412 ; CHECK-NEXT: [[END_IDX:%.*]] = add i64 [[SIZE:%.*]], -1
1413 ; CHECK-NEXT: [[END_PTR:%.*]] = getelementptr inbounds i32, i32* [[S:%.*]], i64 [[END_IDX]]
1414 ; CHECK-NEXT: br label [[WHILE_BODY:%.*]]
1415 ; CHECK: while.body:
1416 ; CHECK-NEXT: [[PHI_PTR:%.*]] = phi i32* [ [[S]], [[ENTRY:%.*]] ], [ [[NEXT_PTR:%.*]], [[WHILE_BODY]] ]
1417 ; CHECK-NEXT: [[NEXT:%.*]] = bitcast i32* [[PHI_PTR]] to i16*
1418 ; CHECK-NEXT: [[SRC_PTR:%.*]] = getelementptr i16, i16* [[NEXT]], i64 1
1419 ; CHECK-NEXT: [[SRC_PTR2:%.*]] = bitcast i16* [[SRC_PTR]] to i32*
1420 ; CHECK-NEXT: [[VAL:%.*]] = load i32, i32* [[SRC_PTR2]], align 2
1421 ; CHECK-NEXT: [[DST_PTR:%.*]] = getelementptr i32, i32* [[PHI_PTR]], i64 0
1422 ; CHECK-NEXT: store i32 [[VAL]], i32* [[DST_PTR]], align 4
1423 ; CHECK-NEXT: [[NEXT_PTR]] = getelementptr i32, i32* [[PHI_PTR]], i64 1
1424 ; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32* [[NEXT_PTR]], [[END_PTR]]
1425 ; CHECK-NEXT: br i1 [[CMP]], label [[EXIT:%.*]], label [[WHILE_BODY]]
1427 ; CHECK-NEXT: ret void
1430 %end.idx = add i64 %size, -1
1431 %end.ptr = getelementptr inbounds i32, i32* %s, i64 %end.idx
1432 br label %while.body
1435 %phi.ptr = phi i32* [ %s, %entry ], [ %next.ptr, %while.body ]
1436 %next = bitcast i32* %phi.ptr to i16*
1437 %src.ptr = getelementptr i16, i16* %next, i64 1
1438 %src.ptr2 = bitcast i16* %src.ptr to i32*
1439 ; below underaligned load is overlapped with store.
1440 %val = load i32, i32* %src.ptr2, align 2
1441 %dst.ptr = getelementptr i32, i32* %phi.ptr, i64 0
1442 store i32 %val, i32* %dst.ptr, align 4
1443 %next.ptr = getelementptr i32, i32* %phi.ptr, i64 1
1444 %cmp = icmp eq i32* %next.ptr, %end.ptr
1445 br i1 %cmp, label %exit, label %while.body
1451 ;; Do not form memmove for memcpy with aliasing store.
; NOTE(review): fixed FileCheck usage — SRC is bound on the @external call
; line (1455), so line 1460 uses [[SRC]] rather than redefining it with
; [[SRC:%.*]]; consistent with the other autogenerated checks in this file.
1452 define void @do_not_form_memmove6(i8* %Src, i64 %Size) {
1453 ; CHECK-LABEL: @do_not_form_memmove6(
1454 ; CHECK-NEXT: bb.nph:
1455 ; CHECK-NEXT: [[BASEALIAS:%.*]] = call i8* @external(i8* [[SRC:%.*]])
1456 ; CHECK-NEXT: br label [[FOR_BODY:%.*]]
1458 ; CHECK-NEXT: [[INDVAR:%.*]] = phi i64 [ 0, [[BB_NPH:%.*]] ], [ [[INDVAR_NEXT:%.*]], [[FOR_BODY]] ]
1459 ; CHECK-NEXT: [[STEP:%.*]] = add nuw nsw i64 [[INDVAR]], 1
1460 ; CHECK-NEXT: [[SRCI:%.*]] = getelementptr i8, i8* [[SRC]], i64 [[STEP]]
1461 ; CHECK-NEXT: [[DESTI:%.*]] = getelementptr i8, i8* [[SRC]], i64 [[INDVAR]]
1462 ; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 1 [[DESTI]], i8* align 1 [[SRCI]], i64 1, i1 false)
1463 ; CHECK-NEXT: store i8 4, i8* [[BASEALIAS]], align 1
1464 ; CHECK-NEXT: [[INDVAR_NEXT]] = add i64 [[INDVAR]], 1
1465 ; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVAR_NEXT]], [[SIZE:%.*]]
1466 ; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_END:%.*]], label [[FOR_BODY]]
1468 ; CHECK-NEXT: ret void
1471 %BaseAlias = call i8* @external(i8* %Src)
1474 for.body: ; preds = %bb.nph, %for.body
1475 %indvar = phi i64 [ 0, %bb.nph ], [ %indvar.next, %for.body ]
1476 %Step = add nuw nsw i64 %indvar, 1
1477 %SrcI = getelementptr i8, i8* %Src, i64 %Step
1478 %DestI = getelementptr i8, i8* %Src, i64 %indvar
1479 call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 1 %DestI, i8* align 1 %SrcI, i64 1, i1 false)
1480 store i8 4, i8* %BaseAlias
1481 %indvar.next = add i64 %indvar, 1
1482 %exitcond = icmp eq i64 %indvar.next, %Size
1483 br i1 %exitcond, label %for.end, label %for.body
1485 for.end: ; preds = %for.body, %entry
1489 ;; Do not form memmove when load has more than one use.
; NOTE(review): the loaded value feeds both the store and the running sum,
; so the load/store pair must stay; CHECK lines keep the whole loop.
1490 define i32 @do_not_form_memmove7(i32* %p) {
1491 ; CHECK-LABEL: @do_not_form_memmove7(
1492 ; CHECK-NEXT: entry:
1493 ; CHECK-NEXT: br label [[FOR_BODY:%.*]]
1494 ; CHECK: for.cond.cleanup:
1495 ; CHECK-NEXT: [[ADD_LCSSA:%.*]] = phi i32 [ [[ADD:%.*]], [[FOR_BODY]] ]
1496 ; CHECK-NEXT: ret i32 [[ADD_LCSSA]]
1498 ; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 15, [[ENTRY:%.*]] ], [ [[SUB:%.*]], [[FOR_BODY]] ]
1499 ; CHECK-NEXT: [[SUM:%.*]] = phi i32 [ 0, [[ENTRY]] ], [ [[ADD]], [[FOR_BODY]] ]
1500 ; CHECK-NEXT: [[SUB]] = add nsw i32 [[INDEX]], -1
1501 ; CHECK-NEXT: [[TMP0:%.*]] = zext i32 [[SUB]] to i64
1502 ; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[P:%.*]], i64 [[TMP0]]
1503 ; CHECK-NEXT: [[TMP1:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
1504 ; CHECK-NEXT: [[IDXPROM:%.*]] = zext i32 [[INDEX]] to i64
1505 ; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, i32* [[P]], i64 [[IDXPROM]]
1506 ; CHECK-NEXT: store i32 [[TMP1]], i32* [[ARRAYIDX2]], align 4
1507 ; CHECK-NEXT: [[ADD]] = add nsw i32 [[TMP1]], [[SUM]]
1508 ; CHECK-NEXT: [[CMP:%.*]] = icmp sgt i32 [[INDEX]], 1
1509 ; CHECK-NEXT: br i1 [[CMP]], label [[FOR_BODY]], label [[FOR_COND_CLEANUP:%.*]]
1514 for.cond.cleanup: ; preds = %for.body
1515 %add.lcssa = phi i32 [ %add, %for.body ]
1518 for.body: ; preds = %entry, %for.body
1519 %index = phi i32 [ 15, %entry ], [ %sub, %for.body ]
1520 %sum = phi i32 [ 0, %entry ], [ %add, %for.body ]
1521 %sub = add nsw i32 %index, -1
1522 %0 = zext i32 %sub to i64
1523 %arrayidx = getelementptr inbounds i32, i32* %p, i64 %0
1524 %1 = load i32, i32* %arrayidx, align 4
1525 %idxprom = zext i32 %index to i64
1526 %arrayidx2 = getelementptr inbounds i32, i32* %p, i64 %idxprom
1527 store i32 %1, i32* %arrayidx2, align 4
1528 %add = add nsw i32 %1, %sum
1529 %cmp = icmp sgt i32 %index, 1
1530 br i1 %cmp, label %for.body, label %for.cond.cleanup
1533 ;; Memcpy formation is still preferred over memmove.
; NOTE(review): %Src and %Dest are noalias, so despite the offset-42 source
; the CHECK lines expect a memcpy (not memmove) hoisted to bb.nph.
1534 define void @prefer_memcpy_over_memmove(i8* noalias %Src, i8* noalias %Dest, i64 %Size) {
1535 ; CHECK-LABEL: @prefer_memcpy_over_memmove(
1536 ; CHECK-NEXT: bb.nph:
1537 ; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, i8* [[SRC:%.*]], i64 42
1538 ; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 1 [[DEST:%.*]], i8* align 1 [[SCEVGEP]], i64 [[SIZE:%.*]], i1 false)
1539 ; CHECK-NEXT: br label [[FOR_BODY:%.*]]
1541 ; CHECK-NEXT: [[INDVAR:%.*]] = phi i64 [ 0, [[BB_NPH:%.*]] ], [ [[INDVAR_NEXT:%.*]], [[FOR_BODY]] ]
1542 ; CHECK-NEXT: [[STEP:%.*]] = add nuw nsw i64 [[INDVAR]], 42
1543 ; CHECK-NEXT: [[SRCI:%.*]] = getelementptr i8, i8* [[SRC]], i64 [[STEP]]
1544 ; CHECK-NEXT: [[DESTI:%.*]] = getelementptr i8, i8* [[DEST]], i64 [[INDVAR]]
1545 ; CHECK-NEXT: [[V:%.*]] = load i8, i8* [[SRCI]], align 1
1546 ; CHECK-NEXT: [[INDVAR_NEXT]] = add i64 [[INDVAR]], 1
1547 ; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVAR_NEXT]], [[SIZE]]
1548 ; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_END:%.*]], label [[FOR_BODY]]
1550 ; CHECK-NEXT: ret void
1555 for.body: ; preds = %bb.nph, %for.body
1556 %indvar = phi i64 [ 0, %bb.nph ], [ %indvar.next, %for.body ]
1557 %Step = add nuw nsw i64 %indvar, 42
1558 %SrcI = getelementptr i8, i8* %Src, i64 %Step
1559 %DestI = getelementptr i8, i8* %Dest, i64 %indvar
1560 %V = load i8, i8* %SrcI, align 1
1561 store i8 %V, i8* %DestI, align 1
1562 %indvar.next = add i64 %indvar, 1
1563 %exitcond = icmp eq i64 %indvar.next, %Size
1564 br i1 %exitcond, label %for.end, label %for.body
1566 for.end: ; preds = %for.body, %entry
1570 ; Validate that "memset_pattern" has the proper attributes.