; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -O3 -mtriple=x86_64-unknown-unknown -mcpu=core2 | FileCheck %s -check-prefix=X64
; RUN: llc < %s -O3 -mtriple=i686-unknown-unknown -mcpu=core2 | FileCheck %s -check-prefix=X32

; @simple is the most basic chain of address induction variables. Chaining
; saves at least one register and avoids complex addressing and setup.
;
; X64: no other address computation in the preheader; no complex address modes.
; X32: no expensive address computation in the preheader; no complex address modes.
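;
; A rough C equivalent of the IR below, as a reading aid (a sketch, not part
; of the test; names mirror the IR arguments, and pointer arithmetic is in
; i32 elements):
;   int simple(int *a, int *b, int x) {
;     int s = 0;
;     int *p = a;
;     do {
;       s += p[0] + p[x] + p[2 * x] + p[3 * x];
;       p += 4 * x;
;     } while (p != b);
;     return s;
;   }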
define i32 @simple(ptr %a, ptr %b, i32 %x) nounwind {
; X64-LABEL: simple:
; X64: # %bb.0: # %entry
; X64-NEXT: movslq %edx, %rcx
; X64-NEXT: shlq $2, %rcx
; X64-NEXT: xorl %eax, %eax
; X64-NEXT: leaq (%rcx,%rcx), %rdx
; X64-NEXT: .p2align 4, 0x90
; X64-NEXT: .LBB0_1: # %loop
; X64-NEXT: # =>This Inner Loop Header: Depth=1
; X64-NEXT: addl (%rdi), %eax
; X64-NEXT: addl (%rdi,%rcx), %eax
; X64-NEXT: leaq (%rdi,%rcx), %r8
; X64-NEXT: addl (%rcx,%r8), %eax
; X64-NEXT: addq %rcx, %r8
; X64-NEXT: addl (%rcx,%r8), %eax
; X64-NEXT: addq %rdx, %r8
; X64-NEXT: movq %r8, %rdi
; X64-NEXT: cmpq %rsi, %r8
; X64-NEXT: jne .LBB0_1
; X64-NEXT: # %bb.2: # %exit
; X64-NEXT: retq
;
; X32-LABEL: simple:
; X32: # %bb.0: # %entry
; X32-NEXT: pushl %ebx
; X32-NEXT: pushl %edi
; X32-NEXT: pushl %esi
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: movl {{[0-9]+}}(%esp), %edi
; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
; X32-NEXT: shll $2, %edx
; X32-NEXT: xorl %eax, %eax
; X32-NEXT: leal (%edx,%edx), %esi
; X32-NEXT: .p2align 4, 0x90
; X32-NEXT: .LBB0_1: # %loop
; X32-NEXT: # =>This Inner Loop Header: Depth=1
; X32-NEXT: addl (%edi), %eax
; X32-NEXT: addl (%edi,%edx), %eax
; X32-NEXT: leal (%edi,%edx), %ebx
; X32-NEXT: addl (%edx,%ebx), %eax
; X32-NEXT: addl %edx, %ebx
; X32-NEXT: addl (%edx,%ebx), %eax
; X32-NEXT: addl %esi, %ebx
; X32-NEXT: movl %ebx, %edi
; X32-NEXT: cmpl %ecx, %ebx
; X32-NEXT: jne .LBB0_1
; X32-NEXT: # %bb.2: # %exit
; X32-NEXT: popl %esi
; X32-NEXT: popl %edi
; X32-NEXT: popl %ebx
; X32-NEXT: retl
entry:
  br label %loop

loop:
  %iv = phi ptr [ %a, %entry ], [ %iv4, %loop ]
  %s = phi i32 [ 0, %entry ], [ %s4, %loop ]
  %v = load i32, ptr %iv
  %iv1 = getelementptr inbounds i32, ptr %iv, i32 %x
  %v1 = load i32, ptr %iv1
  %iv2 = getelementptr inbounds i32, ptr %iv1, i32 %x
  %v2 = load i32, ptr %iv2
  %iv3 = getelementptr inbounds i32, ptr %iv2, i32 %x
  %v3 = load i32, ptr %iv3
  %s1 = add i32 %s, %v
  %s2 = add i32 %s1, %v1
  %s3 = add i32 %s2, %v2
  %s4 = add i32 %s3, %v3
  %iv4 = getelementptr inbounds i32, ptr %iv3, i32 %x
  %cmp = icmp eq ptr %iv4, %b
  br i1 %cmp, label %exit, label %loop

exit:
  ret i32 %s4
}

; @user is not currently chained because the IV is live across memory ops.
;
; As a result we get expensive address computation in the preheader and
; complex address modes in the loop.
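;
; A rough C equivalent of the IR below (a sketch, not part of the test); the
; store through the base pointer is what keeps the IV live across memory ops:
;   int user(int *a, int *b, int x) {
;     int s = 0;
;     int *p = a;
;     do {
;       s += p[0] + p[x] + p[2 * x] + p[3 * x];
;       *p = s;
;       p += 4 * x;
;     } while (p != b);
;     return s;
;   }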
define i32 @user(ptr %a, ptr %b, i32 %x) nounwind {
; X64-LABEL: user:
; X64: # %bb.0: # %entry
; X64-NEXT: movslq %edx, %rcx
; X64-NEXT: movq %rcx, %rdx
; X64-NEXT: shlq $4, %rdx
; X64-NEXT: leaq (,%rcx,4), %rax
; X64-NEXT: leaq (%rax,%rax,2), %r8
; X64-NEXT: xorl %eax, %eax
; X64-NEXT: .p2align 4, 0x90
; X64-NEXT: .LBB1_1: # %loop
; X64-NEXT: # =>This Inner Loop Header: Depth=1
; X64-NEXT: addl (%rdi), %eax
; X64-NEXT: addl (%rdi,%rcx,4), %eax
; X64-NEXT: addl (%rdi,%rcx,8), %eax
; X64-NEXT: addl (%rdi,%r8), %eax
; X64-NEXT: movl %eax, (%rdi)
; X64-NEXT: addq %rdx, %rdi
; X64-NEXT: cmpq %rsi, %rdi
; X64-NEXT: jne .LBB1_1
; X64-NEXT: # %bb.2: # %exit
; X64-NEXT: retq
;
; X32-LABEL: user:
; X32: # %bb.0: # %entry
; X32-NEXT: pushl %ebx
; X32-NEXT: pushl %edi
; X32-NEXT: pushl %esi
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
; X32-NEXT: movl {{[0-9]+}}(%esp), %esi
; X32-NEXT: movl %ecx, %edi
; X32-NEXT: shll $4, %edi
; X32-NEXT: leal (,%ecx,4), %eax
; X32-NEXT: leal (%eax,%eax,2), %ebx
; X32-NEXT: xorl %eax, %eax
; X32-NEXT: .p2align 4, 0x90
; X32-NEXT: .LBB1_1: # %loop
; X32-NEXT: # =>This Inner Loop Header: Depth=1
; X32-NEXT: addl (%esi), %eax
; X32-NEXT: addl (%esi,%ecx,4), %eax
; X32-NEXT: addl (%esi,%ecx,8), %eax
; X32-NEXT: addl (%esi,%ebx), %eax
; X32-NEXT: movl %eax, (%esi)
; X32-NEXT: addl %edi, %esi
; X32-NEXT: cmpl %edx, %esi
; X32-NEXT: jne .LBB1_1
; X32-NEXT: # %bb.2: # %exit
; X32-NEXT: popl %esi
; X32-NEXT: popl %edi
; X32-NEXT: popl %ebx
; X32-NEXT: retl
entry:
  br label %loop

loop:
  %iv = phi ptr [ %a, %entry ], [ %iv4, %loop ]
  %s = phi i32 [ 0, %entry ], [ %s4, %loop ]
  %v = load i32, ptr %iv
  %iv1 = getelementptr inbounds i32, ptr %iv, i32 %x
  %v1 = load i32, ptr %iv1
  %iv2 = getelementptr inbounds i32, ptr %iv1, i32 %x
  %v2 = load i32, ptr %iv2
  %iv3 = getelementptr inbounds i32, ptr %iv2, i32 %x
  %v3 = load i32, ptr %iv3
  %s1 = add i32 %s, %v
  %s2 = add i32 %s1, %v1
  %s3 = add i32 %s2, %v2
  %s4 = add i32 %s3, %v3
  %iv4 = getelementptr inbounds i32, ptr %iv3, i32 %x
  store i32 %s4, ptr %iv
  %cmp = icmp eq ptr %iv4, %b
  br i1 %cmp, label %exit, label %loop

exit:
  ret i32 %s4
}

; @extrastride is a slightly more interesting case of a single
; complete chain with multiple strides. The test case IR is what LSR
; used to do, and exactly what we don't want to do. LSR's new IV
; chaining feature should now undo the damage.
;
; We currently don't handle this on X64 because the sexts cause
; strange increment expressions like this:
; IV + ((sext i32 (2 * %s) to i64) + (-1 * (sext i32 %s to i64)))
;
; For X32, no spills in the preheader, no complex address modes, no reloads.
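;
; A rough C equivalent of the IR below (a sketch, not part of the test; byte
; offsets go through a char* base, with i32 loads at five consecutive
; strides):
;   void extrastride(char *main, int main_stride, int *res,
;                    int x, int y, int z) {
;     for (int i = 0; i < z; ++i) {
;       *res = *(int *)main
;            + *(int *)(main + main_stride)
;            + *(int *)(main + 2 * main_stride)
;            + *(int *)(main + 3 * main_stride)
;            + *(int *)(main + 4 * main_stride);
;       main += 5 * main_stride + x;
;       res += y;
;     }
;   }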
define void @extrastride(ptr nocapture %main, i32 %main_stride, ptr nocapture %res, i32 %x, i32 %y, i32 %z) nounwind {
; X64-LABEL: extrastride:
; X64: # %bb.0: # %entry
; X64-NEXT: # kill: def $ecx killed $ecx def $rcx
; X64-NEXT: # kill: def $esi killed $esi def $rsi
; X64-NEXT: testl %r9d, %r9d
; X64-NEXT: je .LBB2_4
; X64-NEXT: # %bb.1: # %for.body.lr.ph
; X64-NEXT: pushq %rbx
; X64-NEXT: leal (%rsi,%rsi), %r10d
; X64-NEXT: leal (%rsi,%rsi,2), %r11d
; X64-NEXT: addl %esi, %ecx
; X64-NEXT: leal (,%rsi,4), %eax
; X64-NEXT: leal (%rcx,%rsi,4), %ebx
; X64-NEXT: cltq
; X64-NEXT: movslq %r11d, %rcx
; X64-NEXT: movslq %r10d, %r10
; X64-NEXT: movslq %esi, %rsi
; X64-NEXT: movslq %r8d, %r8
; X64-NEXT: shlq $2, %r8
; X64-NEXT: movslq %ebx, %r11
; X64-NEXT: .p2align 4, 0x90
; X64-NEXT: .LBB2_2: # %for.body
; X64-NEXT: # =>This Inner Loop Header: Depth=1
; X64-NEXT: movl (%rdi,%rsi), %ebx
; X64-NEXT: addl (%rdi), %ebx
; X64-NEXT: addl (%rdi,%r10), %ebx
; X64-NEXT: addl (%rdi,%rcx), %ebx
; X64-NEXT: addl (%rdi,%rax), %ebx
; X64-NEXT: movl %ebx, (%rdx)
; X64-NEXT: addq %r11, %rdi
; X64-NEXT: addq %r8, %rdx
; X64-NEXT: decl %r9d
; X64-NEXT: jne .LBB2_2
; X64-NEXT: # %bb.3:
; X64-NEXT: popq %rbx
; X64-NEXT: .LBB2_4: # %for.end
; X64-NEXT: retq
;
; X32-LABEL: extrastride:
; X32: # %bb.0: # %entry
; X32-NEXT: pushl %ebp
; X32-NEXT: pushl %ebx
; X32-NEXT: pushl %edi
; X32-NEXT: pushl %esi
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: testl %eax, %eax
; X32-NEXT: je .LBB2_3
; X32-NEXT: # %bb.1: # %for.body.lr.ph
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
; X32-NEXT: movl {{[0-9]+}}(%esp), %esi
; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx
; X32-NEXT: movl {{[0-9]+}}(%esp), %edi
; X32-NEXT: addl %esi, %edi
; X32-NEXT: shll $2, %ecx
; X32-NEXT: .p2align 4, 0x90
; X32-NEXT: .LBB2_2: # %for.body
; X32-NEXT: # =>This Inner Loop Header: Depth=1
; X32-NEXT: movl (%ebx,%esi), %ebp
; X32-NEXT: addl (%ebx), %ebp
; X32-NEXT: addl %esi, %ebx
; X32-NEXT: addl (%esi,%ebx), %ebp
; X32-NEXT: addl %esi, %ebx
; X32-NEXT: addl (%esi,%ebx), %ebp
; X32-NEXT: addl %esi, %ebx
; X32-NEXT: addl (%esi,%ebx), %ebp
; X32-NEXT: movl %ebp, (%edx)
; X32-NEXT: addl %esi, %ebx
; X32-NEXT: addl %edi, %ebx
; X32-NEXT: addl %ecx, %edx
; X32-NEXT: decl %eax
; X32-NEXT: jne .LBB2_2
; X32-NEXT: .LBB2_3: # %for.end
; X32-NEXT: popl %esi
; X32-NEXT: popl %edi
; X32-NEXT: popl %ebx
; X32-NEXT: popl %ebp
; X32-NEXT: retl
entry:
  %cmp8 = icmp eq i32 %z, 0
  br i1 %cmp8, label %for.end, label %for.body.lr.ph

for.body.lr.ph: ; preds = %entry
  %add.ptr.sum = shl i32 %main_stride, 1 ; s*2
  %add.ptr1.sum = add i32 %add.ptr.sum, %main_stride ; s*3
  %add.ptr2.sum = add i32 %x, %main_stride ; s + x
  %add.ptr4.sum = shl i32 %main_stride, 2 ; s*4
  %add.ptr3.sum = add i32 %add.ptr2.sum, %add.ptr4.sum ; total IV stride = s*5+x
  br label %for.body

for.body: ; preds = %for.body.lr.ph, %for.body
  %main.addr.011 = phi ptr [ %main, %for.body.lr.ph ], [ %add.ptr6, %for.body ]
  %i.010 = phi i32 [ 0, %for.body.lr.ph ], [ %inc, %for.body ]
  %res.addr.09 = phi ptr [ %res, %for.body.lr.ph ], [ %add.ptr7, %for.body ]
  %0 = load i32, ptr %main.addr.011, align 4
  %add.ptr = getelementptr inbounds i8, ptr %main.addr.011, i32 %main_stride
  %1 = load i32, ptr %add.ptr, align 4
  %add.ptr1 = getelementptr inbounds i8, ptr %main.addr.011, i32 %add.ptr.sum
  %2 = load i32, ptr %add.ptr1, align 4
  %add.ptr2 = getelementptr inbounds i8, ptr %main.addr.011, i32 %add.ptr1.sum
  %3 = load i32, ptr %add.ptr2, align 4
  %add.ptr3 = getelementptr inbounds i8, ptr %main.addr.011, i32 %add.ptr4.sum
  %4 = load i32, ptr %add.ptr3, align 4
  %add = add i32 %1, %0
  %add4 = add i32 %add, %2
  %add5 = add i32 %add4, %3
  %add6 = add i32 %add5, %4
  store i32 %add6, ptr %res.addr.09, align 4
  %add.ptr6 = getelementptr inbounds i8, ptr %main.addr.011, i32 %add.ptr3.sum
  %add.ptr7 = getelementptr inbounds i32, ptr %res.addr.09, i32 %y
  %inc = add i32 %i.010, 1
  %cmp = icmp eq i32 %inc, %z
  br i1 %cmp, label %for.end, label %for.body

for.end: ; preds = %for.body, %entry
  ret void
}

; @foldedidx is an unrolled variant of this loop:
; for (unsigned long i = 0; i < len; i += s) {
;   c[i] = a[i] + b[i];
; }
; where 's' can be folded into the addressing mode.
; Consequently, we should *not* form any chains.
define void @foldedidx(ptr nocapture %a, ptr nocapture %b, ptr nocapture %c) nounwind ssp {
; X64-LABEL: foldedidx:
; X64: # %bb.0: # %entry
; X64-NEXT: movl $3, %eax
; X64-NEXT: .p2align 4, 0x90
; X64-NEXT: .LBB3_1: # %for.body
; X64-NEXT: # =>This Inner Loop Header: Depth=1
; X64-NEXT: movzbl -3(%rdi,%rax), %ecx
; X64-NEXT: movzbl -3(%rsi,%rax), %r8d
; X64-NEXT: addl %ecx, %r8d
; X64-NEXT: movb %r8b, -3(%rdx,%rax)
; X64-NEXT: movzbl -2(%rdi,%rax), %ecx
; X64-NEXT: movzbl -2(%rsi,%rax), %r8d
; X64-NEXT: addl %ecx, %r8d
; X64-NEXT: movb %r8b, -2(%rdx,%rax)
; X64-NEXT: movzbl -1(%rdi,%rax), %ecx
; X64-NEXT: movzbl -1(%rsi,%rax), %r8d
; X64-NEXT: addl %ecx, %r8d
; X64-NEXT: movb %r8b, -1(%rdx,%rax)
; X64-NEXT: movzbl (%rdi,%rax), %ecx
; X64-NEXT: movzbl (%rsi,%rax), %r8d
; X64-NEXT: addl %ecx, %r8d
; X64-NEXT: movb %r8b, (%rdx,%rax)
; X64-NEXT: addq $4, %rax
; X64-NEXT: cmpl $403, %eax # imm = 0x193
; X64-NEXT: jne .LBB3_1
; X64-NEXT: # %bb.2: # %for.end
; X64-NEXT: retq
;
; X32-LABEL: foldedidx:
; X32: # %bb.0: # %entry
; X32-NEXT: pushl %ebx
; X32-NEXT: pushl %edi
; X32-NEXT: pushl %esi
; X32-NEXT: movl $3, %eax
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
; X32-NEXT: movl {{[0-9]+}}(%esp), %esi
; X32-NEXT: .p2align 4, 0x90
; X32-NEXT: .LBB3_1: # %for.body
; X32-NEXT: # =>This Inner Loop Header: Depth=1
; X32-NEXT: movzbl -3(%esi,%eax), %edi
; X32-NEXT: movzbl -3(%edx,%eax), %ebx
; X32-NEXT: addl %edi, %ebx
; X32-NEXT: movb %bl, -3(%ecx,%eax)
; X32-NEXT: movzbl -2(%esi,%eax), %edi
; X32-NEXT: movzbl -2(%edx,%eax), %ebx
; X32-NEXT: addl %edi, %ebx
; X32-NEXT: movb %bl, -2(%ecx,%eax)
; X32-NEXT: movzbl -1(%esi,%eax), %edi
; X32-NEXT: movzbl -1(%edx,%eax), %ebx
; X32-NEXT: addl %edi, %ebx
; X32-NEXT: movb %bl, -1(%ecx,%eax)
; X32-NEXT: movzbl (%esi,%eax), %edi
; X32-NEXT: movzbl (%edx,%eax), %ebx
; X32-NEXT: addl %edi, %ebx
; X32-NEXT: movb %bl, (%ecx,%eax)
; X32-NEXT: addl $4, %eax
; X32-NEXT: cmpl $403, %eax # imm = 0x193
; X32-NEXT: jne .LBB3_1
; X32-NEXT: # %bb.2: # %for.end
; X32-NEXT: popl %esi
; X32-NEXT: popl %edi
; X32-NEXT: popl %ebx
; X32-NEXT: retl
entry:
  br label %for.body

for.body: ; preds = %for.body, %entry
  %i.07 = phi i32 [ 0, %entry ], [ %inc.3, %for.body ]
  %arrayidx = getelementptr inbounds i8, ptr %a, i32 %i.07
  %0 = load i8, ptr %arrayidx, align 1
  %conv5 = zext i8 %0 to i32
  %arrayidx1 = getelementptr inbounds i8, ptr %b, i32 %i.07
  %1 = load i8, ptr %arrayidx1, align 1
  %conv26 = zext i8 %1 to i32
  %add = add nsw i32 %conv26, %conv5
  %conv3 = trunc i32 %add to i8
  %arrayidx4 = getelementptr inbounds i8, ptr %c, i32 %i.07
  store i8 %conv3, ptr %arrayidx4, align 1
  %inc1 = or disjoint i32 %i.07, 1
  %arrayidx.1 = getelementptr inbounds i8, ptr %a, i32 %inc1
  %2 = load i8, ptr %arrayidx.1, align 1
  %conv5.1 = zext i8 %2 to i32
  %arrayidx1.1 = getelementptr inbounds i8, ptr %b, i32 %inc1
  %3 = load i8, ptr %arrayidx1.1, align 1
  %conv26.1 = zext i8 %3 to i32
  %add.1 = add nsw i32 %conv26.1, %conv5.1
  %conv3.1 = trunc i32 %add.1 to i8
  %arrayidx4.1 = getelementptr inbounds i8, ptr %c, i32 %inc1
  store i8 %conv3.1, ptr %arrayidx4.1, align 1
  %inc.12 = or disjoint i32 %i.07, 2
  %arrayidx.2 = getelementptr inbounds i8, ptr %a, i32 %inc.12
  %4 = load i8, ptr %arrayidx.2, align 1
  %conv5.2 = zext i8 %4 to i32
  %arrayidx1.2 = getelementptr inbounds i8, ptr %b, i32 %inc.12
  %5 = load i8, ptr %arrayidx1.2, align 1
  %conv26.2 = zext i8 %5 to i32
  %add.2 = add nsw i32 %conv26.2, %conv5.2
  %conv3.2 = trunc i32 %add.2 to i8
  %arrayidx4.2 = getelementptr inbounds i8, ptr %c, i32 %inc.12
  store i8 %conv3.2, ptr %arrayidx4.2, align 1
  %inc.23 = or disjoint i32 %i.07, 3
  %arrayidx.3 = getelementptr inbounds i8, ptr %a, i32 %inc.23
  %6 = load i8, ptr %arrayidx.3, align 1
  %conv5.3 = zext i8 %6 to i32
  %arrayidx1.3 = getelementptr inbounds i8, ptr %b, i32 %inc.23
  %7 = load i8, ptr %arrayidx1.3, align 1
  %conv26.3 = zext i8 %7 to i32
  %add.3 = add nsw i32 %conv26.3, %conv5.3
  %conv3.3 = trunc i32 %add.3 to i8
  %arrayidx4.3 = getelementptr inbounds i8, ptr %c, i32 %inc.23
  store i8 %conv3.3, ptr %arrayidx4.3, align 1
  %inc.3 = add nsw i32 %i.07, 4
  %exitcond.3 = icmp eq i32 %inc.3, 400
  br i1 %exitcond.3, label %for.end, label %for.body

for.end: ; preds = %for.body
  ret void
}

; @multioper tests instructions with multiple IV user operands. We
; should be able to chain them independently of each other.
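;
; A rough C equivalent of the IR below (a sketch, not part of the test; the
; body runs at least once, matching the bottom-tested IR loop):
;   void multioper(int *a, int n) {
;     int i = 0;
;     int *p = a;
;     do {
;       p[0] = i;
;       p[1] = i + 1;
;       p[2] = i + 2;
;       p[3] = i + 3;
;       p += 4;
;       i += 4;
;     } while (i < n);
;   }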
define void @multioper(ptr %a, i32 %n) nounwind {
; X64-LABEL: multioper:
; X64: # %bb.0: # %entry
; X64-NEXT: xorl %eax, %eax
; X64-NEXT: .p2align 4, 0x90
; X64-NEXT: .LBB4_1: # %for.body
; X64-NEXT: # =>This Inner Loop Header: Depth=1
; X64-NEXT: movl %eax, (%rdi,%rax,4)
; X64-NEXT: leal 1(%rax), %ecx
; X64-NEXT: movl %ecx, 4(%rdi,%rax,4)
; X64-NEXT: leal 2(%rax), %ecx
; X64-NEXT: movl %ecx, 8(%rdi,%rax,4)
; X64-NEXT: leal 3(%rax), %ecx
; X64-NEXT: movl %ecx, 12(%rdi,%rax,4)
; X64-NEXT: addq $4, %rax
; X64-NEXT: cmpl %esi, %eax
; X64-NEXT: jl .LBB4_1
; X64-NEXT: # %bb.2: # %exit
; X64-NEXT: retq
;
; X32-LABEL: multioper:
; X32: # %bb.0: # %entry
; X32-NEXT: pushl %esi
; X32-NEXT: xorl %eax, %eax
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
; X32-NEXT: .p2align 4, 0x90
; X32-NEXT: .LBB4_1: # %for.body
; X32-NEXT: # =>This Inner Loop Header: Depth=1
; X32-NEXT: movl %eax, (%edx,%eax,4)
; X32-NEXT: leal 1(%eax), %esi
; X32-NEXT: movl %esi, 4(%edx,%eax,4)
; X32-NEXT: leal 2(%eax), %esi
; X32-NEXT: movl %esi, 8(%edx,%eax,4)
; X32-NEXT: leal 3(%eax), %esi
; X32-NEXT: movl %esi, 12(%edx,%eax,4)
; X32-NEXT: addl $4, %eax
; X32-NEXT: cmpl %ecx, %eax
; X32-NEXT: jl .LBB4_1
; X32-NEXT: # %bb.2: # %exit
; X32-NEXT: popl %esi
; X32-NEXT: retl
entry:
  br label %for.body

for.body: ; preds = %for.body, %entry
  %p = phi ptr [ %p.next, %for.body ], [ %a, %entry ]
  %i = phi i32 [ %inc4, %for.body ], [ 0, %entry ]
  store i32 %i, ptr %p, align 4
  %inc1 = or disjoint i32 %i, 1
  %add.ptr.i1 = getelementptr inbounds i32, ptr %p, i32 1
  store i32 %inc1, ptr %add.ptr.i1, align 4
  %inc2 = add nsw i32 %i, 2
  %add.ptr.i2 = getelementptr inbounds i32, ptr %p, i32 2
  store i32 %inc2, ptr %add.ptr.i2, align 4
  %inc3 = add nsw i32 %i, 3
  %add.ptr.i3 = getelementptr inbounds i32, ptr %p, i32 3
  store i32 %inc3, ptr %add.ptr.i3, align 4
  %p.next = getelementptr inbounds i32, ptr %p, i32 4
  %inc4 = add nsw i32 %i, 4
  %cmp = icmp slt i32 %inc4, %n
  br i1 %cmp, label %for.body, label %exit

exit:
  ret void
}

; @testCmpZero has an ICmpZero LSR use that should not be hidden from
; LSR. Profitable chains should have more than one nonzero increment
; anyway.
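;
; A rough C equivalent of the IR below (a sketch, not part of the test; note
; that the source is read in i32 steps while the destination is written one
; byte at a time):
;   void testCmpZero(char *src, char *dst, int srcidx, int dstidx, int len) {
;     char *d = src + srcidx;
;     const int *s = (const int *)(dst + dstidx);
;     char *limit = src + srcidx + len;
;     do {
;       *d++ = (char)*s++;
;     } while (d != limit);
;   }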
define void @testCmpZero(ptr %src, ptr %dst, i32 %srcidx, i32 %dstidx, i32 %len) nounwind ssp {
; X64-LABEL: testCmpZero:
; X64: # %bb.0: # %entry
; X64-NEXT: movslq %edx, %rdx
; X64-NEXT: addq %rdx, %rdi
; X64-NEXT: movslq %ecx, %rax
; X64-NEXT: addq %rsi, %rax
; X64-NEXT: addl %edx, %r8d
; X64-NEXT: movslq %r8d, %rcx
; X64-NEXT: subq %rdx, %rcx
; X64-NEXT: xorl %edx, %edx
; X64-NEXT: .p2align 4, 0x90
; X64-NEXT: .LBB5_1: # %for.body82.us
; X64-NEXT: # =>This Inner Loop Header: Depth=1
; X64-NEXT: movzbl (%rax,%rdx,4), %esi
; X64-NEXT: movb %sil, (%rdi,%rdx)
; X64-NEXT: incq %rdx
; X64-NEXT: cmpq %rdx, %rcx
; X64-NEXT: jne .LBB5_1
; X64-NEXT: # %bb.2: # %return
; X64-NEXT: retq
;
; X32-LABEL: testCmpZero:
; X32: # %bb.0: # %entry
; X32-NEXT: pushl %ebx
; X32-NEXT: pushl %esi
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: addl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
; X32-NEXT: addl {{[0-9]+}}(%esp), %edx
; X32-NEXT: xorl %esi, %esi
; X32-NEXT: .p2align 4, 0x90
; X32-NEXT: .LBB5_1: # %for.body82.us
; X32-NEXT: # =>This Inner Loop Header: Depth=1
; X32-NEXT: movzbl (%edx,%esi,4), %ebx
; X32-NEXT: movb %bl, (%ecx,%esi)
; X32-NEXT: incl %esi
; X32-NEXT: cmpl %esi, %eax
; X32-NEXT: jne .LBB5_1
; X32-NEXT: # %bb.2: # %return
; X32-NEXT: popl %esi
; X32-NEXT: popl %ebx
; X32-NEXT: retl
entry:
  %dest0 = getelementptr inbounds i8, ptr %src, i32 %srcidx
  %source0 = getelementptr inbounds i8, ptr %dst, i32 %dstidx
  %add.ptr79.us.sum = add i32 %srcidx, %len
  %lftr.limit = getelementptr i8, ptr %src, i32 %add.ptr79.us.sum
  br label %for.body82.us

for.body82.us: ; preds = %for.body82.us, %entry
  %dest = phi ptr [ %dest0, %entry ], [ %incdec.ptr91.us, %for.body82.us ]
  %source = phi ptr [ %source0, %entry ], [ %add.ptr83.us, %for.body82.us ]
  %0 = load i32, ptr %source, align 4
  %trunc = trunc i32 %0 to i8
  %add.ptr83.us = getelementptr inbounds i8, ptr %source, i32 4
  %incdec.ptr91.us = getelementptr inbounds i8, ptr %dest, i32 1
  store i8 %trunc, ptr %dest, align 1
  %exitcond = icmp eq ptr %incdec.ptr91.us, %lftr.limit
  br i1 %exitcond, label %return, label %for.body82.us

return:
  ret void
}