; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-darwin -mcpu=generic | FileCheck %s -check-prefixes=CHECK,GENERIC
; RUN: llc < %s -mtriple=x86_64-darwin -mcpu=atom | FileCheck %s -check-prefixes=CHECK,ATOM

@Te0 = external global [256 x i32] ; <[256 x i32]*> [#uses=5]
@Te1 = external global [256 x i32] ; <[256 x i32]*> [#uses=4]
@Te3 = external global [256 x i32] ; <[256 x i32]*> [#uses=2]

define void @t(i8* nocapture %in, i8* nocapture %out, i32* nocapture %rk, i32 %r) nounwind {
; GENERIC-LABEL: t:
; GENERIC: ## %bb.0: ## %entry
; GENERIC-NEXT: pushq %rbp
; GENERIC-NEXT: pushq %r14
; GENERIC-NEXT: pushq %rbx
; GENERIC-NEXT: ## kill: def $ecx killed $ecx def $rcx
; GENERIC-NEXT: movl (%rdx), %eax
; GENERIC-NEXT: movl 4(%rdx), %ebx
; GENERIC-NEXT: decl %ecx
; GENERIC-NEXT: leaq 20(%rdx), %r14
; GENERIC-NEXT: movq _Te0@{{.*}}(%rip), %r9
; GENERIC-NEXT: movq _Te1@{{.*}}(%rip), %r8
; GENERIC-NEXT: movq _Te3@{{.*}}(%rip), %r10
; GENERIC-NEXT: movq %rcx, %r11
; GENERIC-NEXT: .p2align 4, 0x90
; GENERIC-NEXT: LBB0_1: ## %bb
; GENERIC-NEXT: ## =>This Inner Loop Header: Depth=1
; GENERIC-NEXT: movzbl %al, %edi
; GENERIC-NEXT: ## kill: def $eax killed $eax def $rax
; GENERIC-NEXT: shrl $24, %eax
; GENERIC-NEXT: movl %ebx, %ebp
; GENERIC-NEXT: shrl $16, %ebp
; GENERIC-NEXT: movzbl %bpl, %ebp
; GENERIC-NEXT: movl (%r8,%rbp,4), %ebp
; GENERIC-NEXT: xorl (%r9,%rax,4), %ebp
; GENERIC-NEXT: xorl -12(%r14), %ebp
; GENERIC-NEXT: shrl $24, %ebx
; GENERIC-NEXT: movl (%r10,%rdi,4), %edi
; GENERIC-NEXT: xorl (%r9,%rbx,4), %edi
; GENERIC-NEXT: xorl -8(%r14), %edi
; GENERIC-NEXT: movl %ebp, %eax
; GENERIC-NEXT: shrl $24, %eax
; GENERIC-NEXT: movl (%r9,%rax,4), %eax
; GENERIC-NEXT: testq %r11, %r11
; GENERIC-NEXT: je LBB0_3
; GENERIC-NEXT: ## %bb.2: ## %bb1
; GENERIC-NEXT: ## in Loop: Header=BB0_1 Depth=1
; GENERIC-NEXT: movl %edi, %ebx
; GENERIC-NEXT: shrl $16, %ebx
; GENERIC-NEXT: movzbl %bl, %ebx
; GENERIC-NEXT: xorl (%r8,%rbx,4), %eax
; GENERIC-NEXT: xorl -4(%r14), %eax
; GENERIC-NEXT: shrl $24, %edi
; GENERIC-NEXT: movzbl %bpl, %ebx
; GENERIC-NEXT: movl (%r10,%rbx,4), %ebx
; GENERIC-NEXT: xorl (%r9,%rdi,4), %ebx
; GENERIC-NEXT: xorl (%r14), %ebx
; GENERIC-NEXT: decq %r11
; GENERIC-NEXT: addq $16, %r14
; GENERIC-NEXT: jmp LBB0_1
; GENERIC-NEXT: LBB0_3: ## %bb2
; GENERIC-NEXT: shlq $4, %rcx
; GENERIC-NEXT: andl $-16777216, %eax ## imm = 0xFF000000
; GENERIC-NEXT: movl %edi, %ebx
; GENERIC-NEXT: shrl $16, %ebx
; GENERIC-NEXT: movzbl %bl, %ebx
; GENERIC-NEXT: movzbl 2(%r8,%rbx,4), %ebx
; GENERIC-NEXT: shll $16, %ebx
; GENERIC-NEXT: orl %eax, %ebx
; GENERIC-NEXT: xorl 16(%rcx,%rdx), %ebx
; GENERIC-NEXT: shrl $8, %edi
; GENERIC-NEXT: movzbl 3(%r9,%rdi,4), %eax
; GENERIC-NEXT: shll $24, %eax
; GENERIC-NEXT: movzbl %bpl, %edi
; GENERIC-NEXT: movzbl 2(%r8,%rdi,4), %edi
; GENERIC-NEXT: shll $16, %edi
; GENERIC-NEXT: orl %eax, %edi
; GENERIC-NEXT: xorl 20(%rcx,%rdx), %edi
; GENERIC-NEXT: movl %ebx, %eax
; GENERIC-NEXT: shrl $24, %eax
; GENERIC-NEXT: movb %al, (%rsi)
; GENERIC-NEXT: shrl $16, %ebx
; GENERIC-NEXT: movb %bl, 1(%rsi)
; GENERIC-NEXT: movl %edi, %eax
; GENERIC-NEXT: shrl $24, %eax
; GENERIC-NEXT: movb %al, 4(%rsi)
; GENERIC-NEXT: shrl $16, %edi
; GENERIC-NEXT: movb %dil, 5(%rsi)
; GENERIC-NEXT: popq %rbx
; GENERIC-NEXT: popq %r14
; GENERIC-NEXT: popq %rbp
; GENERIC-NEXT: retq
;
; ATOM-LABEL: t:
; ATOM: ## %bb.0: ## %entry
; ATOM-NEXT: pushq %rbp
; ATOM-NEXT: pushq %r15
; ATOM-NEXT: pushq %r14
; ATOM-NEXT: pushq %rbx
; ATOM-NEXT: ## kill: def $ecx killed $ecx def $rcx
; ATOM-NEXT: movl (%rdx), %r15d
; ATOM-NEXT: movl 4(%rdx), %eax
; ATOM-NEXT: leaq 20(%rdx), %r14
; ATOM-NEXT: movq _Te0@{{.*}}(%rip), %r9
; ATOM-NEXT: movq _Te1@{{.*}}(%rip), %r8
; ATOM-NEXT: movq _Te3@{{.*}}(%rip), %r10
; ATOM-NEXT: decl %ecx
; ATOM-NEXT: movq %rcx, %r11
; ATOM-NEXT: .p2align 4, 0x90
; ATOM-NEXT: LBB0_1: ## %bb
; ATOM-NEXT: ## =>This Inner Loop Header: Depth=1
; ATOM-NEXT: movl %eax, %edi
; ATOM-NEXT: movl %r15d, %ebp
; ATOM-NEXT: shrl $24, %eax
; ATOM-NEXT: shrl $16, %edi
; ATOM-NEXT: shrl $24, %ebp
; ATOM-NEXT: movzbl %dil, %edi
; ATOM-NEXT: movl (%r8,%rdi,4), %ebx
; ATOM-NEXT: movzbl %r15b, %edi
; ATOM-NEXT: xorl (%r9,%rbp,4), %ebx
; ATOM-NEXT: movl (%r10,%rdi,4), %edi
; ATOM-NEXT: xorl -12(%r14), %ebx
; ATOM-NEXT: xorl (%r9,%rax,4), %edi
; ATOM-NEXT: movl %ebx, %eax
; ATOM-NEXT: xorl -8(%r14), %edi
; ATOM-NEXT: shrl $24, %eax
; ATOM-NEXT: movl (%r9,%rax,4), %r15d
; ATOM-NEXT: testq %r11, %r11
; ATOM-NEXT: movl %edi, %eax
; ATOM-NEXT: je LBB0_3
; ATOM-NEXT: ## %bb.2: ## %bb1
; ATOM-NEXT: ## in Loop: Header=BB0_1 Depth=1
; ATOM-NEXT: shrl $16, %eax
; ATOM-NEXT: shrl $24, %edi
; ATOM-NEXT: decq %r11
; ATOM-NEXT: movzbl %al, %ebp
; ATOM-NEXT: movzbl %bl, %eax
; ATOM-NEXT: movl (%r10,%rax,4), %eax
; ATOM-NEXT: xorl (%r8,%rbp,4), %r15d
; ATOM-NEXT: xorl (%r9,%rdi,4), %eax
; ATOM-NEXT: xorl -4(%r14), %r15d
; ATOM-NEXT: xorl (%r14), %eax
; ATOM-NEXT: addq $16, %r14
; ATOM-NEXT: jmp LBB0_1
; ATOM-NEXT: LBB0_3: ## %bb2
; ATOM-NEXT: shrl $16, %eax
; ATOM-NEXT: shrl $8, %edi
; ATOM-NEXT: movzbl %bl, %ebp
; ATOM-NEXT: andl $-16777216, %r15d ## imm = 0xFF000000
; ATOM-NEXT: shlq $4, %rcx
; ATOM-NEXT: movzbl %al, %eax
; ATOM-NEXT: movzbl 3(%r9,%rdi,4), %edi
; ATOM-NEXT: movzbl 2(%r8,%rbp,4), %ebp
; ATOM-NEXT: movzbl 2(%r8,%rax,4), %eax
; ATOM-NEXT: shll $24, %edi
; ATOM-NEXT: shll $16, %ebp
; ATOM-NEXT: shll $16, %eax
; ATOM-NEXT: orl %edi, %ebp
; ATOM-NEXT: orl %r15d, %eax
; ATOM-NEXT: xorl 20(%rcx,%rdx), %ebp
; ATOM-NEXT: xorl 16(%rcx,%rdx), %eax
; ATOM-NEXT: movl %eax, %edi
; ATOM-NEXT: shrl $16, %eax
; ATOM-NEXT: shrl $24, %edi
; ATOM-NEXT: movb %dil, (%rsi)
; ATOM-NEXT: movb %al, 1(%rsi)
; ATOM-NEXT: movl %ebp, %eax
; ATOM-NEXT: shrl $16, %ebp
; ATOM-NEXT: shrl $24, %eax
; ATOM-NEXT: movb %al, 4(%rsi)
; ATOM-NEXT: movb %bpl, 5(%rsi)
; ATOM-NEXT: popq %rbx
; ATOM-NEXT: popq %r14
; ATOM-NEXT: popq %r15
; ATOM-NEXT: popq %rbp
; ATOM-NEXT: retq
entry:
  %0 = load i32, i32* %rk, align 4 ; <i32> [#uses=1]
  %1 = getelementptr i32, i32* %rk, i64 1 ; <i32*> [#uses=1]
  %2 = load i32, i32* %1, align 4 ; <i32> [#uses=1]
  %tmp15 = add i32 %r, -1 ; <i32> [#uses=1]
  %tmp.16 = zext i32 %tmp15 to i64 ; <i64> [#uses=2]
  br label %bb

bb: ; preds = %bb1, %entry
  %indvar = phi i64 [ 0, %entry ], [ %indvar.next, %bb1 ] ; <i64> [#uses=3]
  %s1.0 = phi i32 [ %2, %entry ], [ %56, %bb1 ] ; <i32> [#uses=2]
  %s0.0 = phi i32 [ %0, %entry ], [ %43, %bb1 ] ; <i32> [#uses=2]
  %tmp18 = shl i64 %indvar, 4 ; <i64> [#uses=4]
  %rk26 = bitcast i32* %rk to i8* ; <i8*> [#uses=6]
  %3 = lshr i32 %s0.0, 24 ; <i32> [#uses=1]
  %4 = zext i32 %3 to i64 ; <i64> [#uses=1]
  %5 = getelementptr [256 x i32], [256 x i32]* @Te0, i64 0, i64 %4 ; <i32*> [#uses=1]
  %6 = load i32, i32* %5, align 4 ; <i32> [#uses=1]
  %7 = lshr i32 %s1.0, 16 ; <i32> [#uses=1]
  %8 = and i32 %7, 255 ; <i32> [#uses=1]
  %9 = zext i32 %8 to i64 ; <i64> [#uses=1]
  %10 = getelementptr [256 x i32], [256 x i32]* @Te1, i64 0, i64 %9 ; <i32*> [#uses=1]
  %11 = load i32, i32* %10, align 4 ; <i32> [#uses=1]
  %ctg2.sum2728 = or i64 %tmp18, 8 ; <i64> [#uses=1]
  %12 = getelementptr i8, i8* %rk26, i64 %ctg2.sum2728 ; <i8*> [#uses=1]
  %13 = bitcast i8* %12 to i32* ; <i32*> [#uses=1]
  %14 = load i32, i32* %13, align 4 ; <i32> [#uses=1]
  %15 = xor i32 %11, %6 ; <i32> [#uses=1]
  %16 = xor i32 %15, %14 ; <i32> [#uses=3]
  %17 = lshr i32 %s1.0, 24 ; <i32> [#uses=1]
  %18 = zext i32 %17 to i64 ; <i64> [#uses=1]
  %19 = getelementptr [256 x i32], [256 x i32]* @Te0, i64 0, i64 %18 ; <i32*> [#uses=1]
  %20 = load i32, i32* %19, align 4 ; <i32> [#uses=1]
  %21 = and i32 %s0.0, 255 ; <i32> [#uses=1]
  %22 = zext i32 %21 to i64 ; <i64> [#uses=1]
  %23 = getelementptr [256 x i32], [256 x i32]* @Te3, i64 0, i64 %22 ; <i32*> [#uses=1]
  %24 = load i32, i32* %23, align 4 ; <i32> [#uses=1]
  %ctg2.sum2930 = or i64 %tmp18, 12 ; <i64> [#uses=1]
  %25 = getelementptr i8, i8* %rk26, i64 %ctg2.sum2930 ; <i8*> [#uses=1]
  %26 = bitcast i8* %25 to i32* ; <i32*> [#uses=1]
  %27 = load i32, i32* %26, align 4 ; <i32> [#uses=1]
  %28 = xor i32 %24, %20 ; <i32> [#uses=1]
  %29 = xor i32 %28, %27 ; <i32> [#uses=4]
  %30 = lshr i32 %16, 24 ; <i32> [#uses=1]
  %31 = zext i32 %30 to i64 ; <i64> [#uses=1]
  %32 = getelementptr [256 x i32], [256 x i32]* @Te0, i64 0, i64 %31 ; <i32*> [#uses=1]
  %33 = load i32, i32* %32, align 4 ; <i32> [#uses=2]
  %exitcond = icmp eq i64 %indvar, %tmp.16 ; <i1> [#uses=1]
  br i1 %exitcond, label %bb2, label %bb1

bb1: ; preds = %bb
  %ctg2.sum31 = add i64 %tmp18, 16 ; <i64> [#uses=1]
  %34 = getelementptr i8, i8* %rk26, i64 %ctg2.sum31 ; <i8*> [#uses=1]
  %35 = bitcast i8* %34 to i32* ; <i32*> [#uses=1]
  %36 = lshr i32 %29, 16 ; <i32> [#uses=1]
  %37 = and i32 %36, 255 ; <i32> [#uses=1]
  %38 = zext i32 %37 to i64 ; <i64> [#uses=1]
  %39 = getelementptr [256 x i32], [256 x i32]* @Te1, i64 0, i64 %38 ; <i32*> [#uses=1]
  %40 = load i32, i32* %39, align 4 ; <i32> [#uses=1]
  %41 = load i32, i32* %35, align 4 ; <i32> [#uses=1]
  %42 = xor i32 %40, %33 ; <i32> [#uses=1]
  %43 = xor i32 %42, %41 ; <i32> [#uses=1]
  %44 = lshr i32 %29, 24 ; <i32> [#uses=1]
  %45 = zext i32 %44 to i64 ; <i64> [#uses=1]
  %46 = getelementptr [256 x i32], [256 x i32]* @Te0, i64 0, i64 %45 ; <i32*> [#uses=1]
  %47 = load i32, i32* %46, align 4 ; <i32> [#uses=1]
  %48 = and i32 %16, 255 ; <i32> [#uses=1]
  %49 = zext i32 %48 to i64 ; <i64> [#uses=1]
  %50 = getelementptr [256 x i32], [256 x i32]* @Te3, i64 0, i64 %49 ; <i32*> [#uses=1]
  %51 = load i32, i32* %50, align 4 ; <i32> [#uses=1]
  %ctg2.sum32 = add i64 %tmp18, 20 ; <i64> [#uses=1]
  %52 = getelementptr i8, i8* %rk26, i64 %ctg2.sum32 ; <i8*> [#uses=1]
  %53 = bitcast i8* %52 to i32* ; <i32*> [#uses=1]
  %54 = load i32, i32* %53, align 4 ; <i32> [#uses=1]
  %55 = xor i32 %51, %47 ; <i32> [#uses=1]
  %56 = xor i32 %55, %54 ; <i32> [#uses=1]
  %indvar.next = add i64 %indvar, 1 ; <i64> [#uses=1]
  br label %bb

bb2: ; preds = %bb
  %tmp10 = shl i64 %tmp.16, 4 ; <i64> [#uses=2]
  %ctg2.sum = add i64 %tmp10, 16 ; <i64> [#uses=1]
  %tmp1213 = getelementptr i8, i8* %rk26, i64 %ctg2.sum ; <i8*> [#uses=1]
  %57 = bitcast i8* %tmp1213 to i32* ; <i32*> [#uses=1]
  %58 = and i32 %33, -16777216 ; <i32> [#uses=1]
  %59 = lshr i32 %29, 16 ; <i32> [#uses=1]
  %60 = and i32 %59, 255 ; <i32> [#uses=1]
  %61 = zext i32 %60 to i64 ; <i64> [#uses=1]
  %62 = getelementptr [256 x i32], [256 x i32]* @Te1, i64 0, i64 %61 ; <i32*> [#uses=1]
  %63 = load i32, i32* %62, align 4 ; <i32> [#uses=1]
  %64 = and i32 %63, 16711680 ; <i32> [#uses=1]
  %65 = or i32 %64, %58 ; <i32> [#uses=1]
  %66 = load i32, i32* %57, align 4 ; <i32> [#uses=1]
  %67 = xor i32 %65, %66 ; <i32> [#uses=2]
  %68 = lshr i32 %29, 8 ; <i32> [#uses=1]
  %69 = zext i32 %68 to i64 ; <i64> [#uses=1]
  %70 = getelementptr [256 x i32], [256 x i32]* @Te0, i64 0, i64 %69 ; <i32*> [#uses=1]
  %71 = load i32, i32* %70, align 4 ; <i32> [#uses=1]
  %72 = and i32 %71, -16777216 ; <i32> [#uses=1]
  %73 = and i32 %16, 255 ; <i32> [#uses=1]
  %74 = zext i32 %73 to i64 ; <i64> [#uses=1]
  %75 = getelementptr [256 x i32], [256 x i32]* @Te1, i64 0, i64 %74 ; <i32*> [#uses=1]
  %76 = load i32, i32* %75, align 4 ; <i32> [#uses=1]
  %77 = and i32 %76, 16711680 ; <i32> [#uses=1]
  %78 = or i32 %77, %72 ; <i32> [#uses=1]
  %ctg2.sum25 = add i64 %tmp10, 20 ; <i64> [#uses=1]
  %79 = getelementptr i8, i8* %rk26, i64 %ctg2.sum25 ; <i8*> [#uses=1]
  %80 = bitcast i8* %79 to i32* ; <i32*> [#uses=1]
  %81 = load i32, i32* %80, align 4 ; <i32> [#uses=1]
  %82 = xor i32 %78, %81 ; <i32> [#uses=2]
  %83 = lshr i32 %67, 24 ; <i32> [#uses=1]
  %84 = trunc i32 %83 to i8 ; <i8> [#uses=1]
  store i8 %84, i8* %out, align 1
  %85 = lshr i32 %67, 16 ; <i32> [#uses=1]
  %86 = trunc i32 %85 to i8 ; <i8> [#uses=1]
  %87 = getelementptr i8, i8* %out, i64 1 ; <i8*> [#uses=1]
  store i8 %86, i8* %87, align 1
  %88 = getelementptr i8, i8* %out, i64 4 ; <i8*> [#uses=1]
  %89 = lshr i32 %82, 24 ; <i32> [#uses=1]
  %90 = trunc i32 %89 to i8 ; <i8> [#uses=1]
  store i8 %90, i8* %88, align 1
  %91 = lshr i32 %82, 16 ; <i32> [#uses=1]
  %92 = trunc i32 %91 to i8 ; <i8> [#uses=1]
  %93 = getelementptr i8, i8* %out, i64 5 ; <i8*> [#uses=1]
  store i8 %92, i8* %93, align 1
  ret void
}

; Check that DAGCombiner doesn't mess up the IV update when the exiting value
; is equal to the stride.
; It must not fold (cmp (add iv, 1), 1) --> (cmp iv, 0).
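;
; As a rough illustration only (an assumed C equivalent, not taken from the
; original test sources), @f below has the shape:
;
;   int f(int i, const int *a) {
;     int b = 0, bi = 0;
;     for (; i != 1; ++i)                /* rotated latch: cmp (add iv, 1), 1 */
;       if ((unsigned)a[i] > (unsigned)b) { b = a[i]; bi = i; }
;     return bi;
;   }
;
; If the fold happened, the latch would compare the pre-increment value against
; 0; the checks below instead expect the increment followed by a compare with 1
; (incl then cmpl $1).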

define i32 @f(i32 %i, i32* nocapture %a) nounwind uwtable readonly ssp {
; GENERIC-LABEL: f:
; GENERIC: ## %bb.0: ## %entry
; GENERIC-NEXT: xorl %eax, %eax
; GENERIC-NEXT: cmpl $1, %edi
; GENERIC-NEXT: je LBB1_3
; GENERIC-NEXT: ## %bb.1: ## %for.body.lr.ph
; GENERIC-NEXT: movslq %edi, %rax
; GENERIC-NEXT: leaq (%rsi,%rax,4), %rcx
; GENERIC-NEXT: xorl %eax, %eax
; GENERIC-NEXT: xorl %edx, %edx
; GENERIC-NEXT: .p2align 4, 0x90
; GENERIC-NEXT: LBB1_2: ## %for.body
; GENERIC-NEXT: ## =>This Inner Loop Header: Depth=1
; GENERIC-NEXT: movl (%rcx), %esi
; GENERIC-NEXT: cmpl %edx, %esi
; GENERIC-NEXT: cmoval %esi, %edx
; GENERIC-NEXT: cmoval %edi, %eax
; GENERIC-NEXT: incl %edi
; GENERIC-NEXT: addq $4, %rcx
; GENERIC-NEXT: cmpl $1, %edi
; GENERIC-NEXT: jne LBB1_2
; GENERIC-NEXT: LBB1_3: ## %for.end
; GENERIC-NEXT: retq
;
; ATOM-LABEL: f:
; ATOM: ## %bb.0: ## %entry
; ATOM-NEXT: xorl %eax, %eax
; ATOM-NEXT: cmpl $1, %edi
; ATOM-NEXT: je LBB1_3
; ATOM-NEXT: ## %bb.1: ## %for.body.lr.ph
; ATOM-NEXT: movslq %edi, %rax
; ATOM-NEXT: xorl %edx, %edx
; ATOM-NEXT: leaq (%rsi,%rax,4), %rcx
; ATOM-NEXT: xorl %eax, %eax
; ATOM-NEXT: .p2align 4, 0x90
; ATOM-NEXT: LBB1_2: ## %for.body
; ATOM-NEXT: ## =>This Inner Loop Header: Depth=1
; ATOM-NEXT: movl (%rcx), %esi
; ATOM-NEXT: cmpl %edx, %esi
; ATOM-NEXT: cmoval %esi, %edx
; ATOM-NEXT: cmoval %edi, %eax
; ATOM-NEXT: incl %edi
; ATOM-NEXT: leaq 4(%rcx), %rcx
; ATOM-NEXT: cmpl $1, %edi
; ATOM-NEXT: jne LBB1_2
; ATOM-NEXT: LBB1_3: ## %for.end
; ATOM-NEXT: nop
; ATOM-NEXT: nop
; ATOM-NEXT: retq
entry:
  %cmp4 = icmp eq i32 %i, 1
  br i1 %cmp4, label %for.end, label %for.body.lr.ph

for.body.lr.ph: ; preds = %entry
  %0 = sext i32 %i to i64
  br label %for.body

for.body: ; preds = %for.body.lr.ph, %for.body
  %indvars.iv = phi i64 [ %0, %for.body.lr.ph ], [ %indvars.iv.next, %for.body ]
  %bi.06 = phi i32 [ 0, %for.body.lr.ph ], [ %i.addr.0.bi.0, %for.body ]
  %b.05 = phi i32 [ 0, %for.body.lr.ph ], [ %.b.0, %for.body ]
  %arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
  %1 = load i32, i32* %arrayidx, align 4
  %cmp1 = icmp ugt i32 %1, %b.05
  %.b.0 = select i1 %cmp1, i32 %1, i32 %b.05
  %2 = trunc i64 %indvars.iv to i32
  %i.addr.0.bi.0 = select i1 %cmp1, i32 %2, i32 %bi.06
  %indvars.iv.next = add i64 %indvars.iv, 1
  %lftr.wideiv = trunc i64 %indvars.iv.next to i32
  %exitcond = icmp eq i32 %lftr.wideiv, 1
  br i1 %exitcond, label %for.end, label %for.body

for.end: ; preds = %for.body, %entry
  %bi.0.lcssa = phi i32 [ 0, %entry ], [ %i.addr.0.bi.0, %for.body ]
  ret i32 %bi.0.lcssa
}