; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -licm -loop-vectorize -force-vector-width=4 -dce -instcombine -licm -S | FileCheck %s

; The first licm pass is to hoist/sink invariant stores if possible. Today LICM
; does not hoist/sink the invariant stores. Even if that changes, we should
; still vectorize this loop in case licm is not run.

; The next licm pass after vectorization is to hoist/sink loop-invariant
; instructions.
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"

; This file separates tests with auto-generated check lines from
; invariant-store-vectorization.ll for maintenance.
; All tests check that it is legal to vectorize the stores to an invariant
; address.

; Instcombine'd version of @inv_val_store_to_inv_address_conditional_diff_values.
; Now the store is no longer of an invariant value. The scalar store uses the
; value extracted from the last element of the vector value.
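; As an illustrative sketch only (not part of the test input), the scalar loop
; below is roughly equivalent to:
;   i = 0;
;   do {
;     bool c = (b[i] == k);
;     b[i] = (int)n;            /* ntrunc */
;     *a = c ? (int)n : k;      /* invariant address, value varies per iteration */
;   } while (++i < n);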
define void @inv_val_store_to_inv_address_conditional_diff_values_ic(i32* %a, i64 %n, i32* %b, i32 %k) {
; CHECK-LABEL: @inv_val_store_to_inv_address_conditional_diff_values_ic(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[NTRUNC:%.*]] = trunc i64 [[N:%.*]] to i32
; CHECK-NEXT: [[SMAX6:%.*]] = call i64 @llvm.smax.i64(i64 [[N]], i64 1)
; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[SMAX6]], 4
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_MEMCHECK:%.*]]
; CHECK: vector.memcheck:
; CHECK-NEXT: [[SMAX:%.*]] = call i64 @llvm.smax.i64(i64 [[N]], i64 1)
; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i32, i32* [[B:%.*]], i64 [[SMAX]]
; CHECK-NEXT: [[SCEVGEP4:%.*]] = getelementptr i32, i32* [[A:%.*]], i64 1
; CHECK-NEXT: [[BOUND0:%.*]] = icmp ugt i32* [[SCEVGEP4]], [[B]]
; CHECK-NEXT: [[BOUND1:%.*]] = icmp ugt i32* [[SCEVGEP]], [[A]]
; CHECK-NEXT: [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]]
; CHECK-NEXT: br i1 [[FOUND_CONFLICT]], label [[SCALAR_PH]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
; CHECK-NEXT: [[N_VEC:%.*]] = and i64 [[SMAX6]], 9223372036854775804
; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[K:%.*]], i64 0
; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT]], <4 x i32> poison, <4 x i32> zeroinitializer
; CHECK-NEXT: [[BROADCAST_SPLATINSERT7:%.*]] = insertelement <4 x i32> poison, i32 [[NTRUNC]], i64 0
; CHECK-NEXT: [[BROADCAST_SPLAT8:%.*]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT7]], <4 x i32> poison, <4 x i32> zeroinitializer
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds i32, i32* [[B]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP1:%.*]] = bitcast i32* [[TMP0]] to <4 x i32>*
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, <4 x i32>* [[TMP1]], align 8, !alias.scope !0, !noalias !3
; CHECK-NEXT: [[DOTNOT:%.*]] = icmp eq <4 x i32> [[WIDE_LOAD]], [[BROADCAST_SPLAT]]
; CHECK-NEXT: [[TMP2:%.*]] = bitcast i32* [[TMP0]] to <4 x i32>*
; CHECK-NEXT: store <4 x i32> [[BROADCAST_SPLAT8]], <4 x i32>* [[TMP2]], align 4, !alias.scope !0, !noalias !3
; CHECK-NEXT: [[PREDPHI:%.*]] = select <4 x i1> [[DOTNOT]], <4 x i32> [[BROADCAST_SPLAT8]], <4 x i32> [[BROADCAST_SPLAT]]
; CHECK-NEXT: [[TMP3:%.*]] = extractelement <4 x i32> [[PREDPHI]], i64 3
; CHECK-NEXT: store i32 [[TMP3]], i32* [[A]], align 4, !alias.scope !3
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT: [[TMP4:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP4]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[SMAX6]], [[N_VEC]]
; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]]
; CHECK: scalar.ph:
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ], [ 0, [[VECTOR_MEMCHECK]] ]
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[I:%.*]] = phi i64 [ [[I_NEXT:%.*]], [[LATCH:%.*]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
; CHECK-NEXT: [[I1:%.*]] = getelementptr inbounds i32, i32* [[B]], i64 [[I]]
; CHECK-NEXT: [[I2:%.*]] = load i32, i32* [[I1]], align 8
; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[I2]], [[K]]
; CHECK-NEXT: store i32 [[NTRUNC]], i32* [[I1]], align 4
; CHECK-NEXT: br i1 [[CMP]], label [[COND_STORE:%.*]], label [[COND_STORE_K:%.*]]
; CHECK: cond_store:
; CHECK-NEXT: br label [[LATCH]]
; CHECK: cond_store_k:
; CHECK-NEXT: br label [[LATCH]]
; CHECK: latch:
; CHECK-NEXT: [[STOREVAL:%.*]] = phi i32 [ [[NTRUNC]], [[COND_STORE]] ], [ [[K]], [[COND_STORE_K]] ]
; CHECK-NEXT: store i32 [[STOREVAL]], i32* [[A]], align 4
; CHECK-NEXT: [[I_NEXT]] = add nuw nsw i64 [[I]], 1
; CHECK-NEXT: [[COND:%.*]] = icmp slt i64 [[I_NEXT]], [[N]]
; CHECK-NEXT: br i1 [[COND]], label [[FOR_BODY]], label [[FOR_END_LOOPEXIT:%.*]], !llvm.loop [[LOOP7:![0-9]+]]
; CHECK: for.end.loopexit:
; CHECK-NEXT: br label [[FOR_END]]
; CHECK: for.end:
; CHECK-NEXT: ret void
;
entry:
  %ntrunc = trunc i64 %n to i32
  br label %for.body

for.body:                                         ; preds = %for.body, %entry
  %i = phi i64 [ %i.next, %latch ], [ 0, %entry ]
  %i1 = getelementptr inbounds i32, i32* %b, i64 %i
  %i2 = load i32, i32* %i1, align 8
  %cmp = icmp eq i32 %i2, %k
  store i32 %ntrunc, i32* %i1
  br i1 %cmp, label %cond_store, label %cond_store_k

cond_store:
  br label %latch

cond_store_k:
  br label %latch

latch:
  %storeval = phi i32 [ %ntrunc, %cond_store ], [ %k, %cond_store_k ]
  store i32 %storeval, i32* %a
  %i.next = add nuw nsw i64 %i, 1
  %cond = icmp slt i64 %i.next, %n
  br i1 %cond, label %for.body, label %for.end

for.end:                                          ; preds = %for.body
  ret void
}

; Invariant value stored to an invariant address, predicated on an invariant
; condition. This is not treated as a predicated store, since the block the
; store belongs to is the latch block (which doesn't need to be predicated).
; Variant/invariant values are stored to an invariant address.
; The test checks that the last element of the phi is extracted and scalar
; stored into the uniform address within the loop.
; Since the condition and the phi are loop invariant, they are LICM'ed after
; vectorization.
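; As an illustrative sketch only (not part of the test input), the scalar loop
; below is roughly equivalent to (the load of b[i] in the IR is unused):
;   bool c = ((int)n == k);     /* loop-invariant condition */
;   i = 0;
;   do {
;     b[i] = (int)n;
;     *a = c ? (int)n : k;      /* both value and address are loop invariant */
;   } while (++i < n);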
define void @inv_val_store_to_inv_address_conditional_inv(i32* %a, i64 %n, i32* %b, i32 %k) {
; CHECK-LABEL: @inv_val_store_to_inv_address_conditional_inv(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[NTRUNC:%.*]] = trunc i64 [[N:%.*]] to i32
; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[NTRUNC]], [[K:%.*]]
; CHECK-NEXT: [[SMAX6:%.*]] = call i64 @llvm.smax.i64(i64 [[N]], i64 1)
; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[SMAX6]], 4
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_MEMCHECK:%.*]]
; CHECK: vector.memcheck:
; CHECK-NEXT: [[SMAX:%.*]] = call i64 @llvm.smax.i64(i64 [[N]], i64 1)
; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i32, i32* [[B:%.*]], i64 [[SMAX]]
; CHECK-NEXT: [[SCEVGEP4:%.*]] = getelementptr i32, i32* [[A:%.*]], i64 1
; CHECK-NEXT: [[BOUND0:%.*]] = icmp ugt i32* [[SCEVGEP4]], [[B]]
; CHECK-NEXT: [[BOUND1:%.*]] = icmp ugt i32* [[SCEVGEP]], [[A]]
; CHECK-NEXT: [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]]
; CHECK-NEXT: br i1 [[FOUND_CONFLICT]], label [[SCALAR_PH]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
; CHECK-NEXT: [[N_VEC:%.*]] = and i64 [[SMAX6]], 9223372036854775804
; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[NTRUNC]], i64 0
; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT]], <4 x i32> poison, <4 x i32> zeroinitializer
; CHECK-NEXT: [[TMP0:%.*]] = insertelement <4 x i1> undef, i1 [[CMP]], i64 3
; CHECK-NEXT: [[BROADCAST_SPLAT10:%.*]] = insertelement <4 x i32> poison, i32 [[K]], i64 3
; CHECK-NEXT: [[PREDPHI:%.*]] = select <4 x i1> [[TMP0]], <4 x i32> [[BROADCAST_SPLAT]], <4 x i32> [[BROADCAST_SPLAT10]]
; CHECK-NEXT: [[TMP1:%.*]] = extractelement <4 x i32> [[PREDPHI]], i64 3
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds i32, i32* [[B]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP3:%.*]] = bitcast i32* [[TMP2]] to <4 x i32>*
; CHECK-NEXT: store <4 x i32> [[BROADCAST_SPLAT]], <4 x i32>* [[TMP3]], align 4, !alias.scope !8, !noalias !11
; CHECK-NEXT: store i32 [[TMP1]], i32* [[A]], align 4, !alias.scope !11
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT: [[TMP4:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP4]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[SMAX6]], [[N_VEC]]
; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]]
; CHECK: scalar.ph:
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ], [ 0, [[VECTOR_MEMCHECK]] ]
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[I:%.*]] = phi i64 [ [[I_NEXT:%.*]], [[LATCH:%.*]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
; CHECK-NEXT: [[I1:%.*]] = getelementptr inbounds i32, i32* [[B]], i64 [[I]]
; CHECK-NEXT: store i32 [[NTRUNC]], i32* [[I1]], align 4
; CHECK-NEXT: br i1 [[CMP]], label [[COND_STORE:%.*]], label [[COND_STORE_K:%.*]]
; CHECK: cond_store:
; CHECK-NEXT: br label [[LATCH]]
; CHECK: cond_store_k:
; CHECK-NEXT: br label [[LATCH]]
; CHECK: latch:
; CHECK-NEXT: [[STOREVAL:%.*]] = phi i32 [ [[NTRUNC]], [[COND_STORE]] ], [ [[K]], [[COND_STORE_K]] ]
; CHECK-NEXT: store i32 [[STOREVAL]], i32* [[A]], align 4
; CHECK-NEXT: [[I_NEXT]] = add nuw nsw i64 [[I]], 1
; CHECK-NEXT: [[COND:%.*]] = icmp slt i64 [[I_NEXT]], [[N]]
; CHECK-NEXT: br i1 [[COND]], label [[FOR_BODY]], label [[FOR_END_LOOPEXIT:%.*]], !llvm.loop [[LOOP14:![0-9]+]]
; CHECK: for.end.loopexit:
; CHECK-NEXT: br label [[FOR_END]]
; CHECK: for.end:
; CHECK-NEXT: ret void
;
entry:
  %ntrunc = trunc i64 %n to i32
  %cmp = icmp eq i32 %ntrunc, %k
  br label %for.body

for.body:                                         ; preds = %for.body, %entry
  %i = phi i64 [ %i.next, %latch ], [ 0, %entry ]
  %i1 = getelementptr inbounds i32, i32* %b, i64 %i
  %i2 = load i32, i32* %i1, align 8
  store i32 %ntrunc, i32* %i1
  br i1 %cmp, label %cond_store, label %cond_store_k

cond_store:
  br label %latch

cond_store_k:
  br label %latch

latch:
  %storeval = phi i32 [ %ntrunc, %cond_store ], [ %k, %cond_store_k ]
  store i32 %storeval, i32* %a
  %i.next = add nuw nsw i64 %i, 1
  %cond = icmp slt i64 %i.next, %n
  br i1 %cond, label %for.body, label %for.end

for.end:                                          ; preds = %for.body
  ret void
}

; Variant value stored to a uniform address. The test checks that codegen
; extracts the last element from the variant vector and scalar-stores it into
; the uniform address.
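; As an illustrative sketch only (not part of the test input), the loop below
; is roughly equivalent to:
;   int sum = 0;
;   i = 0;
;   do {
;     *a = b[i];                /* variant value, invariant address */
;     sum += b[i];
;   } while (++i < n);
;   return sum;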
define i32 @variant_val_store_to_inv_address(i32* %a, i64 %n, i32* %b, i32 %k) {
; CHECK-LABEL: @variant_val_store_to_inv_address(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[SMAX6:%.*]] = call i64 @llvm.smax.i64(i64 [[N:%.*]], i64 1)
; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[SMAX6]], 4
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_MEMCHECK:%.*]]
; CHECK: vector.memcheck:
; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i32, i32* [[A:%.*]], i64 1
; CHECK-NEXT: [[SMAX:%.*]] = call i64 @llvm.smax.i64(i64 [[N]], i64 1)
; CHECK-NEXT: [[SCEVGEP4:%.*]] = getelementptr i32, i32* [[B:%.*]], i64 [[SMAX]]
; CHECK-NEXT: [[BOUND0:%.*]] = icmp ugt i32* [[SCEVGEP4]], [[A]]
; CHECK-NEXT: [[BOUND1:%.*]] = icmp ugt i32* [[SCEVGEP]], [[B]]
; CHECK-NEXT: [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]]
; CHECK-NEXT: br i1 [[FOUND_CONFLICT]], label [[SCALAR_PH]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
; CHECK-NEXT: [[N_VEC:%.*]] = and i64 [[SMAX6]], 9223372036854775804
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP3:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds i32, i32* [[B]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP1:%.*]] = bitcast i32* [[TMP0]] to <4 x i32>*
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, <4 x i32>* [[TMP1]], align 8, !alias.scope !15
; CHECK-NEXT: [[TMP2:%.*]] = extractelement <4 x i32> [[WIDE_LOAD]], i64 3
; CHECK-NEXT: store i32 [[TMP2]], i32* [[A]], align 4, !alias.scope !18, !noalias !15
; CHECK-NEXT: [[TMP3]] = add <4 x i32> [[VEC_PHI]], [[WIDE_LOAD]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT: [[TMP4:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP4]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: [[DOTLCSSA:%.*]] = phi <4 x i32> [ [[TMP3]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP5:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[DOTLCSSA]])
; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[SMAX6]], [[N_VEC]]
; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]]
; CHECK: scalar.ph:
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ], [ 0, [[VECTOR_MEMCHECK]] ]
; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[TMP5]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY]] ], [ 0, [[VECTOR_MEMCHECK]] ]
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[I:%.*]] = phi i64 [ [[I_NEXT:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
; CHECK-NEXT: [[I0:%.*]] = phi i32 [ [[I3:%.*]], [[FOR_BODY]] ], [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ]
; CHECK-NEXT: [[I1:%.*]] = getelementptr inbounds i32, i32* [[B]], i64 [[I]]
; CHECK-NEXT: [[I2:%.*]] = load i32, i32* [[I1]], align 8
; CHECK-NEXT: store i32 [[I2]], i32* [[A]], align 4
; CHECK-NEXT: [[I3]] = add i32 [[I0]], [[I2]]
; CHECK-NEXT: [[I_NEXT]] = add nuw nsw i64 [[I]], 1
; CHECK-NEXT: [[COND:%.*]] = icmp slt i64 [[I_NEXT]], [[N]]
; CHECK-NEXT: br i1 [[COND]], label [[FOR_BODY]], label [[FOR_END_LOOPEXIT:%.*]], !llvm.loop [[LOOP21:![0-9]+]]
; CHECK: for.end.loopexit:
; CHECK-NEXT: [[I3_LCSSA:%.*]] = phi i32 [ [[I3]], [[FOR_BODY]] ]
; CHECK-NEXT: br label [[FOR_END]]
; CHECK: for.end:
; CHECK-NEXT: [[RDX_LCSSA:%.*]] = phi i32 [ [[TMP5]], [[MIDDLE_BLOCK]] ], [ [[I3_LCSSA]], [[FOR_END_LOOPEXIT]] ]
; CHECK-NEXT: ret i32 [[RDX_LCSSA]]
;
entry:
  %ntrunc = trunc i64 %n to i32
  %cmp = icmp eq i32 %ntrunc, %k
  br label %for.body

for.body:                                         ; preds = %for.body, %entry
  %i = phi i64 [ %i.next, %for.body ], [ 0, %entry ]
  %i0 = phi i32 [ %i3, %for.body ], [ 0, %entry ]
  %i1 = getelementptr inbounds i32, i32* %b, i64 %i
  %i2 = load i32, i32* %i1, align 8
  store i32 %i2, i32* %a
  %i3 = add i32 %i0, %i2
  %i.next = add nuw nsw i64 %i, 1
  %cond = icmp slt i64 %i.next, %n
  br i1 %cond, label %for.body, label %for.end

for.end:                                          ; preds = %for.body
  %rdx.lcssa = phi i32 [ %i3, %for.body ]
  ret i32 %rdx.lcssa
}