; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -slp-vectorizer -slp-threshold=-6 -S -pass-remarks-output=%t < %s | FileCheck %s
; RUN: cat %t | FileCheck -check-prefix=YAML %s

; FIXME: The threshold is changed to keep this test case a bit smaller.
; The AArch64 cost model should not give such high costs to select statements.
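; (A negative -slp-threshold lowers the SLP profitability cutoff, so reduction
; trees the cost model rates as mildly unprofitable are still vectorized.)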

target datalayout = "e-m:e-i64:64-i128:128-n32:64-S128"
target triple = "aarch64--linux"

; YAML: --- !Passed
; YAML-NEXT: Pass: slp-vectorizer
; YAML-NEXT: Name: VectorizedHorizontalReduction
; YAML-NEXT: Function: test_select
; YAML-NEXT: Args:
; YAML-NEXT: - String: 'Vectorized horizontal reduction with cost '
; YAML-NEXT: - Cost: '-8'
; YAML-NEXT: - String: ' and with tree size '
; YAML-NEXT: - TreeSize: '8'
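
;; @test_select accumulates absolute differences of four consecutive i32
;; elements per iteration, so the sub/icmp/select chains below should fold
;; into a single vectorized horizontal reduction. A plausible C equivalent
;; of the pattern (a reconstruction for reference only; the identifiers are
;; illustrative, not taken from the test's original source):
;;
;; #include <stdlib.h>
;; int test_select(const int *p1, const int *p2, int lx, int h) {
;;   int s = 0;
;;   for (int j = 0; j < h; j++) {
;;     s += abs(p1[0] - p2[0]);
;;     s += abs(p1[1] - p2[1]);
;;     s += abs(p1[2] - p2[2]);
;;     s += abs(p1[3] - p2[3]);
;;     p1 += lx;
;;     p2 += lx;
;;   }
;;   return s;
;; }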

define i32 @test_select(i32* noalias nocapture readonly %blk1, i32* noalias nocapture readonly %blk2, i32 %lx, i32 %h) {
; CHECK-LABEL: @test_select(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[CMP_22:%.*]] = icmp sgt i32 [[H:%.*]], 0
; CHECK-NEXT: br i1 [[CMP_22]], label [[FOR_BODY_LR_PH:%.*]], label [[FOR_END:%.*]]
; CHECK: for.body.lr.ph:
; CHECK-NEXT: [[IDX_EXT:%.*]] = sext i32 [[LX:%.*]] to i64
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[S_026:%.*]] = phi i32 [ 0, [[FOR_BODY_LR_PH]] ], [ [[OP_EXTRA:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT: [[J_025:%.*]] = phi i32 [ 0, [[FOR_BODY_LR_PH]] ], [ [[INC:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT: [[P2_024:%.*]] = phi i32* [ [[BLK2:%.*]], [[FOR_BODY_LR_PH]] ], [ [[ADD_PTR29:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT: [[P1_023:%.*]] = phi i32* [ [[BLK1:%.*]], [[FOR_BODY_LR_PH]] ], [ [[ADD_PTR:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds i32, i32* [[P1_023]], i64 1
; CHECK-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds i32, i32* [[P2_024]], i64 1
; CHECK-NEXT: [[ARRAYIDX12:%.*]] = getelementptr inbounds i32, i32* [[P1_023]], i64 2
; CHECK-NEXT: [[ARRAYIDX13:%.*]] = getelementptr inbounds i32, i32* [[P2_024]], i64 2
; CHECK-NEXT: [[ARRAYIDX20:%.*]] = getelementptr inbounds i32, i32* [[P1_023]], i64 3
; CHECK-NEXT: [[TMP0:%.*]] = bitcast i32* [[P1_023]] to <4 x i32>*
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, <4 x i32>* [[TMP0]], align 4
; CHECK-NEXT: [[ARRAYIDX21:%.*]] = getelementptr inbounds i32, i32* [[P2_024]], i64 3
; CHECK-NEXT: [[TMP2:%.*]] = bitcast i32* [[P2_024]] to <4 x i32>*
; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, <4 x i32>* [[TMP2]], align 4
; CHECK-NEXT: [[TMP4:%.*]] = sub nsw <4 x i32> [[TMP1]], [[TMP3]]
; CHECK-NEXT: [[TMP5:%.*]] = icmp slt <4 x i32> [[TMP4]], zeroinitializer
; CHECK-NEXT: [[TMP6:%.*]] = sub nsw <4 x i32> zeroinitializer, [[TMP4]]
; CHECK-NEXT: [[TMP7:%.*]] = select <4 x i1> [[TMP5]], <4 x i32> [[TMP6]], <4 x i32> [[TMP4]]
; CHECK-NEXT: [[TMP8:%.*]] = call i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32> [[TMP7]])
; CHECK-NEXT: [[OP_EXTRA]] = add nsw i32 [[TMP8]], [[S_026]]
; CHECK-NEXT: [[ADD_PTR]] = getelementptr inbounds i32, i32* [[P1_023]], i64 [[IDX_EXT]]
; CHECK-NEXT: [[ADD_PTR29]] = getelementptr inbounds i32, i32* [[P2_024]], i64 [[IDX_EXT]]
; CHECK-NEXT: [[INC]] = add nuw nsw i32 [[J_025]], 1
; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[INC]], [[H]]
; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_END_LOOPEXIT:%.*]], label [[FOR_BODY]]
; CHECK: for.end.loopexit:
; CHECK-NEXT: br label [[FOR_END]]
; CHECK: for.end:
; CHECK-NEXT: [[S_0_LCSSA:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[OP_EXTRA]], [[FOR_END_LOOPEXIT]] ]
; CHECK-NEXT: ret i32 [[S_0_LCSSA]]
;
entry:
  %cmp.22 = icmp sgt i32 %h, 0
  br i1 %cmp.22, label %for.body.lr.ph, label %for.end

for.body.lr.ph: ; preds = %entry
  %idx.ext = sext i32 %lx to i64
  br label %for.body

for.body: ; preds = %for.body, %for.body.lr.ph
  %s.026 = phi i32 [ 0, %for.body.lr.ph ], [ %add27, %for.body ]
  %j.025 = phi i32 [ 0, %for.body.lr.ph ], [ %inc, %for.body ]
  %p2.024 = phi i32* [ %blk2, %for.body.lr.ph ], [ %add.ptr29, %for.body ]
  %p1.023 = phi i32* [ %blk1, %for.body.lr.ph ], [ %add.ptr, %for.body ]
  %0 = load i32, i32* %p1.023, align 4
  %1 = load i32, i32* %p2.024, align 4
  %sub = sub nsw i32 %0, %1
  %cmp2 = icmp slt i32 %sub, 0
  %sub3 = sub nsw i32 0, %sub
  %sub3.sub = select i1 %cmp2, i32 %sub3, i32 %sub
  %add = add nsw i32 %sub3.sub, %s.026
  %arrayidx4 = getelementptr inbounds i32, i32* %p1.023, i64 1
  %2 = load i32, i32* %arrayidx4, align 4
  %arrayidx5 = getelementptr inbounds i32, i32* %p2.024, i64 1
  %3 = load i32, i32* %arrayidx5, align 4
  %sub6 = sub nsw i32 %2, %3
  %cmp7 = icmp slt i32 %sub6, 0
  %sub9 = sub nsw i32 0, %sub6
  %v.1 = select i1 %cmp7, i32 %sub9, i32 %sub6
  %add11 = add nsw i32 %add, %v.1
  %arrayidx12 = getelementptr inbounds i32, i32* %p1.023, i64 2
  %4 = load i32, i32* %arrayidx12, align 4
  %arrayidx13 = getelementptr inbounds i32, i32* %p2.024, i64 2
  %5 = load i32, i32* %arrayidx13, align 4
  %sub14 = sub nsw i32 %4, %5
  %cmp15 = icmp slt i32 %sub14, 0
  %sub17 = sub nsw i32 0, %sub14
  %sub17.sub14 = select i1 %cmp15, i32 %sub17, i32 %sub14
  %add19 = add nsw i32 %add11, %sub17.sub14
  %arrayidx20 = getelementptr inbounds i32, i32* %p1.023, i64 3
  %6 = load i32, i32* %arrayidx20, align 4
  %arrayidx21 = getelementptr inbounds i32, i32* %p2.024, i64 3
  %7 = load i32, i32* %arrayidx21, align 4
  %sub22 = sub nsw i32 %6, %7
  %cmp23 = icmp slt i32 %sub22, 0
  %sub25 = sub nsw i32 0, %sub22
  %v.3 = select i1 %cmp23, i32 %sub25, i32 %sub22
  %add27 = add nsw i32 %add19, %v.3
  %add.ptr = getelementptr inbounds i32, i32* %p1.023, i64 %idx.ext
  %add.ptr29 = getelementptr inbounds i32, i32* %p2.024, i64 %idx.ext
  %inc = add nuw nsw i32 %j.025, 1
  %exitcond = icmp eq i32 %inc, %h
  br i1 %exitcond, label %for.end.loopexit, label %for.body

for.end.loopexit: ; preds = %for.body
  br label %for.end

for.end: ; preds = %for.end.loopexit, %entry
  %s.0.lcssa = phi i32 [ 0, %entry ], [ %add27, %for.end.loopexit ]
  ret i32 %s.0.lcssa
}

;; Check whether SLP can find a reduction phi whose incoming blocks are not
;; the same as the block containing the phi.
;;
;; Came from code like,
;;
;; int s = 0;
;; for (int j = 0; j < h; j++) {
;;   s += p1[0] * p2[0];
;;   s += p1[1] * p2[1];
;;   s += p1[2] * p2[2];
;;   s += p1[3] * p2[3];
;;   if (s >= lim)
;;     break;
;;   p1 += lx;
;;   p2 += lx;
;; }
define i32 @reduction_with_br(i32* noalias nocapture readonly %blk1, i32* noalias nocapture readonly %blk2, i32 %lx, i32 %h, i32 %lim) {
; YAML: --- !Passed
; YAML-NEXT: Pass: slp-vectorizer
; YAML-NEXT: Name: VectorizedHorizontalReduction
; YAML-NEXT: Function: reduction_with_br
; YAML-NEXT: Args:
; YAML-NEXT: - String: 'Vectorized horizontal reduction with cost '
; YAML-NEXT: - Cost: '-11'
; YAML-NEXT: - String: ' and with tree size '
; YAML-NEXT: - TreeSize: '3'
; CHECK-LABEL: @reduction_with_br(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[CMP_16:%.*]] = icmp sgt i32 [[H:%.*]], 0
; CHECK-NEXT: br i1 [[CMP_16]], label [[FOR_BODY_LR_PH:%.*]], label [[FOR_END:%.*]]
; CHECK: for.body.lr.ph:
; CHECK-NEXT: [[IDX_EXT:%.*]] = sext i32 [[LX:%.*]] to i64
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[S_020:%.*]] = phi i32 [ 0, [[FOR_BODY_LR_PH]] ], [ [[OP_EXTRA:%.*]], [[IF_END:%.*]] ]
; CHECK-NEXT: [[J_019:%.*]] = phi i32 [ 0, [[FOR_BODY_LR_PH]] ], [ [[INC:%.*]], [[IF_END]] ]
; CHECK-NEXT: [[P2_018:%.*]] = phi i32* [ [[BLK2:%.*]], [[FOR_BODY_LR_PH]] ], [ [[ADD_PTR16:%.*]], [[IF_END]] ]
; CHECK-NEXT: [[P1_017:%.*]] = phi i32* [ [[BLK1:%.*]], [[FOR_BODY_LR_PH]] ], [ [[ADD_PTR:%.*]], [[IF_END]] ]
; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, i32* [[P1_017]], i64 1
; CHECK-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds i32, i32* [[P2_018]], i64 1
; CHECK-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds i32, i32* [[P1_017]], i64 2
; CHECK-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds i32, i32* [[P2_018]], i64 2
; CHECK-NEXT: [[ARRAYIDX10:%.*]] = getelementptr inbounds i32, i32* [[P1_017]], i64 3
; CHECK-NEXT: [[TMP0:%.*]] = bitcast i32* [[P1_017]] to <4 x i32>*
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, <4 x i32>* [[TMP0]], align 4
; CHECK-NEXT: [[ARRAYIDX11:%.*]] = getelementptr inbounds i32, i32* [[P2_018]], i64 3
; CHECK-NEXT: [[TMP2:%.*]] = bitcast i32* [[P2_018]] to <4 x i32>*
; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, <4 x i32>* [[TMP2]], align 4
; CHECK-NEXT: [[TMP4:%.*]] = mul nsw <4 x i32> [[TMP3]], [[TMP1]]
; CHECK-NEXT: [[TMP5:%.*]] = call i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32> [[TMP4]])
; CHECK-NEXT: [[OP_EXTRA]] = add nsw i32 [[TMP5]], [[S_020]]
; CHECK-NEXT: [[CMP14:%.*]] = icmp slt i32 [[OP_EXTRA]], [[LIM:%.*]]
; CHECK-NEXT: br i1 [[CMP14]], label [[IF_END]], label [[FOR_END_LOOPEXIT:%.*]]
; CHECK: if.end:
; CHECK-NEXT: [[ADD_PTR]] = getelementptr inbounds i32, i32* [[P1_017]], i64 [[IDX_EXT]]
; CHECK-NEXT: [[ADD_PTR16]] = getelementptr inbounds i32, i32* [[P2_018]], i64 [[IDX_EXT]]
; CHECK-NEXT: [[INC]] = add nuw nsw i32 [[J_019]], 1
; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 [[INC]], [[H]]
; CHECK-NEXT: br i1 [[CMP]], label [[FOR_BODY]], label [[FOR_END_LOOPEXIT]]
; CHECK: for.end.loopexit:
; CHECK-NEXT: br label [[FOR_END]]
; CHECK: for.end:
; CHECK-NEXT: [[S_1:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[OP_EXTRA]], [[FOR_END_LOOPEXIT]] ]
; CHECK-NEXT: ret i32 [[S_1]]
;
entry:
  %cmp.16 = icmp sgt i32 %h, 0
  br i1 %cmp.16, label %for.body.lr.ph, label %for.end

for.body.lr.ph: ; preds = %entry
  %idx.ext = sext i32 %lx to i64
  br label %for.body

for.body: ; preds = %for.body.lr.ph, %if.end
  %s.020 = phi i32 [ 0, %for.body.lr.ph ], [ %add13, %if.end ]
  %j.019 = phi i32 [ 0, %for.body.lr.ph ], [ %inc, %if.end ]
  %p2.018 = phi i32* [ %blk2, %for.body.lr.ph ], [ %add.ptr16, %if.end ]
  %p1.017 = phi i32* [ %blk1, %for.body.lr.ph ], [ %add.ptr, %if.end ]
  %0 = load i32, i32* %p1.017, align 4
  %1 = load i32, i32* %p2.018, align 4
  %mul = mul nsw i32 %1, %0
  %add = add nsw i32 %mul, %s.020
  %arrayidx2 = getelementptr inbounds i32, i32* %p1.017, i64 1
  %2 = load i32, i32* %arrayidx2, align 4
  %arrayidx3 = getelementptr inbounds i32, i32* %p2.018, i64 1
  %3 = load i32, i32* %arrayidx3, align 4
  %mul4 = mul nsw i32 %3, %2
  %add5 = add nsw i32 %add, %mul4
  %arrayidx6 = getelementptr inbounds i32, i32* %p1.017, i64 2
  %4 = load i32, i32* %arrayidx6, align 4
  %arrayidx7 = getelementptr inbounds i32, i32* %p2.018, i64 2
  %5 = load i32, i32* %arrayidx7, align 4
  %mul8 = mul nsw i32 %5, %4
  %add9 = add nsw i32 %add5, %mul8
  %arrayidx10 = getelementptr inbounds i32, i32* %p1.017, i64 3
  %6 = load i32, i32* %arrayidx10, align 4
  %arrayidx11 = getelementptr inbounds i32, i32* %p2.018, i64 3
  %7 = load i32, i32* %arrayidx11, align 4
  %mul12 = mul nsw i32 %7, %6
  %add13 = add nsw i32 %add9, %mul12
  %cmp14 = icmp slt i32 %add13, %lim
  br i1 %cmp14, label %if.end, label %for.end.loopexit

if.end: ; preds = %for.body
  %add.ptr = getelementptr inbounds i32, i32* %p1.017, i64 %idx.ext
  %add.ptr16 = getelementptr inbounds i32, i32* %p2.018, i64 %idx.ext
  %inc = add nuw nsw i32 %j.019, 1
  %cmp = icmp slt i32 %inc, %h
  br i1 %cmp, label %for.body, label %for.end.loopexit

for.end.loopexit: ; preds = %for.body, %if.end
  br label %for.end

for.end: ; preds = %for.end.loopexit, %entry
  %s.1 = phi i32 [ 0, %entry ], [ %add13, %for.end.loopexit ]
  ret i32 %s.1
}

; YAML: --- !Passed
; YAML-NEXT: Pass: slp-vectorizer
; YAML-NEXT: Name: VectorizedHorizontalReduction
; YAML-NEXT: Function: test_unrolled_select
; YAML-NEXT: Args:
; YAML-NEXT: - String: 'Vectorized horizontal reduction with cost '
; YAML-NEXT: - Cost: '-47'
; YAML-NEXT: - String: ' and with tree size '
; YAML-NEXT: - TreeSize: '10'
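
;; @test_unrolled_select is the same abs-diff reduction as @test_select, but
;; over eight i8 elements that are zero-extended to i32, with an early exit
;; once the running sum reaches a limit. A plausible C equivalent (a
;; reconstruction for reference only; identifiers are illustrative):
;;
;; #include <stdlib.h>
;; int test_unrolled_select(const unsigned char *p1, const unsigned char *p2,
;;                          int lx, int h, int lim) {
;;   int s = 0;
;;   for (int j = 0; j < h; j++) {
;;     for (int i = 0; i < 8; i++)   /* fully unrolled in the IR below */
;;       s += abs(p1[i] - p2[i]);
;;     if (s >= lim)
;;       break;
;;     p1 += lx;
;;     p2 += lx;
;;   }
;;   return s;
;; }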

define i32 @test_unrolled_select(i8* noalias nocapture readonly %blk1, i8* noalias nocapture readonly %blk2, i32 %lx, i32 %h, i32 %lim) #0 {
; CHECK-LABEL: @test_unrolled_select(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[CMP_43:%.*]] = icmp sgt i32 [[H:%.*]], 0
; CHECK-NEXT: br i1 [[CMP_43]], label [[FOR_BODY_LR_PH:%.*]], label [[FOR_END:%.*]]
; CHECK: for.body.lr.ph:
; CHECK-NEXT: [[IDX_EXT:%.*]] = sext i32 [[LX:%.*]] to i64
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[S_047:%.*]] = phi i32 [ 0, [[FOR_BODY_LR_PH]] ], [ [[OP_EXTRA:%.*]], [[IF_END_86:%.*]] ]
; CHECK-NEXT: [[J_046:%.*]] = phi i32 [ 0, [[FOR_BODY_LR_PH]] ], [ [[INC:%.*]], [[IF_END_86]] ]
; CHECK-NEXT: [[P2_045:%.*]] = phi i8* [ [[BLK2:%.*]], [[FOR_BODY_LR_PH]] ], [ [[ADD_PTR88:%.*]], [[IF_END_86]] ]
; CHECK-NEXT: [[P1_044:%.*]] = phi i8* [ [[BLK1:%.*]], [[FOR_BODY_LR_PH]] ], [ [[ADD_PTR:%.*]], [[IF_END_86]] ]
; CHECK-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds i8, i8* [[P1_044]], i64 1
; CHECK-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds i8, i8* [[P2_045]], i64 1
; CHECK-NEXT: [[ARRAYIDX17:%.*]] = getelementptr inbounds i8, i8* [[P1_044]], i64 2
; CHECK-NEXT: [[ARRAYIDX19:%.*]] = getelementptr inbounds i8, i8* [[P2_045]], i64 2
; CHECK-NEXT: [[ARRAYIDX28:%.*]] = getelementptr inbounds i8, i8* [[P1_044]], i64 3
; CHECK-NEXT: [[ARRAYIDX30:%.*]] = getelementptr inbounds i8, i8* [[P2_045]], i64 3
; CHECK-NEXT: [[ARRAYIDX39:%.*]] = getelementptr inbounds i8, i8* [[P1_044]], i64 4
; CHECK-NEXT: [[ARRAYIDX41:%.*]] = getelementptr inbounds i8, i8* [[P2_045]], i64 4
; CHECK-NEXT: [[ARRAYIDX50:%.*]] = getelementptr inbounds i8, i8* [[P1_044]], i64 5
; CHECK-NEXT: [[ARRAYIDX52:%.*]] = getelementptr inbounds i8, i8* [[P2_045]], i64 5
; CHECK-NEXT: [[ARRAYIDX61:%.*]] = getelementptr inbounds i8, i8* [[P1_044]], i64 6
; CHECK-NEXT: [[ARRAYIDX63:%.*]] = getelementptr inbounds i8, i8* [[P2_045]], i64 6
; CHECK-NEXT: [[ARRAYIDX72:%.*]] = getelementptr inbounds i8, i8* [[P1_044]], i64 7
; CHECK-NEXT: [[TMP0:%.*]] = bitcast i8* [[P1_044]] to <8 x i8>*
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i8>, <8 x i8>* [[TMP0]], align 1
; CHECK-NEXT: [[TMP2:%.*]] = zext <8 x i8> [[TMP1]] to <8 x i32>
; CHECK-NEXT: [[ARRAYIDX74:%.*]] = getelementptr inbounds i8, i8* [[P2_045]], i64 7
; CHECK-NEXT: [[TMP3:%.*]] = bitcast i8* [[P2_045]] to <8 x i8>*
; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i8>, <8 x i8>* [[TMP3]], align 1
; CHECK-NEXT: [[TMP5:%.*]] = zext <8 x i8> [[TMP4]] to <8 x i32>
; CHECK-NEXT: [[TMP6:%.*]] = sub nsw <8 x i32> [[TMP2]], [[TMP5]]
; CHECK-NEXT: [[TMP7:%.*]] = icmp slt <8 x i32> [[TMP6]], zeroinitializer
; CHECK-NEXT: [[TMP8:%.*]] = sub nsw <8 x i32> zeroinitializer, [[TMP6]]
; CHECK-NEXT: [[TMP9:%.*]] = select <8 x i1> [[TMP7]], <8 x i32> [[TMP8]], <8 x i32> [[TMP6]]
; CHECK-NEXT: [[TMP10:%.*]] = call i32 @llvm.experimental.vector.reduce.add.v8i32(<8 x i32> [[TMP9]])
; CHECK-NEXT: [[OP_EXTRA]] = add nsw i32 [[TMP10]], [[S_047]]
; CHECK-NEXT: [[CMP83:%.*]] = icmp slt i32 [[OP_EXTRA]], [[LIM:%.*]]
; CHECK-NEXT: br i1 [[CMP83]], label [[IF_END_86]], label [[FOR_END_LOOPEXIT:%.*]]
; CHECK: if.end.86:
; CHECK-NEXT: [[ADD_PTR]] = getelementptr inbounds i8, i8* [[P1_044]], i64 [[IDX_EXT]]
; CHECK-NEXT: [[ADD_PTR88]] = getelementptr inbounds i8, i8* [[P2_045]], i64 [[IDX_EXT]]
; CHECK-NEXT: [[INC]] = add nuw nsw i32 [[J_046]], 1
; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 [[INC]], [[H]]
; CHECK-NEXT: br i1 [[CMP]], label [[FOR_BODY]], label [[FOR_END_LOOPEXIT]]
; CHECK: for.end.loopexit:
; CHECK-NEXT: br label [[FOR_END]]
; CHECK: for.end:
; CHECK-NEXT: [[S_1:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[OP_EXTRA]], [[FOR_END_LOOPEXIT]] ]
; CHECK-NEXT: ret i32 [[S_1]]
;
entry:
  %cmp.43 = icmp sgt i32 %h, 0
  br i1 %cmp.43, label %for.body.lr.ph, label %for.end

for.body.lr.ph: ; preds = %entry
  %idx.ext = sext i32 %lx to i64
  br label %for.body

for.body: ; preds = %for.body.lr.ph, %if.end.86
  %s.047 = phi i32 [ 0, %for.body.lr.ph ], [ %add82, %if.end.86 ]
  %j.046 = phi i32 [ 0, %for.body.lr.ph ], [ %inc, %if.end.86 ]
  %p2.045 = phi i8* [ %blk2, %for.body.lr.ph ], [ %add.ptr88, %if.end.86 ]
  %p1.044 = phi i8* [ %blk1, %for.body.lr.ph ], [ %add.ptr, %if.end.86 ]
  %0 = load i8, i8* %p1.044, align 1
  %conv = zext i8 %0 to i32
  %1 = load i8, i8* %p2.045, align 1
  %conv2 = zext i8 %1 to i32
  %sub = sub nsw i32 %conv, %conv2
  %cmp3 = icmp slt i32 %sub, 0
  %sub5 = sub nsw i32 0, %sub
  %sub5.sub = select i1 %cmp3, i32 %sub5, i32 %sub
  %add = add nsw i32 %sub5.sub, %s.047
  %arrayidx6 = getelementptr inbounds i8, i8* %p1.044, i64 1
  %2 = load i8, i8* %arrayidx6, align 1
  %conv7 = zext i8 %2 to i32
  %arrayidx8 = getelementptr inbounds i8, i8* %p2.045, i64 1
  %3 = load i8, i8* %arrayidx8, align 1
  %conv9 = zext i8 %3 to i32
  %sub10 = sub nsw i32 %conv7, %conv9
  %cmp11 = icmp slt i32 %sub10, 0
  %sub14 = sub nsw i32 0, %sub10
  %v.1 = select i1 %cmp11, i32 %sub14, i32 %sub10
  %add16 = add nsw i32 %add, %v.1
  %arrayidx17 = getelementptr inbounds i8, i8* %p1.044, i64 2
  %4 = load i8, i8* %arrayidx17, align 1
  %conv18 = zext i8 %4 to i32
  %arrayidx19 = getelementptr inbounds i8, i8* %p2.045, i64 2
  %5 = load i8, i8* %arrayidx19, align 1
  %conv20 = zext i8 %5 to i32
  %sub21 = sub nsw i32 %conv18, %conv20
  %cmp22 = icmp slt i32 %sub21, 0
  %sub25 = sub nsw i32 0, %sub21
  %sub25.sub21 = select i1 %cmp22, i32 %sub25, i32 %sub21
  %add27 = add nsw i32 %add16, %sub25.sub21
  %arrayidx28 = getelementptr inbounds i8, i8* %p1.044, i64 3
  %6 = load i8, i8* %arrayidx28, align 1
  %conv29 = zext i8 %6 to i32
  %arrayidx30 = getelementptr inbounds i8, i8* %p2.045, i64 3
  %7 = load i8, i8* %arrayidx30, align 1
  %conv31 = zext i8 %7 to i32
  %sub32 = sub nsw i32 %conv29, %conv31
  %cmp33 = icmp slt i32 %sub32, 0
  %sub36 = sub nsw i32 0, %sub32
  %v.3 = select i1 %cmp33, i32 %sub36, i32 %sub32
  %add38 = add nsw i32 %add27, %v.3
  %arrayidx39 = getelementptr inbounds i8, i8* %p1.044, i64 4
  %8 = load i8, i8* %arrayidx39, align 1
  %conv40 = zext i8 %8 to i32
  %arrayidx41 = getelementptr inbounds i8, i8* %p2.045, i64 4
  %9 = load i8, i8* %arrayidx41, align 1
  %conv42 = zext i8 %9 to i32
  %sub43 = sub nsw i32 %conv40, %conv42
  %cmp44 = icmp slt i32 %sub43, 0
  %sub47 = sub nsw i32 0, %sub43
  %sub47.sub43 = select i1 %cmp44, i32 %sub47, i32 %sub43
  %add49 = add nsw i32 %add38, %sub47.sub43
  %arrayidx50 = getelementptr inbounds i8, i8* %p1.044, i64 5
  %10 = load i8, i8* %arrayidx50, align 1
  %conv51 = zext i8 %10 to i32
  %arrayidx52 = getelementptr inbounds i8, i8* %p2.045, i64 5
  %11 = load i8, i8* %arrayidx52, align 1
  %conv53 = zext i8 %11 to i32
  %sub54 = sub nsw i32 %conv51, %conv53
  %cmp55 = icmp slt i32 %sub54, 0
  %sub58 = sub nsw i32 0, %sub54
  %v.5 = select i1 %cmp55, i32 %sub58, i32 %sub54
  %add60 = add nsw i32 %add49, %v.5
  %arrayidx61 = getelementptr inbounds i8, i8* %p1.044, i64 6
  %12 = load i8, i8* %arrayidx61, align 1
  %conv62 = zext i8 %12 to i32
  %arrayidx63 = getelementptr inbounds i8, i8* %p2.045, i64 6
  %13 = load i8, i8* %arrayidx63, align 1
  %conv64 = zext i8 %13 to i32
  %sub65 = sub nsw i32 %conv62, %conv64
  %cmp66 = icmp slt i32 %sub65, 0
  %sub69 = sub nsw i32 0, %sub65
  %sub69.sub65 = select i1 %cmp66, i32 %sub69, i32 %sub65
  %add71 = add nsw i32 %add60, %sub69.sub65
  %arrayidx72 = getelementptr inbounds i8, i8* %p1.044, i64 7
  %14 = load i8, i8* %arrayidx72, align 1
  %conv73 = zext i8 %14 to i32
  %arrayidx74 = getelementptr inbounds i8, i8* %p2.045, i64 7
  %15 = load i8, i8* %arrayidx74, align 1
  %conv75 = zext i8 %15 to i32
  %sub76 = sub nsw i32 %conv73, %conv75
  %cmp77 = icmp slt i32 %sub76, 0
  %sub80 = sub nsw i32 0, %sub76
  %v.7 = select i1 %cmp77, i32 %sub80, i32 %sub76
  %add82 = add nsw i32 %add71, %v.7
  %cmp83 = icmp slt i32 %add82, %lim
  br i1 %cmp83, label %if.end.86, label %for.end.loopexit

if.end.86: ; preds = %for.body
  %add.ptr = getelementptr inbounds i8, i8* %p1.044, i64 %idx.ext
  %add.ptr88 = getelementptr inbounds i8, i8* %p2.045, i64 %idx.ext
  %inc = add nuw nsw i32 %j.046, 1
  %cmp = icmp slt i32 %inc, %h
  br i1 %cmp, label %for.body, label %for.end.loopexit

for.end.loopexit: ; preds = %for.body, %if.end.86
  br label %for.end

for.end: ; preds = %for.end.loopexit, %entry
  %s.1 = phi i32 [ 0, %entry ], [ %add82, %for.end.loopexit ]
  ret i32 %s.1
}