; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -passes='loop(indvars),instcombine' -replexitval=always -S < %s | FileCheck %s
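;; -replexitval=always forces IndVarSimplify's exit-value rewriting whenever it
;; is legal, regardless of the usual cost heuristics.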

;; Test that the loop's exit value is rewritten to its initial
;; value from the loop preheader.
define i32 @test1(ptr %var) {
; CHECK-LABEL: @test1(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[COND:%.*]] = icmp eq ptr [[VAR:%.*]], null
; CHECK-NEXT:    br label [[HEADER:%.*]]
; CHECK:       header:
; CHECK-NEXT:    br i1 [[COND]], label [[LOOP:%.*]], label [[EXIT:%.*]]
; CHECK:       loop:
; CHECK-NEXT:    br label [[HEADER]]
; CHECK:       exit:
; CHECK-NEXT:    ret i32 0
;
entry:
  %cond = icmp eq ptr %var, null
  br label %header

header:
  %phi_indvar = phi i32 [0, %entry], [%indvar, %loop]
  br i1 %cond, label %loop, label %exit

loop:
  %indvar = add i32 %phi_indvar, 1
  br label %header

exit:
  ret i32 %phi_indvar
}

;; Test that we cannot rewrite the loop exit value if it is not
;; a phi node (%indvar is an add instruction in this test).
define i32 @test2(ptr %var) {
; CHECK-LABEL: @test2(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[COND:%.*]] = icmp eq ptr [[VAR:%.*]], null
; CHECK-NEXT:    br label [[HEADER:%.*]]
; CHECK:       header:
; CHECK-NEXT:    [[PHI_INDVAR:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[INDVAR:%.*]], [[HEADER]] ]
; CHECK-NEXT:    [[INDVAR]] = add i32 [[PHI_INDVAR]], 1
; CHECK-NEXT:    br i1 [[COND]], label [[HEADER]], label [[EXIT:%.*]]
; CHECK:       exit:
; CHECK-NEXT:    ret i32 [[INDVAR]]
;
entry:
  %cond = icmp eq ptr %var, null
  br label %header

header:
  %phi_indvar = phi i32 [0, %entry], [%indvar, %header]
  %indvar = add i32 %phi_indvar, 1
  br i1 %cond, label %header, label %exit

exit:
  ret i32 %indvar
}

;; Test that we cannot rewrite the loop exit value if the exit
;; condition is not in the loop header.
define i32 @test3(ptr %var) {
; CHECK-LABEL: @test3(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[COND1:%.*]] = icmp eq ptr [[VAR:%.*]], null
; CHECK-NEXT:    br label [[HEADER:%.*]]
; CHECK:       header:
; CHECK-NEXT:    [[PHI_INDVAR:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[INDVAR:%.*]], [[HEADER_BACKEDGE:%.*]] ]
; CHECK-NEXT:    [[INDVAR]] = add i32 [[PHI_INDVAR]], 1
; CHECK-NEXT:    [[COND2:%.*]] = icmp eq i32 [[INDVAR]], 10
; CHECK-NEXT:    br i1 [[COND2]], label [[HEADER_BACKEDGE]], label [[BODY:%.*]]
; CHECK:       header.backedge:
; CHECK-NEXT:    br label [[HEADER]]
; CHECK:       body:
; CHECK-NEXT:    br i1 [[COND1]], label [[HEADER_BACKEDGE]], label [[EXIT:%.*]]
; CHECK:       exit:
; CHECK-NEXT:    ret i32 [[PHI_INDVAR]]
;
entry:
  %cond1 = icmp eq ptr %var, null
  br label %header

header:
  %phi_indvar = phi i32 [0, %entry], [%indvar, %header], [%indvar, %body]
  %indvar = add i32 %phi_indvar, 1
  %cond2 = icmp eq i32 %indvar, 10
  br i1 %cond2, label %header, label %body

body:
  br i1 %cond1, label %header, label %exit

exit:
  ret i32 %phi_indvar
}

; Multiple exits dominating the latch.
define i32 @test4(i1 %cond1, i1 %cond2) {
; CHECK-LABEL: @test4(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    br label [[HEADER:%.*]]
; CHECK:       header:
; CHECK-NEXT:    br i1 [[COND1:%.*]], label [[LOOP:%.*]], label [[EXIT:%.*]]
; CHECK:       loop:
; CHECK-NEXT:    br i1 [[COND2:%.*]], label [[HEADER]], label [[EXIT]]
; CHECK:       exit:
; CHECK-NEXT:    ret i32 0
;
entry:
  br label %header

header:
  %phi_indvar = phi i32 [0, %entry], [%indvar, %loop]
  br i1 %cond1, label %loop, label %exit

loop:
  %indvar = add i32 %phi_indvar, 1
  br i1 %cond2, label %header, label %exit

exit:
  ret i32 %phi_indvar
}

; A conditionally executed exit.
define i32 @test5(ptr %addr, i1 %cond2) {
; CHECK-LABEL: @test5(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    br label [[HEADER:%.*]]
; CHECK:       header:
; CHECK-NEXT:    [[PHI_INDVAR:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[INDVAR:%.*]], [[LOOP:%.*]] ]
; CHECK-NEXT:    [[COND1:%.*]] = load volatile i1, ptr [[ADDR:%.*]], align 1
; CHECK-NEXT:    br i1 [[COND1]], label [[LOOP]], label [[MAYBE:%.*]]
; CHECK:       maybe:
; CHECK-NEXT:    br i1 [[COND2:%.*]], label [[LOOP]], label [[EXIT:%.*]]
; CHECK:       loop:
; CHECK-NEXT:    [[INDVAR]] = add i32 [[PHI_INDVAR]], 1
; CHECK-NEXT:    br label [[HEADER]]
; CHECK:       exit:
; CHECK-NEXT:    ret i32 [[PHI_INDVAR]]
;
entry:
  br label %header

header:
  %phi_indvar = phi i32 [0, %entry], [%indvar, %loop]
  %cond1 = load volatile i1, ptr %addr
  br i1 %cond1, label %loop, label %maybe

maybe:
  br i1 %cond2, label %loop, label %exit

loop:
  %indvar = add i32 %phi_indvar, 1
  br label %header

exit:
  ret i32 %phi_indvar
}
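
;; Regression test for PR57336: the function returns a truncated copy of a
;; wider (i32) counter, and its exit value must be expanded from the i16 exit
;; condition (see the smax/umax/udiv sequence in the CHECK lines below).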
define i16 @pr57336(i16 %end, i16 %m) mustprogress {
; CHECK-LABEL: @pr57336(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
; CHECK:       for.body:
; CHECK-NEXT:    [[INC8:%.*]] = phi i16 [ [[INC:%.*]], [[FOR_BODY]] ], [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT:    [[INC]] = add nuw nsw i16 [[INC8]], 1
; CHECK-NEXT:    [[MUL:%.*]] = mul nsw i16 [[INC8]], [[M:%.*]]
; CHECK-NEXT:    [[CMP_NOT:%.*]] = icmp sgt i16 [[MUL]], [[END:%.*]]
; CHECK-NEXT:    br i1 [[CMP_NOT]], label [[CRIT_EDGE:%.*]], label [[FOR_BODY]]
; CHECK:       crit_edge:
; CHECK-NEXT:    [[TMP0:%.*]] = add i16 [[END]], 1
; CHECK-NEXT:    [[SMAX:%.*]] = call i16 @llvm.smax.i16(i16 [[TMP0]], i16 0)
; CHECK-NEXT:    [[TMP1:%.*]] = icmp ult i16 [[END]], 32767
; CHECK-NEXT:    [[UMIN:%.*]] = zext i1 [[TMP1]] to i16
; CHECK-NEXT:    [[TMP2:%.*]] = sub nsw i16 [[SMAX]], [[UMIN]]
; CHECK-NEXT:    [[UMAX:%.*]] = call i16 @llvm.umax.i16(i16 [[M]], i16 1)
; CHECK-NEXT:    [[TMP3:%.*]] = udiv i16 [[TMP2]], [[UMAX]]
; CHECK-NEXT:    [[TMP4:%.*]] = add i16 [[TMP3]], [[UMIN]]
; CHECK-NEXT:    ret i16 [[TMP4]]
;
entry:
  br label %for.body

for.body:
  %inc8 = phi i16 [ %inc, %for.body ], [ 0, %entry ]
  %inc137 = phi i32 [ %inc1, %for.body ], [ 0, %entry ]
  %inc1 = add nsw i32 %inc137, 1
  %inc = add nsw i16 %inc8, 1
  %mul = mul nsw i16 %m, %inc8
  %cmp.not = icmp slt i16 %end, %mul
  br i1 %cmp.not, label %crit_edge, label %for.body

crit_edge:
  %inc137.lcssa = phi i32 [ %inc137, %for.body ]
  %conv = trunc i32 %inc137.lcssa to i16
  ret i16 %conv
}
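
;; The candidate exit value here is %VF.capped, a umin of a vscale-based step
;; and the remaining trip count; the rewrite expands it in for.end from %n and
;; the step (see the for.end CHECK lines below).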
define i32 @vscale_slt_with_vp_umin(ptr nocapture %A, i32 %n) mustprogress vscale_range(2,1024) {
; CHECK-LABEL: @vscale_slt_with_vp_umin(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[VSCALE:%.*]] = call i32 @llvm.vscale.i32()
; CHECK-NEXT:    [[VF:%.*]] = shl nuw nsw i32 [[VSCALE]], 2
; CHECK-NEXT:    [[CMP4:%.*]] = icmp sgt i32 [[N:%.*]], 0
; CHECK-NEXT:    br i1 [[CMP4]], label [[FOR_BODY_PREHEADER:%.*]], label [[EARLY_EXIT:%.*]]
; CHECK:       for.body.preheader:
; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
; CHECK:       early.exit:
; CHECK-NEXT:    ret i32 0
; CHECK:       for.body:
; CHECK-NEXT:    [[I_05:%.*]] = phi i32 [ [[ADD:%.*]], [[FOR_BODY]] ], [ 0, [[FOR_BODY_PREHEADER]] ]
; CHECK-NEXT:    [[LEFT:%.*]] = sub nsw i32 [[N]], [[I_05]]
; CHECK-NEXT:    [[VF_CAPPED:%.*]] = call i32 @llvm.umin.i32(i32 [[VF]], i32 [[LEFT]])
; CHECK-NEXT:    store i32 [[VF_CAPPED]], ptr [[A:%.*]], align 4
; CHECK-NEXT:    [[ADD]] = add nuw nsw i32 [[I_05]], [[VF]]
; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i32 [[ADD]], [[N]]
; CHECK-NEXT:    br i1 [[CMP]], label [[FOR_BODY]], label [[FOR_END:%.*]]
; CHECK:       for.end:
; CHECK-NEXT:    [[TMP0:%.*]] = add nsw i32 [[N]], -1
; CHECK-NEXT:    [[TMP1:%.*]] = udiv i32 [[TMP0]], [[VF]]
; CHECK-NEXT:    [[TMP2:%.*]] = mul i32 [[TMP1]], [[VSCALE]]
; CHECK-NEXT:    [[TMP3:%.*]] = shl i32 [[TMP2]], 2
; CHECK-NEXT:    [[TMP4:%.*]] = sub i32 [[N]], [[TMP3]]
; CHECK-NEXT:    [[UMIN:%.*]] = call i32 @llvm.umin.i32(i32 [[VF]], i32 [[TMP4]])
; CHECK-NEXT:    ret i32 [[UMIN]]
;
entry:
  %vscale = call i32 @llvm.vscale.i32()
  %VF = shl nuw nsw i32 %vscale, 2
  %cmp4 = icmp sgt i32 %n, 0
  br i1 %cmp4, label %for.body, label %early.exit

early.exit:
  ret i32 0

for.body:
  %i.05 = phi i32 [ %add, %for.body ], [ 0, %entry ]
  %arrayidx = getelementptr inbounds i32, ptr %A, i32 %i.05
  %left = sub i32 %n, %i.05
  %VF.capped = call i32 @llvm.umin.i32(i32 %VF, i32 %left)
  store i32 %VF.capped, ptr %A
  %add = add nsw i32 %i.05, %VF
  %cmp = icmp slt i32 %add, %n
  br i1 %cmp, label %for.body, label %for.end

for.end:
  ret i32 %VF.capped
}
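
;; Same as @vscale_slt_with_vp_umin, but the loop guard compares %n against the
;; vscale-based step instead of 0; note that, unlike above, the rewritten IR
;; carries no nsw on the %left subtraction or on the n-1 computation in for.end.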
define i32 @vscale_slt_with_vp_umin2(ptr nocapture %A, i32 %n) mustprogress vscale_range(2,1024) {
; CHECK-LABEL: @vscale_slt_with_vp_umin2(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[VSCALE:%.*]] = call i32 @llvm.vscale.i32()
; CHECK-NEXT:    [[VF:%.*]] = shl nuw nsw i32 [[VSCALE]], 2
; CHECK-NEXT:    [[CMP4:%.*]] = icmp slt i32 [[VF]], [[N:%.*]]
; CHECK-NEXT:    br i1 [[CMP4]], label [[FOR_BODY_PREHEADER:%.*]], label [[EARLY_EXIT:%.*]]
; CHECK:       for.body.preheader:
; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
; CHECK:       early.exit:
; CHECK-NEXT:    ret i32 0
; CHECK:       for.body:
; CHECK-NEXT:    [[I_05:%.*]] = phi i32 [ [[ADD:%.*]], [[FOR_BODY]] ], [ 0, [[FOR_BODY_PREHEADER]] ]
; CHECK-NEXT:    [[LEFT:%.*]] = sub i32 [[N]], [[I_05]]
; CHECK-NEXT:    [[VF_CAPPED:%.*]] = call i32 @llvm.umin.i32(i32 [[VF]], i32 [[LEFT]])
; CHECK-NEXT:    store i32 [[VF_CAPPED]], ptr [[A:%.*]], align 4
; CHECK-NEXT:    [[ADD]] = add nuw nsw i32 [[I_05]], [[VF]]
; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i32 [[ADD]], [[N]]
; CHECK-NEXT:    br i1 [[CMP]], label [[FOR_BODY]], label [[FOR_END:%.*]]
; CHECK:       for.end:
; CHECK-NEXT:    [[TMP0:%.*]] = add i32 [[N]], -1
; CHECK-NEXT:    [[TMP1:%.*]] = udiv i32 [[TMP0]], [[VF]]
; CHECK-NEXT:    [[TMP2:%.*]] = mul i32 [[TMP1]], [[VSCALE]]
; CHECK-NEXT:    [[TMP3:%.*]] = shl i32 [[TMP2]], 2
; CHECK-NEXT:    [[TMP4:%.*]] = sub i32 [[N]], [[TMP3]]
; CHECK-NEXT:    [[UMIN:%.*]] = call i32 @llvm.umin.i32(i32 [[VF]], i32 [[TMP4]])
; CHECK-NEXT:    ret i32 [[UMIN]]
;
entry:
  %vscale = call i32 @llvm.vscale.i32()
  %VF = shl nuw nsw i32 %vscale, 2
  %cmp4 = icmp sgt i32 %n, %VF
  br i1 %cmp4, label %for.body, label %early.exit

early.exit:
  ret i32 0

for.body:
  %i.05 = phi i32 [ %add, %for.body ], [ 0, %entry ]
  %arrayidx = getelementptr inbounds i32, ptr %A, i32 %i.05
  %left = sub i32 %n, %i.05
  %VF.capped = call i32 @llvm.umin.i32(i32 %VF, i32 %left)
  store i32 %VF.capped, ptr %A
  %add = add nsw i32 %i.05, %VF
  %cmp = icmp slt i32 %add, %n
  br i1 %cmp, label %for.body, label %for.end

for.end:
  ret i32 %VF.capped
}