; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
; RUN: opt -S < %s -p loop-vectorize -enable-early-exit-vectorization -force-vector-width=4 | FileCheck %s

declare void @init_mem(ptr, i64);
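; Each test below allocates two buffers, initialises them with @init_mem, and
; then walks them in a scalar loop that exits early on the first mismatch (or,
; in one case, on a comparison between the induction variable and loaded data).
; With -force-vector-width=4 and -enable-early-exit-vectorization, each loop is
; expected to be rewritten into a vector.body feeding a middle.split /
; vector.early.exit structure, which the autogenerated CHECK lines assert.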
6 define i64 @same_exit_block_pre_inc_use1() {
7 ; CHECK-LABEL: define i64 @same_exit_block_pre_inc_use1() {
9 ; CHECK-NEXT: [[P1:%.*]] = alloca [1024 x i8], align 1
10 ; CHECK-NEXT: [[P2:%.*]] = alloca [1024 x i8], align 1
11 ; CHECK-NEXT: call void @init_mem(ptr [[P1]], i64 1024)
12 ; CHECK-NEXT: call void @init_mem(ptr [[P2]], i64 1024)
13 ; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
15 ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
17 ; CHECK-NEXT: [[INDEX1:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT3:%.*]], [[VECTOR_BODY]] ]
18 ; CHECK-NEXT: [[VEC_IND:%.*]] = phi <4 x i64> [ <i64 3, i64 4, i64 5, i64 6>, [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
19 ; CHECK-NEXT: [[OFFSET_IDX:%.*]] = add i64 3, [[INDEX1]]
20 ; CHECK-NEXT: [[TMP0:%.*]] = add i64 [[OFFSET_IDX]], 0
21 ; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i8, ptr [[P1]], i64 [[TMP0]]
22 ; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i32 0
23 ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i8>, ptr [[TMP2]], align 1
24 ; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds i8, ptr [[P2]], i64 [[TMP0]]
25 ; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds i8, ptr [[TMP3]], i32 0
26 ; CHECK-NEXT: [[WIDE_LOAD2:%.*]] = load <4 x i8>, ptr [[TMP4]], align 1
27 ; CHECK-NEXT: [[TMP5:%.*]] = icmp eq <4 x i8> [[WIDE_LOAD]], [[WIDE_LOAD2]]
28 ; CHECK-NEXT: [[INDEX_NEXT3]] = add nuw i64 [[INDEX1]], 4
29 ; CHECK-NEXT: [[TMP6:%.*]] = xor <4 x i1> [[TMP5]], splat (i1 true)
30 ; CHECK-NEXT: [[TMP7:%.*]] = call i1 @llvm.vector.reduce.or.v4i1(<4 x i1> [[TMP6]])
31 ; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT3]], 64
32 ; CHECK-NEXT: [[VEC_IND_NEXT]] = add <4 x i64> [[VEC_IND]], splat (i64 4)
33 ; CHECK-NEXT: [[TMP9:%.*]] = or i1 [[TMP7]], [[TMP8]]
34 ; CHECK-NEXT: br i1 [[TMP9]], label [[MIDDLE_SPLIT:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
35 ; CHECK: middle.split:
36 ; CHECK-NEXT: br i1 [[TMP7]], label [[VECTOR_EARLY_EXIT:%.*]], label [[MIDDLE_BLOCK:%.*]]
37 ; CHECK: vector.early.exit:
38 ; CHECK-NEXT: [[FIRST_ACTIVE_LANE:%.*]] = call i64 @llvm.experimental.cttz.elts.i64.v4i1(<4 x i1> [[TMP6]], i1 true)
39 ; CHECK-NEXT: [[EARLY_EXIT_VALUE:%.*]] = extractelement <4 x i64> [[VEC_IND]], i64 [[FIRST_ACTIVE_LANE]]
40 ; CHECK-NEXT: br label [[LOOP_END:%.*]]
41 ; CHECK: middle.block:
42 ; CHECK-NEXT: br i1 true, label [[LOOP_END]], label [[SCALAR_PH]]
44 ; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 67, [[MIDDLE_BLOCK]] ], [ 3, [[ENTRY:%.*]] ]
45 ; CHECK-NEXT: br label [[LOOP:%.*]]
47 ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], [[LOOP_INC:%.*]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
48 ; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[P1]], i64 [[INDEX]]
49 ; CHECK-NEXT: [[LD1:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
50 ; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i8, ptr [[P2]], i64 [[INDEX]]
51 ; CHECK-NEXT: [[LD2:%.*]] = load i8, ptr [[ARRAYIDX1]], align 1
52 ; CHECK-NEXT: [[CMP3:%.*]] = icmp eq i8 [[LD1]], [[LD2]]
53 ; CHECK-NEXT: br i1 [[CMP3]], label [[LOOP_INC]], label [[LOOP_END]]
55 ; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 1
56 ; CHECK-NEXT: [[EXITCOND:%.*]] = icmp ne i64 [[INDEX_NEXT]], 67
57 ; CHECK-NEXT: br i1 [[EXITCOND]], label [[LOOP]], label [[LOOP_END]], !llvm.loop [[LOOP3:![0-9]+]]
59 ; CHECK-NEXT: [[RETVAL:%.*]] = phi i64 [ [[INDEX]], [[LOOP]] ], [ 67, [[LOOP_INC]] ], [ 67, [[MIDDLE_BLOCK]] ], [ [[EARLY_EXIT_VALUE]], [[VECTOR_EARLY_EXIT]] ]
60 ; CHECK-NEXT: ret i64 [[RETVAL]]
entry:
  %p1 = alloca [1024 x i8]
  %p2 = alloca [1024 x i8]
  call void @init_mem(ptr %p1, i64 1024)
  call void @init_mem(ptr %p2, i64 1024)
  br label %loop

loop:
  %index = phi i64 [ %index.next, %loop.inc ], [ 3, %entry ]
  %arrayidx = getelementptr inbounds i8, ptr %p1, i64 %index
  %ld1 = load i8, ptr %arrayidx, align 1
  %arrayidx1 = getelementptr inbounds i8, ptr %p2, i64 %index
  %ld2 = load i8, ptr %arrayidx1, align 1
  %cmp3 = icmp eq i8 %ld1, %ld2
  br i1 %cmp3, label %loop.inc, label %loop.end

loop.inc:
  %index.next = add i64 %index, 1
  %exitcond = icmp ne i64 %index.next, 67
  br i1 %exitcond, label %loop, label %loop.end

loop.end:
  %retval = phi i64 [ %index, %loop ], [ 67, %loop.inc ]
  ret i64 %retval
}
89 define i64 @same_exit_block_pre_inc1_use_inv_cond(i1 %cond) {
90 ; CHECK-LABEL: define i64 @same_exit_block_pre_inc1_use_inv_cond(
91 ; CHECK-SAME: i1 [[COND:%.*]]) {
93 ; CHECK-NEXT: [[P1:%.*]] = alloca [1024 x i8], align 1
94 ; CHECK-NEXT: [[P2:%.*]] = alloca [1024 x i8], align 1
95 ; CHECK-NEXT: call void @init_mem(ptr [[P1]], i64 1024)
96 ; CHECK-NEXT: call void @init_mem(ptr [[P2]], i64 1024)
97 ; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
99 ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
100 ; CHECK: vector.body:
101 ; CHECK-NEXT: [[INDEX1:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT3:%.*]], [[VECTOR_BODY]] ]
102 ; CHECK-NEXT: [[VEC_IND:%.*]] = phi <4 x i64> [ <i64 3, i64 4, i64 5, i64 6>, [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
103 ; CHECK-NEXT: [[OFFSET_IDX:%.*]] = add i64 3, [[INDEX1]]
104 ; CHECK-NEXT: [[TMP0:%.*]] = add i64 [[OFFSET_IDX]], 0
105 ; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i8, ptr [[P1]], i64 [[TMP0]]
106 ; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i32 0
107 ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i8>, ptr [[TMP2]], align 1
108 ; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds i8, ptr [[P2]], i64 [[TMP0]]
109 ; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds i8, ptr [[TMP3]], i32 0
110 ; CHECK-NEXT: [[WIDE_LOAD2:%.*]] = load <4 x i8>, ptr [[TMP4]], align 1
111 ; CHECK-NEXT: [[TMP5:%.*]] = icmp eq <4 x i8> [[WIDE_LOAD]], [[WIDE_LOAD2]]
112 ; CHECK-NEXT: [[TMP6:%.*]] = select i1 [[COND]], <4 x i1> [[TMP5]], <4 x i1> zeroinitializer
113 ; CHECK-NEXT: [[INDEX_NEXT3]] = add nuw i64 [[INDEX1]], 4
114 ; CHECK-NEXT: [[TMP7:%.*]] = xor <4 x i1> [[TMP6]], splat (i1 true)
115 ; CHECK-NEXT: [[TMP8:%.*]] = call i1 @llvm.vector.reduce.or.v4i1(<4 x i1> [[TMP7]])
116 ; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT3]], 64
117 ; CHECK-NEXT: [[VEC_IND_NEXT]] = add <4 x i64> [[VEC_IND]], splat (i64 4)
118 ; CHECK-NEXT: [[TMP10:%.*]] = or i1 [[TMP8]], [[TMP9]]
119 ; CHECK-NEXT: br i1 [[TMP10]], label [[MIDDLE_SPLIT:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
120 ; CHECK: middle.split:
121 ; CHECK-NEXT: br i1 [[TMP8]], label [[VECTOR_EARLY_EXIT:%.*]], label [[MIDDLE_BLOCK:%.*]]
122 ; CHECK: vector.early.exit:
123 ; CHECK-NEXT: [[FIRST_ACTIVE_LANE:%.*]] = call i64 @llvm.experimental.cttz.elts.i64.v4i1(<4 x i1> [[TMP7]], i1 true)
124 ; CHECK-NEXT: [[EARLY_EXIT_VALUE:%.*]] = extractelement <4 x i64> [[VEC_IND]], i64 [[FIRST_ACTIVE_LANE]]
125 ; CHECK-NEXT: br label [[LOOP_END:%.*]]
126 ; CHECK: middle.block:
127 ; CHECK-NEXT: br i1 true, label [[LOOP_END]], label [[SCALAR_PH]]
129 ; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 67, [[MIDDLE_BLOCK]] ], [ 3, [[ENTRY:%.*]] ]
130 ; CHECK-NEXT: br label [[LOOP:%.*]]
132 ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], [[LOOP_INC:%.*]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
133 ; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[P1]], i64 [[INDEX]]
134 ; CHECK-NEXT: [[LD1:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
135 ; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i8, ptr [[P2]], i64 [[INDEX]]
136 ; CHECK-NEXT: [[LD2:%.*]] = load i8, ptr [[ARRAYIDX1]], align 1
137 ; CHECK-NEXT: [[CMP3:%.*]] = icmp eq i8 [[LD1]], [[LD2]]
138 ; CHECK-NEXT: [[CMP4:%.*]] = select i1 [[COND]], i1 [[CMP3]], i1 false
139 ; CHECK-NEXT: br i1 [[CMP4]], label [[LOOP_INC]], label [[LOOP_END]]
141 ; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 1
142 ; CHECK-NEXT: [[EXITCOND:%.*]] = icmp ne i64 [[INDEX_NEXT]], 67
143 ; CHECK-NEXT: br i1 [[EXITCOND]], label [[LOOP]], label [[LOOP_END]], !llvm.loop [[LOOP5:![0-9]+]]
145 ; CHECK-NEXT: [[RETVAL:%.*]] = phi i64 [ [[INDEX]], [[LOOP]] ], [ 67, [[LOOP_INC]] ], [ 67, [[MIDDLE_BLOCK]] ], [ [[EARLY_EXIT_VALUE]], [[VECTOR_EARLY_EXIT]] ]
146 ; CHECK-NEXT: ret i64 [[RETVAL]]
entry:
  %p1 = alloca [1024 x i8]
  %p2 = alloca [1024 x i8]
  call void @init_mem(ptr %p1, i64 1024)
  call void @init_mem(ptr %p2, i64 1024)
  br label %loop

loop:
  %index = phi i64 [ %index.next, %loop.inc ], [ 3, %entry ]
  %arrayidx = getelementptr inbounds i8, ptr %p1, i64 %index
  %ld1 = load i8, ptr %arrayidx, align 1
  %arrayidx1 = getelementptr inbounds i8, ptr %p2, i64 %index
  %ld2 = load i8, ptr %arrayidx1, align 1
  %cmp3 = icmp eq i8 %ld1, %ld2
  %cmp4 = select i1 %cond, i1 %cmp3, i1 false
  br i1 %cmp4, label %loop.inc, label %loop.end

loop.inc:
  %index.next = add i64 %index, 1
  %exitcond = icmp ne i64 %index.next, 67
  br i1 %exitcond, label %loop, label %loop.end

loop.end:
  %retval = phi i64 [ %index, %loop ], [ 67, %loop.inc ]
  ret i64 %retval
}
176 define i64 @same_exit_block_pre_inc_use1_gep_two_indices() {
177 ; CHECK-LABEL: define i64 @same_exit_block_pre_inc_use1_gep_two_indices() {
179 ; CHECK-NEXT: [[P1:%.*]] = alloca [1024 x i8], align 1
180 ; CHECK-NEXT: [[P2:%.*]] = alloca [1024 x i8], align 1
181 ; CHECK-NEXT: call void @init_mem(ptr [[P1]], i64 1024)
182 ; CHECK-NEXT: call void @init_mem(ptr [[P2]], i64 1024)
183 ; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
185 ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
186 ; CHECK: vector.body:
187 ; CHECK-NEXT: [[INDEX1:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT3:%.*]], [[VECTOR_BODY]] ]
188 ; CHECK-NEXT: [[VEC_IND:%.*]] = phi <4 x i64> [ <i64 3, i64 4, i64 5, i64 6>, [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
189 ; CHECK-NEXT: [[OFFSET_IDX:%.*]] = add i64 3, [[INDEX1]]
190 ; CHECK-NEXT: [[TMP0:%.*]] = add i64 [[OFFSET_IDX]], 0
191 ; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds [1024 x i8], ptr [[P1]], i64 0, i64 [[TMP0]]
192 ; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i32 0
193 ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i8>, ptr [[TMP2]], align 1
194 ; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds [1024 x i8], ptr [[P2]], i64 0, i64 [[TMP0]]
195 ; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds i8, ptr [[TMP3]], i32 0
196 ; CHECK-NEXT: [[WIDE_LOAD2:%.*]] = load <4 x i8>, ptr [[TMP4]], align 1
197 ; CHECK-NEXT: [[TMP5:%.*]] = icmp eq <4 x i8> [[WIDE_LOAD]], [[WIDE_LOAD2]]
198 ; CHECK-NEXT: [[INDEX_NEXT3]] = add nuw i64 [[INDEX1]], 4
199 ; CHECK-NEXT: [[TMP6:%.*]] = xor <4 x i1> [[TMP5]], splat (i1 true)
200 ; CHECK-NEXT: [[TMP7:%.*]] = call i1 @llvm.vector.reduce.or.v4i1(<4 x i1> [[TMP6]])
201 ; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT3]], 64
202 ; CHECK-NEXT: [[VEC_IND_NEXT]] = add <4 x i64> [[VEC_IND]], splat (i64 4)
203 ; CHECK-NEXT: [[TMP9:%.*]] = or i1 [[TMP7]], [[TMP8]]
204 ; CHECK-NEXT: br i1 [[TMP9]], label [[MIDDLE_SPLIT:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
205 ; CHECK: middle.split:
206 ; CHECK-NEXT: br i1 [[TMP7]], label [[VECTOR_EARLY_EXIT:%.*]], label [[MIDDLE_BLOCK:%.*]]
207 ; CHECK: vector.early.exit:
208 ; CHECK-NEXT: [[FIRST_ACTIVE_LANE:%.*]] = call i64 @llvm.experimental.cttz.elts.i64.v4i1(<4 x i1> [[TMP6]], i1 true)
209 ; CHECK-NEXT: [[EARLY_EXIT_VALUE:%.*]] = extractelement <4 x i64> [[VEC_IND]], i64 [[FIRST_ACTIVE_LANE]]
210 ; CHECK-NEXT: br label [[LOOP_END:%.*]]
211 ; CHECK: middle.block:
212 ; CHECK-NEXT: br i1 true, label [[LOOP_END]], label [[SCALAR_PH]]
214 ; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 67, [[MIDDLE_BLOCK]] ], [ 3, [[ENTRY:%.*]] ]
215 ; CHECK-NEXT: br label [[LOOP:%.*]]
217 ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], [[LOOP_INC:%.*]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
218 ; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [1024 x i8], ptr [[P1]], i64 0, i64 [[INDEX]]
219 ; CHECK-NEXT: [[LD1:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
220 ; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds [1024 x i8], ptr [[P2]], i64 0, i64 [[INDEX]]
221 ; CHECK-NEXT: [[LD2:%.*]] = load i8, ptr [[ARRAYIDX1]], align 1
222 ; CHECK-NEXT: [[CMP3:%.*]] = icmp eq i8 [[LD1]], [[LD2]]
223 ; CHECK-NEXT: br i1 [[CMP3]], label [[LOOP_INC]], label [[LOOP_END]]
225 ; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 1
226 ; CHECK-NEXT: [[EXITCOND:%.*]] = icmp ne i64 [[INDEX_NEXT]], 67
227 ; CHECK-NEXT: br i1 [[EXITCOND]], label [[LOOP]], label [[LOOP_END]], !llvm.loop [[LOOP7:![0-9]+]]
229 ; CHECK-NEXT: [[RETVAL:%.*]] = phi i64 [ [[INDEX]], [[LOOP]] ], [ 67, [[LOOP_INC]] ], [ 67, [[MIDDLE_BLOCK]] ], [ [[EARLY_EXIT_VALUE]], [[VECTOR_EARLY_EXIT]] ]
230 ; CHECK-NEXT: ret i64 [[RETVAL]]
entry:
  %p1 = alloca [1024 x i8]
  %p2 = alloca [1024 x i8]
  call void @init_mem(ptr %p1, i64 1024)
  call void @init_mem(ptr %p2, i64 1024)
  br label %loop

loop:
  %index = phi i64 [ %index.next, %loop.inc ], [ 3, %entry ]
  %arrayidx = getelementptr inbounds [1024 x i8], ptr %p1, i64 0, i64 %index
  %ld1 = load i8, ptr %arrayidx, align 1
  %arrayidx1 = getelementptr inbounds [1024 x i8], ptr %p2, i64 0, i64 %index
  %ld2 = load i8, ptr %arrayidx1, align 1
  %cmp3 = icmp eq i8 %ld1, %ld2
  br i1 %cmp3, label %loop.inc, label %loop.end

loop.inc:
  %index.next = add i64 %index, 1
  %exitcond = icmp ne i64 %index.next, 67
  br i1 %exitcond, label %loop, label %loop.end

loop.end:
  %retval = phi i64 [ %index, %loop ], [ 67, %loop.inc ]
  ret i64 %retval
}
259 define i64 @same_exit_block_pre_inc_use1_alloca_diff_type() {
260 ; CHECK-LABEL: define i64 @same_exit_block_pre_inc_use1_alloca_diff_type() {
262 ; CHECK-NEXT: [[P1:%.*]] = alloca [40 x i32], align 4
263 ; CHECK-NEXT: [[P2:%.*]] = alloca [40 x i32], align 4
264 ; CHECK-NEXT: call void @init_mem(ptr [[P1]], i64 1024)
265 ; CHECK-NEXT: call void @init_mem(ptr [[P2]], i64 1024)
266 ; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
268 ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
269 ; CHECK: vector.body:
270 ; CHECK-NEXT: [[INDEX1:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT3:%.*]], [[VECTOR_BODY]] ]
271 ; CHECK-NEXT: [[VEC_IND:%.*]] = phi <4 x i64> [ <i64 3, i64 4, i64 5, i64 6>, [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
272 ; CHECK-NEXT: [[OFFSET_IDX:%.*]] = add i64 3, [[INDEX1]]
273 ; CHECK-NEXT: [[TMP0:%.*]] = add i64 [[OFFSET_IDX]], 0
274 ; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i8, ptr [[P1]], i64 [[TMP0]]
275 ; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i32 0
276 ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i8>, ptr [[TMP2]], align 1
277 ; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds i8, ptr [[P2]], i64 [[TMP0]]
278 ; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds i8, ptr [[TMP3]], i32 0
279 ; CHECK-NEXT: [[WIDE_LOAD2:%.*]] = load <4 x i8>, ptr [[TMP4]], align 1
280 ; CHECK-NEXT: [[TMP5:%.*]] = icmp eq <4 x i8> [[WIDE_LOAD]], [[WIDE_LOAD2]]
281 ; CHECK-NEXT: [[INDEX_NEXT3]] = add nuw i64 [[INDEX1]], 4
282 ; CHECK-NEXT: [[TMP6:%.*]] = xor <4 x i1> [[TMP5]], splat (i1 true)
283 ; CHECK-NEXT: [[TMP7:%.*]] = call i1 @llvm.vector.reduce.or.v4i1(<4 x i1> [[TMP6]])
284 ; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT3]], 64
285 ; CHECK-NEXT: [[VEC_IND_NEXT]] = add <4 x i64> [[VEC_IND]], splat (i64 4)
286 ; CHECK-NEXT: [[TMP9:%.*]] = or i1 [[TMP7]], [[TMP8]]
287 ; CHECK-NEXT: br i1 [[TMP9]], label [[MIDDLE_SPLIT:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
288 ; CHECK: middle.split:
289 ; CHECK-NEXT: br i1 [[TMP7]], label [[VECTOR_EARLY_EXIT:%.*]], label [[MIDDLE_BLOCK:%.*]]
290 ; CHECK: vector.early.exit:
291 ; CHECK-NEXT: [[FIRST_ACTIVE_LANE:%.*]] = call i64 @llvm.experimental.cttz.elts.i64.v4i1(<4 x i1> [[TMP6]], i1 true)
292 ; CHECK-NEXT: [[EARLY_EXIT_VALUE:%.*]] = extractelement <4 x i64> [[VEC_IND]], i64 [[FIRST_ACTIVE_LANE]]
293 ; CHECK-NEXT: br label [[LOOP_END:%.*]]
294 ; CHECK: middle.block:
295 ; CHECK-NEXT: br i1 true, label [[LOOP_END]], label [[SCALAR_PH]]
297 ; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 67, [[MIDDLE_BLOCK]] ], [ 3, [[ENTRY:%.*]] ]
298 ; CHECK-NEXT: br label [[LOOP:%.*]]
300 ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], [[LOOP_INC:%.*]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
301 ; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[P1]], i64 [[INDEX]]
302 ; CHECK-NEXT: [[LD1:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
303 ; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i8, ptr [[P2]], i64 [[INDEX]]
304 ; CHECK-NEXT: [[LD2:%.*]] = load i8, ptr [[ARRAYIDX1]], align 1
305 ; CHECK-NEXT: [[CMP3:%.*]] = icmp eq i8 [[LD1]], [[LD2]]
306 ; CHECK-NEXT: br i1 [[CMP3]], label [[LOOP_INC]], label [[LOOP_END]]
308 ; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 1
309 ; CHECK-NEXT: [[EXITCOND:%.*]] = icmp ne i64 [[INDEX_NEXT]], 67
310 ; CHECK-NEXT: br i1 [[EXITCOND]], label [[LOOP]], label [[LOOP_END]], !llvm.loop [[LOOP9:![0-9]+]]
312 ; CHECK-NEXT: [[RETVAL:%.*]] = phi i64 [ [[INDEX]], [[LOOP]] ], [ 67, [[LOOP_INC]] ], [ 67, [[MIDDLE_BLOCK]] ], [ [[EARLY_EXIT_VALUE]], [[VECTOR_EARLY_EXIT]] ]
313 ; CHECK-NEXT: ret i64 [[RETVAL]]
entry:
  %p1 = alloca [40 x i32]
  %p2 = alloca [40 x i32]
  call void @init_mem(ptr %p1, i64 1024)
  call void @init_mem(ptr %p2, i64 1024)
  br label %loop

loop:
  %index = phi i64 [ %index.next, %loop.inc ], [ 3, %entry ]
  %arrayidx = getelementptr inbounds i8, ptr %p1, i64 %index
  %ld1 = load i8, ptr %arrayidx, align 1
  %arrayidx1 = getelementptr inbounds i8, ptr %p2, i64 %index
  %ld2 = load i8, ptr %arrayidx1, align 1
  %cmp3 = icmp eq i8 %ld1, %ld2
  br i1 %cmp3, label %loop.inc, label %loop.end

loop.inc:
  %index.next = add i64 %index, 1
  %exitcond = icmp ne i64 %index.next, 67
  br i1 %exitcond, label %loop, label %loop.end

loop.end:
  %retval = phi i64 [ %index, %loop ], [ 67, %loop.inc ]
  ret i64 %retval
}
342 define i64 @same_exit_block_pre_inc_use2() {
343 ; CHECK-LABEL: define i64 @same_exit_block_pre_inc_use2() {
345 ; CHECK-NEXT: [[P1:%.*]] = alloca [1024 x i8], align 1
346 ; CHECK-NEXT: [[P2:%.*]] = alloca [1024 x i8], align 1
347 ; CHECK-NEXT: call void @init_mem(ptr [[P1]], i64 1024)
348 ; CHECK-NEXT: call void @init_mem(ptr [[P2]], i64 1024)
349 ; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
351 ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
352 ; CHECK: vector.body:
353 ; CHECK-NEXT: [[INDEX1:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT3:%.*]], [[VECTOR_BODY]] ]
354 ; CHECK-NEXT: [[VEC_IND:%.*]] = phi <4 x i64> [ <i64 3, i64 4, i64 5, i64 6>, [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
355 ; CHECK-NEXT: [[OFFSET_IDX:%.*]] = add i64 3, [[INDEX1]]
356 ; CHECK-NEXT: [[TMP0:%.*]] = add i64 [[OFFSET_IDX]], 0
357 ; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i8, ptr [[P1]], i64 [[TMP0]]
358 ; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i32 0
359 ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i8>, ptr [[TMP2]], align 1
360 ; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds i8, ptr [[P2]], i64 [[TMP0]]
361 ; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds i8, ptr [[TMP3]], i32 0
362 ; CHECK-NEXT: [[WIDE_LOAD2:%.*]] = load <4 x i8>, ptr [[TMP4]], align 1
363 ; CHECK-NEXT: [[TMP5:%.*]] = icmp eq <4 x i8> [[WIDE_LOAD]], [[WIDE_LOAD2]]
364 ; CHECK-NEXT: [[INDEX_NEXT3]] = add nuw i64 [[INDEX1]], 4
365 ; CHECK-NEXT: [[TMP6:%.*]] = xor <4 x i1> [[TMP5]], splat (i1 true)
366 ; CHECK-NEXT: [[TMP7:%.*]] = call i1 @llvm.vector.reduce.or.v4i1(<4 x i1> [[TMP6]])
367 ; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT3]], 64
368 ; CHECK-NEXT: [[VEC_IND_NEXT]] = add <4 x i64> [[VEC_IND]], splat (i64 4)
369 ; CHECK-NEXT: [[TMP9:%.*]] = or i1 [[TMP7]], [[TMP8]]
370 ; CHECK-NEXT: br i1 [[TMP9]], label [[MIDDLE_SPLIT:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
371 ; CHECK: middle.split:
372 ; CHECK-NEXT: [[TMP10:%.*]] = extractelement <4 x i64> [[VEC_IND]], i32 3
373 ; CHECK-NEXT: br i1 [[TMP7]], label [[VECTOR_EARLY_EXIT:%.*]], label [[MIDDLE_BLOCK:%.*]]
374 ; CHECK: vector.early.exit:
375 ; CHECK-NEXT: br label [[LOOP_END:%.*]]
376 ; CHECK: middle.block:
377 ; CHECK-NEXT: br i1 true, label [[LOOP_END]], label [[SCALAR_PH]]
379 ; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 67, [[MIDDLE_BLOCK]] ], [ 3, [[ENTRY:%.*]] ]
380 ; CHECK-NEXT: br label [[LOOP:%.*]]
382 ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], [[LOOP_INC:%.*]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
383 ; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[P1]], i64 [[INDEX]]
384 ; CHECK-NEXT: [[LD1:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
385 ; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i8, ptr [[P2]], i64 [[INDEX]]
386 ; CHECK-NEXT: [[LD2:%.*]] = load i8, ptr [[ARRAYIDX1]], align 1
387 ; CHECK-NEXT: [[CMP3:%.*]] = icmp eq i8 [[LD1]], [[LD2]]
388 ; CHECK-NEXT: br i1 [[CMP3]], label [[LOOP_INC]], label [[LOOP_END]]
390 ; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 1
391 ; CHECK-NEXT: [[EXITCOND:%.*]] = icmp ne i64 [[INDEX_NEXT]], 67
392 ; CHECK-NEXT: br i1 [[EXITCOND]], label [[LOOP]], label [[LOOP_END]], !llvm.loop [[LOOP11:![0-9]+]]
394 ; CHECK-NEXT: [[RETVAL:%.*]] = phi i64 [ 67, [[LOOP]] ], [ [[INDEX]], [[LOOP_INC]] ], [ [[TMP10]], [[MIDDLE_BLOCK]] ], [ 67, [[VECTOR_EARLY_EXIT]] ]
395 ; CHECK-NEXT: ret i64 [[RETVAL]]
entry:
  %p1 = alloca [1024 x i8]
  %p2 = alloca [1024 x i8]
  call void @init_mem(ptr %p1, i64 1024)
  call void @init_mem(ptr %p2, i64 1024)
  br label %loop

loop:
  %index = phi i64 [ %index.next, %loop.inc ], [ 3, %entry ]
  %arrayidx = getelementptr inbounds i8, ptr %p1, i64 %index
  %ld1 = load i8, ptr %arrayidx, align 1
  %arrayidx1 = getelementptr inbounds i8, ptr %p2, i64 %index
  %ld2 = load i8, ptr %arrayidx1, align 1
  %cmp3 = icmp eq i8 %ld1, %ld2
  br i1 %cmp3, label %loop.inc, label %loop.end

loop.inc:
  %index.next = add i64 %index, 1
  %exitcond = icmp ne i64 %index.next, 67
  br i1 %exitcond, label %loop, label %loop.end

loop.end:
  %retval = phi i64 [ 67, %loop ], [ %index, %loop.inc ]
  ret i64 %retval
}
424 define i64 @same_exit_block_pre_inc_use3() {
425 ; CHECK-LABEL: define i64 @same_exit_block_pre_inc_use3() {
427 ; CHECK-NEXT: [[P1:%.*]] = alloca [1024 x i8], align 1
428 ; CHECK-NEXT: [[P2:%.*]] = alloca [1024 x i8], align 1
429 ; CHECK-NEXT: call void @init_mem(ptr [[P1]], i64 1024)
430 ; CHECK-NEXT: call void @init_mem(ptr [[P2]], i64 1024)
431 ; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
433 ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
434 ; CHECK: vector.body:
435 ; CHECK-NEXT: [[INDEX1:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT3:%.*]], [[VECTOR_BODY]] ]
436 ; CHECK-NEXT: [[VEC_IND:%.*]] = phi <4 x i64> [ <i64 3, i64 4, i64 5, i64 6>, [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
437 ; CHECK-NEXT: [[OFFSET_IDX:%.*]] = add i64 3, [[INDEX1]]
438 ; CHECK-NEXT: [[TMP0:%.*]] = add i64 [[OFFSET_IDX]], 0
439 ; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i8, ptr [[P1]], i64 [[TMP0]]
440 ; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i32 0
441 ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i8>, ptr [[TMP2]], align 1
442 ; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds i8, ptr [[P2]], i64 [[TMP0]]
443 ; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds i8, ptr [[TMP3]], i32 0
444 ; CHECK-NEXT: [[WIDE_LOAD2:%.*]] = load <4 x i8>, ptr [[TMP4]], align 1
445 ; CHECK-NEXT: [[TMP5:%.*]] = icmp eq <4 x i8> [[WIDE_LOAD]], [[WIDE_LOAD2]]
446 ; CHECK-NEXT: [[INDEX_NEXT3]] = add nuw i64 [[INDEX1]], 4
447 ; CHECK-NEXT: [[TMP6:%.*]] = xor <4 x i1> [[TMP5]], splat (i1 true)
448 ; CHECK-NEXT: [[TMP7:%.*]] = call i1 @llvm.vector.reduce.or.v4i1(<4 x i1> [[TMP6]])
449 ; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT3]], 64
450 ; CHECK-NEXT: [[VEC_IND_NEXT]] = add <4 x i64> [[VEC_IND]], splat (i64 4)
451 ; CHECK-NEXT: [[TMP9:%.*]] = or i1 [[TMP7]], [[TMP8]]
452 ; CHECK-NEXT: br i1 [[TMP9]], label [[MIDDLE_SPLIT:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
453 ; CHECK: middle.split:
454 ; CHECK-NEXT: [[TMP10:%.*]] = extractelement <4 x i64> [[VEC_IND]], i32 3
455 ; CHECK-NEXT: br i1 [[TMP7]], label [[VECTOR_EARLY_EXIT:%.*]], label [[MIDDLE_BLOCK:%.*]]
456 ; CHECK: vector.early.exit:
457 ; CHECK-NEXT: [[FIRST_ACTIVE_LANE:%.*]] = call i64 @llvm.experimental.cttz.elts.i64.v4i1(<4 x i1> [[TMP6]], i1 true)
458 ; CHECK-NEXT: [[EARLY_EXIT_VALUE:%.*]] = extractelement <4 x i64> [[VEC_IND]], i64 [[FIRST_ACTIVE_LANE]]
459 ; CHECK-NEXT: br label [[LOOP_END:%.*]]
460 ; CHECK: middle.block:
461 ; CHECK-NEXT: br i1 true, label [[LOOP_END]], label [[SCALAR_PH]]
463 ; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 67, [[MIDDLE_BLOCK]] ], [ 3, [[ENTRY:%.*]] ]
464 ; CHECK-NEXT: br label [[LOOP:%.*]]
466 ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], [[LOOP_INC:%.*]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
467 ; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[P1]], i64 [[INDEX]]
468 ; CHECK-NEXT: [[LD1:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
469 ; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i8, ptr [[P2]], i64 [[INDEX]]
470 ; CHECK-NEXT: [[LD2:%.*]] = load i8, ptr [[ARRAYIDX1]], align 1
471 ; CHECK-NEXT: [[CMP3:%.*]] = icmp eq i8 [[LD1]], [[LD2]]
472 ; CHECK-NEXT: br i1 [[CMP3]], label [[LOOP_INC]], label [[LOOP_END]]
474 ; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 1
475 ; CHECK-NEXT: [[EXITCOND:%.*]] = icmp ne i64 [[INDEX_NEXT]], 67
476 ; CHECK-NEXT: br i1 [[EXITCOND]], label [[LOOP]], label [[LOOP_END]], !llvm.loop [[LOOP13:![0-9]+]]
478 ; CHECK-NEXT: [[INDEX_LCSSA:%.*]] = phi i64 [ [[INDEX]], [[LOOP_INC]] ], [ [[INDEX]], [[LOOP]] ], [ [[TMP10]], [[MIDDLE_BLOCK]] ], [ [[EARLY_EXIT_VALUE]], [[VECTOR_EARLY_EXIT]] ]
479 ; CHECK-NEXT: ret i64 [[INDEX_LCSSA]]
entry:
  %p1 = alloca [1024 x i8]
  %p2 = alloca [1024 x i8]
  call void @init_mem(ptr %p1, i64 1024)
  call void @init_mem(ptr %p2, i64 1024)
  br label %loop

loop:
  %index = phi i64 [ %index.next, %loop.inc ], [ 3, %entry ]
  %arrayidx = getelementptr inbounds i8, ptr %p1, i64 %index
  %ld1 = load i8, ptr %arrayidx, align 1
  %arrayidx1 = getelementptr inbounds i8, ptr %p2, i64 %index
  %ld2 = load i8, ptr %arrayidx1, align 1
  %cmp3 = icmp eq i8 %ld1, %ld2
  br i1 %cmp3, label %loop.inc, label %loop.end

loop.inc:
  %index.next = add i64 %index, 1
  %exitcond = icmp ne i64 %index.next, 67
  br i1 %exitcond, label %loop, label %loop.end

loop.end:
  ret i64 %index
}
; In this example the early exit block appears in the list of ExitNotTaken
; SCEVs, but is not computable because the exit condition compares the
; induction variable against a value loaded from memory.
509 define i64 @same_exit_block_pre_inc_use4() {
510 ; CHECK-LABEL: define i64 @same_exit_block_pre_inc_use4() {
512 ; CHECK-NEXT: [[P1:%.*]] = alloca [1024 x i64], align 8
513 ; CHECK-NEXT: [[P2:%.*]] = alloca [1024 x i64], align 8
514 ; CHECK-NEXT: call void @init_mem(ptr [[P1]], i64 1024)
515 ; CHECK-NEXT: call void @init_mem(ptr [[P2]], i64 1024)
516 ; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
518 ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
519 ; CHECK: vector.body:
520 ; CHECK-NEXT: [[INDEX1:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT2:%.*]], [[VECTOR_BODY]] ]
521 ; CHECK-NEXT: [[VEC_IND:%.*]] = phi <4 x i64> [ <i64 3, i64 4, i64 5, i64 6>, [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
522 ; CHECK-NEXT: [[OFFSET_IDX:%.*]] = add i64 3, [[INDEX1]]
523 ; CHECK-NEXT: [[TMP0:%.*]] = add i64 [[OFFSET_IDX]], 0
524 ; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i64, ptr [[P1]], i64 [[TMP0]]
525 ; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds i64, ptr [[TMP1]], i32 0
526 ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i64>, ptr [[TMP2]], align 1
527 ; CHECK-NEXT: [[TMP3:%.*]] = icmp ult <4 x i64> [[VEC_IND]], [[WIDE_LOAD]]
528 ; CHECK-NEXT: [[INDEX_NEXT2]] = add nuw i64 [[INDEX1]], 4
529 ; CHECK-NEXT: [[TMP4:%.*]] = xor <4 x i1> [[TMP3]], splat (i1 true)
530 ; CHECK-NEXT: [[TMP5:%.*]] = call i1 @llvm.vector.reduce.or.v4i1(<4 x i1> [[TMP4]])
531 ; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT2]], 64
532 ; CHECK-NEXT: [[VEC_IND_NEXT]] = add <4 x i64> [[VEC_IND]], splat (i64 4)
533 ; CHECK-NEXT: [[TMP7:%.*]] = or i1 [[TMP5]], [[TMP6]]
534 ; CHECK-NEXT: br i1 [[TMP7]], label [[MIDDLE_SPLIT:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]]
535 ; CHECK: middle.split:
536 ; CHECK-NEXT: br i1 [[TMP5]], label [[VECTOR_EARLY_EXIT:%.*]], label [[MIDDLE_BLOCK:%.*]]
537 ; CHECK: vector.early.exit:
538 ; CHECK-NEXT: [[FIRST_ACTIVE_LANE:%.*]] = call i64 @llvm.experimental.cttz.elts.i64.v4i1(<4 x i1> [[TMP4]], i1 true)
539 ; CHECK-NEXT: [[EARLY_EXIT_VALUE:%.*]] = extractelement <4 x i64> [[VEC_IND]], i64 [[FIRST_ACTIVE_LANE]]
540 ; CHECK-NEXT: br label [[LOOP_END:%.*]]
541 ; CHECK: middle.block:
542 ; CHECK-NEXT: br i1 true, label [[LOOP_END]], label [[SCALAR_PH]]
544 ; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 67, [[MIDDLE_BLOCK]] ], [ 3, [[ENTRY:%.*]] ]
545 ; CHECK-NEXT: br label [[LOOP:%.*]]
547 ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], [[LOOP_INC:%.*]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
548 ; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[P1]], i64 [[INDEX]]
549 ; CHECK-NEXT: [[LD1:%.*]] = load i64, ptr [[ARRAYIDX]], align 1
550 ; CHECK-NEXT: [[CMP3:%.*]] = icmp ult i64 [[INDEX]], [[LD1]]
551 ; CHECK-NEXT: br i1 [[CMP3]], label [[LOOP_INC]], label [[LOOP_END]]
553 ; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 1
554 ; CHECK-NEXT: [[EXITCOND:%.*]] = icmp ne i64 [[INDEX_NEXT]], 67
555 ; CHECK-NEXT: br i1 [[EXITCOND]], label [[LOOP]], label [[LOOP_END]], !llvm.loop [[LOOP15:![0-9]+]]
557 ; CHECK-NEXT: [[RETVAL:%.*]] = phi i64 [ [[INDEX]], [[LOOP]] ], [ 67, [[LOOP_INC]] ], [ 67, [[MIDDLE_BLOCK]] ], [ [[EARLY_EXIT_VALUE]], [[VECTOR_EARLY_EXIT]] ]
558 ; CHECK-NEXT: ret i64 [[RETVAL]]
entry:
  %p1 = alloca [1024 x i64]
  %p2 = alloca [1024 x i64]
  call void @init_mem(ptr %p1, i64 1024)
  call void @init_mem(ptr %p2, i64 1024)
  br label %loop

loop:
  %index = phi i64 [ %index.next, %loop.inc ], [ 3, %entry ]
  %arrayidx = getelementptr inbounds i64, ptr %p1, i64 %index
  %ld1 = load i64, ptr %arrayidx, align 1
  %cmp3 = icmp ult i64 %index, %ld1
  br i1 %cmp3, label %loop.inc, label %loop.end

loop.inc:
  %index.next = add i64 %index, 1
  %exitcond = icmp ne i64 %index.next, 67
  br i1 %exitcond, label %loop, label %loop.end

loop.end:
  %retval = phi i64 [ %index, %loop ], [ 67, %loop.inc ]
  ret i64 %retval
}
585 define i64 @same_exit_block_post_inc_use() {
586 ; CHECK-LABEL: define i64 @same_exit_block_post_inc_use() {
588 ; CHECK-NEXT: [[P1:%.*]] = alloca [1024 x i8], align 1
589 ; CHECK-NEXT: [[P2:%.*]] = alloca [1024 x i8], align 1
590 ; CHECK-NEXT: call void @init_mem(ptr [[P1]], i64 1024)
591 ; CHECK-NEXT: call void @init_mem(ptr [[P2]], i64 1024)
592 ; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
594 ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
595 ; CHECK: vector.body:
596 ; CHECK-NEXT: [[INDEX1:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT3:%.*]], [[VECTOR_BODY]] ]
597 ; CHECK-NEXT: [[VEC_IND:%.*]] = phi <4 x i64> [ <i64 3, i64 4, i64 5, i64 6>, [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
598 ; CHECK-NEXT: [[OFFSET_IDX:%.*]] = add i64 3, [[INDEX1]]
599 ; CHECK-NEXT: [[TMP0:%.*]] = add i64 [[OFFSET_IDX]], 0
600 ; CHECK-NEXT: [[TMP1:%.*]] = add i64 [[OFFSET_IDX]], 1
601 ; CHECK-NEXT: [[TMP2:%.*]] = add i64 [[OFFSET_IDX]], 2
602 ; CHECK-NEXT: [[TMP3:%.*]] = add i64 [[OFFSET_IDX]], 3
603 ; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds i8, ptr [[P1]], i64 [[TMP0]]
604 ; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds i8, ptr [[TMP4]], i32 0
605 ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i8>, ptr [[TMP5]], align 1
606 ; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i8, ptr [[P2]], i64 [[TMP0]]
607 ; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds i8, ptr [[TMP6]], i32 0
608 ; CHECK-NEXT: [[WIDE_LOAD2:%.*]] = load <4 x i8>, ptr [[TMP7]], align 1
609 ; CHECK-NEXT: [[TMP8:%.*]] = icmp eq <4 x i8> [[WIDE_LOAD]], [[WIDE_LOAD2]]
610 ; CHECK-NEXT: [[TMP9:%.*]] = add i64 [[TMP0]], 1
611 ; CHECK-NEXT: [[TMP10:%.*]] = add i64 [[TMP1]], 1
612 ; CHECK-NEXT: [[TMP11:%.*]] = add i64 [[TMP2]], 1
613 ; CHECK-NEXT: [[TMP12:%.*]] = add i64 [[TMP3]], 1
614 ; CHECK-NEXT: [[INDEX_NEXT3]] = add nuw i64 [[INDEX1]], 4
615 ; CHECK-NEXT: [[TMP13:%.*]] = xor <4 x i1> [[TMP8]], splat (i1 true)
616 ; CHECK-NEXT: [[TMP14:%.*]] = call i1 @llvm.vector.reduce.or.v4i1(<4 x i1> [[TMP13]])
617 ; CHECK-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_NEXT3]], 64
618 ; CHECK-NEXT: [[VEC_IND_NEXT]] = add <4 x i64> [[VEC_IND]], splat (i64 4)
619 ; CHECK-NEXT: [[TMP16:%.*]] = or i1 [[TMP14]], [[TMP15]]
620 ; CHECK-NEXT: br i1 [[TMP16]], label [[MIDDLE_SPLIT:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]]
621 ; CHECK: middle.split:
622 ; CHECK-NEXT: br i1 [[TMP14]], label [[VECTOR_EARLY_EXIT:%.*]], label [[MIDDLE_BLOCK:%.*]]
623 ; CHECK: vector.early.exit:
624 ; CHECK-NEXT: [[FIRST_ACTIVE_LANE:%.*]] = call i64 @llvm.experimental.cttz.elts.i64.v4i1(<4 x i1> [[TMP13]], i1 true)
625 ; CHECK-NEXT: [[EARLY_EXIT_VALUE:%.*]] = extractelement <4 x i64> [[VEC_IND]], i64 [[FIRST_ACTIVE_LANE]]
626 ; CHECK-NEXT: br label [[LOOP_END:%.*]]
627 ; CHECK: middle.block:
628 ; CHECK-NEXT: br i1 true, label [[LOOP_END]], label [[SCALAR_PH]]
630 ; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 67, [[MIDDLE_BLOCK]] ], [ 3, [[ENTRY:%.*]] ]
631 ; CHECK-NEXT: br label [[LOOP:%.*]]
633 ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], [[LOOP_INC:%.*]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
634 ; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[P1]], i64 [[INDEX]]
635 ; CHECK-NEXT: [[LD1:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
636 ; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i8, ptr [[P2]], i64 [[INDEX]]
637 ; CHECK-NEXT: [[LD2:%.*]] = load i8, ptr [[ARRAYIDX1]], align 1
638 ; CHECK-NEXT: [[CMP3:%.*]] = icmp eq i8 [[LD1]], [[LD2]]
639 ; CHECK-NEXT: br i1 [[CMP3]], label [[LOOP_INC]], label [[LOOP_END]]
641 ; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 1
642 ; CHECK-NEXT: [[EXITCOND:%.*]] = icmp ne i64 [[INDEX_NEXT]], 67
643 ; CHECK-NEXT: br i1 [[EXITCOND]], label [[LOOP]], label [[LOOP_END]], !llvm.loop [[LOOP17:![0-9]+]]
645 ; CHECK-NEXT: [[RETVAL:%.*]] = phi i64 [ [[INDEX]], [[LOOP]] ], [ [[INDEX_NEXT]], [[LOOP_INC]] ], [ [[TMP12]], [[MIDDLE_BLOCK]] ], [ [[EARLY_EXIT_VALUE]], [[VECTOR_EARLY_EXIT]] ]
646 ; CHECK-NEXT: ret i64 [[RETVAL]]
entry:
  %p1 = alloca [1024 x i8]
  %p2 = alloca [1024 x i8]
  call void @init_mem(ptr %p1, i64 1024)
  call void @init_mem(ptr %p2, i64 1024)
  br label %loop

loop:
  %index = phi i64 [ %index.next, %loop.inc ], [ 3, %entry ]
  %arrayidx = getelementptr inbounds i8, ptr %p1, i64 %index
  %ld1 = load i8, ptr %arrayidx, align 1
  %arrayidx1 = getelementptr inbounds i8, ptr %p2, i64 %index
  %ld2 = load i8, ptr %arrayidx1, align 1
  %cmp3 = icmp eq i8 %ld1, %ld2
  br i1 %cmp3, label %loop.inc, label %loop.end

loop.inc:
  %index.next = add i64 %index, 1
  %exitcond = icmp ne i64 %index.next, 67
  br i1 %exitcond, label %loop, label %loop.end

loop.end:
  %retval = phi i64 [ %index, %loop ], [ %index.next, %loop.inc ]
  ret i64 %retval
}
675 define i64 @same_exit_block_post_inc_use2() {
676 ; CHECK-LABEL: define i64 @same_exit_block_post_inc_use2() {
678 ; CHECK-NEXT: [[P1:%.*]] = alloca [1024 x i8], align 1
679 ; CHECK-NEXT: [[P2:%.*]] = alloca [1024 x i8], align 1
680 ; CHECK-NEXT: call void @init_mem(ptr [[P1]], i64 1024)
681 ; CHECK-NEXT: call void @init_mem(ptr [[P2]], i64 1024)
682 ; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
684 ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
685 ; CHECK: vector.body:
686 ; CHECK-NEXT: [[INDEX1:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT3:%.*]], [[VECTOR_BODY]] ]
687 ; CHECK-NEXT: [[VEC_IND:%.*]] = phi <4 x i64> [ <i64 3, i64 4, i64 5, i64 6>, [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
688 ; CHECK-NEXT: [[OFFSET_IDX:%.*]] = add i64 3, [[INDEX1]]
689 ; CHECK-NEXT: [[TMP0:%.*]] = add i64 [[OFFSET_IDX]], 0
690 ; CHECK-NEXT: [[TMP1:%.*]] = add i64 [[OFFSET_IDX]], 1
691 ; CHECK-NEXT: [[TMP2:%.*]] = add i64 [[OFFSET_IDX]], 2
692 ; CHECK-NEXT: [[TMP3:%.*]] = add i64 [[OFFSET_IDX]], 3
693 ; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds i8, ptr [[P1]], i64 [[TMP0]]
694 ; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds i8, ptr [[TMP4]], i32 0
695 ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i8>, ptr [[TMP5]], align 1
696 ; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i8, ptr [[P2]], i64 [[TMP0]]
697 ; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds i8, ptr [[TMP6]], i32 0
698 ; CHECK-NEXT: [[WIDE_LOAD2:%.*]] = load <4 x i8>, ptr [[TMP7]], align 1
699 ; CHECK-NEXT: [[TMP8:%.*]] = add i64 [[TMP0]], 1
700 ; CHECK-NEXT: [[TMP9:%.*]] = add i64 [[TMP1]], 1
701 ; CHECK-NEXT: [[TMP10:%.*]] = add i64 [[TMP2]], 1
702 ; CHECK-NEXT: [[TMP11:%.*]] = add i64 [[TMP3]], 1
703 ; CHECK-NEXT: [[TMP12:%.*]] = insertelement <4 x i64> poison, i64 [[TMP8]], i32 0
704 ; CHECK-NEXT: [[TMP13:%.*]] = insertelement <4 x i64> [[TMP12]], i64 [[TMP9]], i32 1
705 ; CHECK-NEXT: [[TMP14:%.*]] = insertelement <4 x i64> [[TMP13]], i64 [[TMP10]], i32 2
706 ; CHECK-NEXT: [[TMP15:%.*]] = insertelement <4 x i64> [[TMP14]], i64 [[TMP11]], i32 3
707 ; CHECK-NEXT: [[TMP16:%.*]] = icmp eq <4 x i8> [[WIDE_LOAD]], [[WIDE_LOAD2]]
708 ; CHECK-NEXT: [[INDEX_NEXT3]] = add nuw i64 [[INDEX1]], 4
709 ; CHECK-NEXT: [[TMP17:%.*]] = xor <4 x i1> [[TMP16]], splat (i1 true)
710 ; CHECK-NEXT: [[TMP18:%.*]] = call i1 @llvm.vector.reduce.or.v4i1(<4 x i1> [[TMP17]])
711 ; CHECK-NEXT: [[TMP19:%.*]] = icmp eq i64 [[INDEX_NEXT3]], 64
712 ; CHECK-NEXT: [[VEC_IND_NEXT]] = add <4 x i64> [[VEC_IND]], splat (i64 4)
713 ; CHECK-NEXT: [[TMP20:%.*]] = or i1 [[TMP18]], [[TMP19]]
714 ; CHECK-NEXT: br i1 [[TMP20]], label [[MIDDLE_SPLIT:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]]
715 ; CHECK: middle.split:
716 ; CHECK-NEXT: [[TMP21:%.*]] = extractelement <4 x i64> [[VEC_IND]], i32 3
717 ; CHECK-NEXT: br i1 [[TMP18]], label [[VECTOR_EARLY_EXIT:%.*]], label [[MIDDLE_BLOCK:%.*]]
718 ; CHECK: vector.early.exit:
719 ; CHECK-NEXT: [[FIRST_ACTIVE_LANE:%.*]] = call i64 @llvm.experimental.cttz.elts.i64.v4i1(<4 x i1> [[TMP17]], i1 true)
720 ; CHECK-NEXT: [[EARLY_EXIT_VALUE:%.*]] = extractelement <4 x i64> [[TMP15]], i64 [[FIRST_ACTIVE_LANE]]
721 ; CHECK-NEXT: br label [[LOOP_END:%.*]]
722 ; CHECK: middle.block:
723 ; CHECK-NEXT: br i1 true, label [[LOOP_END]], label [[SCALAR_PH]]
725 ; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 67, [[MIDDLE_BLOCK]] ], [ 3, [[ENTRY:%.*]] ]
726 ; CHECK-NEXT: br label [[LOOP:%.*]]
728 ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], [[LOOP_INC:%.*]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
729 ; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[P1]], i64 [[INDEX]]
730 ; CHECK-NEXT: [[LD1:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
731 ; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i8, ptr [[P2]], i64 [[INDEX]]
732 ; CHECK-NEXT: [[LD2:%.*]] = load i8, ptr [[ARRAYIDX1]], align 1
733 ; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 1
734 ; CHECK-NEXT: [[CMP3:%.*]] = icmp eq i8 [[LD1]], [[LD2]]
735 ; CHECK-NEXT: br i1 [[CMP3]], label [[LOOP_INC]], label [[LOOP_END]]
737 ; CHECK-NEXT: [[EXITCOND:%.*]] = icmp ne i64 [[INDEX_NEXT]], 67
738 ; CHECK-NEXT: br i1 [[EXITCOND]], label [[LOOP]], label [[LOOP_END]], !llvm.loop [[LOOP19:![0-9]+]]
740 ; CHECK-NEXT: [[RETVAL:%.*]] = phi i64 [ [[INDEX_NEXT]], [[LOOP]] ], [ [[INDEX]], [[LOOP_INC]] ], [ [[TMP21]], [[MIDDLE_BLOCK]] ], [ [[EARLY_EXIT_VALUE]], [[VECTOR_EARLY_EXIT]] ]
741 ; CHECK-NEXT: ret i64 [[RETVAL]]
entry:
  %p1 = alloca [1024 x i8]
  %p2 = alloca [1024 x i8]
  call void @init_mem(ptr %p1, i64 1024)
  call void @init_mem(ptr %p2, i64 1024)
  br label %loop

loop:
  %index = phi i64 [ %index.next, %loop.inc ], [ 3, %entry ]
  %arrayidx = getelementptr inbounds i8, ptr %p1, i64 %index
  %ld1 = load i8, ptr %arrayidx, align 1
  %arrayidx1 = getelementptr inbounds i8, ptr %p2, i64 %index
  %ld2 = load i8, ptr %arrayidx1, align 1
  %index.next = add i64 %index, 1
  %cmp3 = icmp eq i8 %ld1, %ld2
  br i1 %cmp3, label %loop.inc, label %loop.end

loop.inc:
  %exitcond = icmp ne i64 %index.next, 67
  br i1 %exitcond, label %loop, label %loop.end

loop.end:
  %retval = phi i64 [ %index.next, %loop ], [ %index, %loop.inc ]
  ret i64 %retval
}
770 define i64 @diff_exit_block_pre_inc_use1() {
771 ; CHECK-LABEL: define i64 @diff_exit_block_pre_inc_use1() {
773 ; CHECK-NEXT: [[P1:%.*]] = alloca [1024 x i8], align 1
774 ; CHECK-NEXT: [[P2:%.*]] = alloca [1024 x i8], align 1
775 ; CHECK-NEXT: call void @init_mem(ptr [[P1]], i64 1024)
776 ; CHECK-NEXT: call void @init_mem(ptr [[P2]], i64 1024)
777 ; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
779 ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
780 ; CHECK: vector.body:
781 ; CHECK-NEXT: [[INDEX1:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT3:%.*]], [[VECTOR_BODY]] ]
782 ; CHECK-NEXT: [[VEC_IND:%.*]] = phi <4 x i64> [ <i64 3, i64 4, i64 5, i64 6>, [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
783 ; CHECK-NEXT: [[OFFSET_IDX:%.*]] = add i64 3, [[INDEX1]]
784 ; CHECK-NEXT: [[TMP0:%.*]] = add i64 [[OFFSET_IDX]], 0
785 ; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i8, ptr [[P1]], i64 [[TMP0]]
786 ; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i32 0
787 ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i8>, ptr [[TMP2]], align 1
788 ; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds i8, ptr [[P2]], i64 [[TMP0]]
789 ; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds i8, ptr [[TMP3]], i32 0
790 ; CHECK-NEXT: [[WIDE_LOAD2:%.*]] = load <4 x i8>, ptr [[TMP4]], align 1
791 ; CHECK-NEXT: [[TMP5:%.*]] = icmp eq <4 x i8> [[WIDE_LOAD]], [[WIDE_LOAD2]]
792 ; CHECK-NEXT: [[INDEX_NEXT3]] = add nuw i64 [[INDEX1]], 4
793 ; CHECK-NEXT: [[TMP6:%.*]] = xor <4 x i1> [[TMP5]], splat (i1 true)
794 ; CHECK-NEXT: [[TMP7:%.*]] = call i1 @llvm.vector.reduce.or.v4i1(<4 x i1> [[TMP6]])
795 ; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT3]], 64
796 ; CHECK-NEXT: [[VEC_IND_NEXT]] = add <4 x i64> [[VEC_IND]], splat (i64 4)
797 ; CHECK-NEXT: [[TMP9:%.*]] = or i1 [[TMP7]], [[TMP8]]
798 ; CHECK-NEXT: br i1 [[TMP9]], label [[MIDDLE_SPLIT:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]]
799 ; CHECK: middle.split:
800 ; CHECK-NEXT: br i1 [[TMP7]], label [[VECTOR_EARLY_EXIT:%.*]], label [[MIDDLE_BLOCK:%.*]]
801 ; CHECK: vector.early.exit:
802 ; CHECK-NEXT: [[FIRST_ACTIVE_LANE:%.*]] = call i64 @llvm.experimental.cttz.elts.i64.v4i1(<4 x i1> [[TMP6]], i1 true)
803 ; CHECK-NEXT: [[EARLY_EXIT_VALUE:%.*]] = extractelement <4 x i64> [[VEC_IND]], i64 [[FIRST_ACTIVE_LANE]]
804 ; CHECK-NEXT: br label [[LOOP_EARLY_EXIT:%.*]]
805 ; CHECK: middle.block:
806 ; CHECK-NEXT: br i1 true, label [[LOOP_END:%.*]], label [[SCALAR_PH]]
808 ; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 67, [[MIDDLE_BLOCK]] ], [ 3, [[ENTRY:%.*]] ]
809 ; CHECK-NEXT: br label [[LOOP:%.*]]
811 ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], [[LOOP_INC:%.*]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
812 ; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[P1]], i64 [[INDEX]]
813 ; CHECK-NEXT: [[LD1:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
814 ; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i8, ptr [[P2]], i64 [[INDEX]]
815 ; CHECK-NEXT: [[LD2:%.*]] = load i8, ptr [[ARRAYIDX1]], align 1
816 ; CHECK-NEXT: [[CMP3:%.*]] = icmp eq i8 [[LD1]], [[LD2]]
817 ; CHECK-NEXT: br i1 [[CMP3]], label [[LOOP_INC]], label [[LOOP_EARLY_EXIT]]
819 ; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 1
820 ; CHECK-NEXT: [[EXITCOND:%.*]] = icmp ne i64 [[INDEX_NEXT]], 67
821 ; CHECK-NEXT: br i1 [[EXITCOND]], label [[LOOP]], label [[LOOP_END]], !llvm.loop [[LOOP21:![0-9]+]]
822 ; CHECK: loop.early.exit:
823 ; CHECK-NEXT: [[RETVAL1:%.*]] = phi i64 [ [[INDEX]], [[LOOP]] ], [ [[EARLY_EXIT_VALUE]], [[VECTOR_EARLY_EXIT]] ]
824 ; CHECK-NEXT: ret i64 [[RETVAL1]]
826 ; CHECK-NEXT: [[RETVAL2:%.*]] = phi i64 [ 67, [[LOOP_INC]] ], [ 67, [[MIDDLE_BLOCK]] ]
827 ; CHECK-NEXT: ret i64 [[RETVAL2]]
entry:
  %p1 = alloca [1024 x i8]
  %p2 = alloca [1024 x i8]
  call void @init_mem(ptr %p1, i64 1024)
  call void @init_mem(ptr %p2, i64 1024)
  br label %loop

loop:
  %index = phi i64 [ %index.next, %loop.inc ], [ 3, %entry ]
  %arrayidx = getelementptr inbounds i8, ptr %p1, i64 %index
  %ld1 = load i8, ptr %arrayidx, align 1
  %arrayidx1 = getelementptr inbounds i8, ptr %p2, i64 %index
  %ld2 = load i8, ptr %arrayidx1, align 1
  %cmp3 = icmp eq i8 %ld1, %ld2
  br i1 %cmp3, label %loop.inc, label %loop.early.exit

loop.inc:
  %index.next = add i64 %index, 1
  %exitcond = icmp ne i64 %index.next, 67
  br i1 %exitcond, label %loop, label %loop.end

loop.early.exit:
  %retval1 = phi i64 [ %index, %loop ]
  ret i64 %retval1

loop.end:
  %retval2 = phi i64 [ 67, %loop.inc ]
  ret i64 %retval2
}
860 define i64 @diff_exit_block_pre_inc_use2() {
861 ; CHECK-LABEL: define i64 @diff_exit_block_pre_inc_use2() {
863 ; CHECK-NEXT: [[P1:%.*]] = alloca [1024 x i8], align 1
864 ; CHECK-NEXT: [[P2:%.*]] = alloca [1024 x i8], align 1
865 ; CHECK-NEXT: call void @init_mem(ptr [[P1]], i64 1024)
866 ; CHECK-NEXT: call void @init_mem(ptr [[P2]], i64 1024)
867 ; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
869 ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
870 ; CHECK: vector.body:
871 ; CHECK-NEXT: [[INDEX1:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT3:%.*]], [[VECTOR_BODY]] ]
872 ; CHECK-NEXT: [[VEC_IND:%.*]] = phi <4 x i64> [ <i64 3, i64 4, i64 5, i64 6>, [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
873 ; CHECK-NEXT: [[OFFSET_IDX:%.*]] = add i64 3, [[INDEX1]]
874 ; CHECK-NEXT: [[TMP0:%.*]] = add i64 [[OFFSET_IDX]], 0
875 ; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i8, ptr [[P1]], i64 [[TMP0]]
876 ; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i32 0
877 ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i8>, ptr [[TMP2]], align 1
878 ; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds i8, ptr [[P2]], i64 [[TMP0]]
879 ; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds i8, ptr [[TMP3]], i32 0
880 ; CHECK-NEXT: [[WIDE_LOAD2:%.*]] = load <4 x i8>, ptr [[TMP4]], align 1
881 ; CHECK-NEXT: [[TMP5:%.*]] = icmp eq <4 x i8> [[WIDE_LOAD]], [[WIDE_LOAD2]]
882 ; CHECK-NEXT: [[INDEX_NEXT3]] = add nuw i64 [[INDEX1]], 4
883 ; CHECK-NEXT: [[TMP6:%.*]] = xor <4 x i1> [[TMP5]], splat (i1 true)
884 ; CHECK-NEXT: [[TMP7:%.*]] = call i1 @llvm.vector.reduce.or.v4i1(<4 x i1> [[TMP6]])
885 ; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT3]], 64
886 ; CHECK-NEXT: [[VEC_IND_NEXT]] = add <4 x i64> [[VEC_IND]], splat (i64 4)
887 ; CHECK-NEXT: [[TMP9:%.*]] = or i1 [[TMP7]], [[TMP8]]
888 ; CHECK-NEXT: br i1 [[TMP9]], label [[MIDDLE_SPLIT:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP22:![0-9]+]]
889 ; CHECK: middle.split:
890 ; CHECK-NEXT: br i1 [[TMP7]], label [[VECTOR_EARLY_EXIT:%.*]], label [[MIDDLE_BLOCK:%.*]]
891 ; CHECK: vector.early.exit:
892 ; CHECK-NEXT: br label [[LOOP_EARLY_EXIT:%.*]]
893 ; CHECK: middle.block:
894 ; CHECK-NEXT: [[TMP10:%.*]] = extractelement <4 x i64> [[VEC_IND]], i32 3
895 ; CHECK-NEXT: br i1 true, label [[LOOP_END:%.*]], label [[SCALAR_PH]]
897 ; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 67, [[MIDDLE_BLOCK]] ], [ 3, [[ENTRY:%.*]] ]
898 ; CHECK-NEXT: br label [[LOOP:%.*]]
900 ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], [[LOOP_INC:%.*]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
901 ; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[P1]], i64 [[INDEX]]
902 ; CHECK-NEXT: [[LD1:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
903 ; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i8, ptr [[P2]], i64 [[INDEX]]
904 ; CHECK-NEXT: [[LD2:%.*]] = load i8, ptr [[ARRAYIDX1]], align 1
905 ; CHECK-NEXT: [[CMP3:%.*]] = icmp eq i8 [[LD1]], [[LD2]]
906 ; CHECK-NEXT: br i1 [[CMP3]], label [[LOOP_INC]], label [[LOOP_EARLY_EXIT]]
908 ; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 1
909 ; CHECK-NEXT: [[EXITCOND:%.*]] = icmp ne i64 [[INDEX_NEXT]], 67
910 ; CHECK-NEXT: br i1 [[EXITCOND]], label [[LOOP]], label [[LOOP_END]], !llvm.loop [[LOOP23:![0-9]+]]
911 ; CHECK: loop.early.exit:
912 ; CHECK-NEXT: [[RETVAL1:%.*]] = phi i64 [ 67, [[LOOP]] ], [ 67, [[VECTOR_EARLY_EXIT]] ]
913 ; CHECK-NEXT: ret i64 [[RETVAL1]]
915 ; CHECK-NEXT: [[RETVAL2:%.*]] = phi i64 [ [[INDEX]], [[LOOP_INC]] ], [ [[TMP10]], [[MIDDLE_BLOCK]] ]
916 ; CHECK-NEXT: ret i64 [[RETVAL2]]
entry:
  %p1 = alloca [1024 x i8]
  %p2 = alloca [1024 x i8]
  call void @init_mem(ptr %p1, i64 1024)
  call void @init_mem(ptr %p2, i64 1024)
  br label %loop

loop:
  %index = phi i64 [ %index.next, %loop.inc ], [ 3, %entry ]
  %arrayidx = getelementptr inbounds i8, ptr %p1, i64 %index
  %ld1 = load i8, ptr %arrayidx, align 1
  %arrayidx1 = getelementptr inbounds i8, ptr %p2, i64 %index
  %ld2 = load i8, ptr %arrayidx1, align 1
  %cmp3 = icmp eq i8 %ld1, %ld2
  br i1 %cmp3, label %loop.inc, label %loop.early.exit

loop.inc:
  %index.next = add i64 %index, 1
  %exitcond = icmp ne i64 %index.next, 67
  br i1 %exitcond, label %loop, label %loop.end

loop.early.exit:
  %retval1 = phi i64 [ 67, %loop ]
  ret i64 %retval1

loop.end:
  %retval2 = phi i64 [ %index, %loop.inc ]
  ret i64 %retval2
}
949 define i64 @diff_exit_block_pre_inc_use3() {
950 ; CHECK-LABEL: define i64 @diff_exit_block_pre_inc_use3() {
952 ; CHECK-NEXT: [[P1:%.*]] = alloca [1024 x i8], align 1
953 ; CHECK-NEXT: [[P2:%.*]] = alloca [1024 x i8], align 1
954 ; CHECK-NEXT: call void @init_mem(ptr [[P1]], i64 1024)
955 ; CHECK-NEXT: call void @init_mem(ptr [[P2]], i64 1024)
956 ; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
958 ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
959 ; CHECK: vector.body:
960 ; CHECK-NEXT: [[INDEX2:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT4:%.*]], [[VECTOR_BODY]] ]
961 ; CHECK-NEXT: [[VEC_IND:%.*]] = phi <4 x i64> [ <i64 3, i64 4, i64 5, i64 6>, [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
962 ; CHECK-NEXT: [[OFFSET_IDX:%.*]] = add i64 3, [[INDEX2]]
963 ; CHECK-NEXT: [[TMP0:%.*]] = add i64 [[OFFSET_IDX]], 0
964 ; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i8, ptr [[P1]], i64 [[TMP0]]
965 ; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i32 0
966 ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i8>, ptr [[TMP2]], align 1
967 ; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds i8, ptr [[P2]], i64 [[TMP0]]
968 ; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds i8, ptr [[TMP3]], i32 0
969 ; CHECK-NEXT: [[WIDE_LOAD3:%.*]] = load <4 x i8>, ptr [[TMP4]], align 1
970 ; CHECK-NEXT: [[TMP5:%.*]] = icmp eq <4 x i8> [[WIDE_LOAD]], [[WIDE_LOAD3]]
971 ; CHECK-NEXT: [[INDEX_NEXT4]] = add nuw i64 [[INDEX2]], 4
972 ; CHECK-NEXT: [[TMP6:%.*]] = xor <4 x i1> [[TMP5]], splat (i1 true)
973 ; CHECK-NEXT: [[TMP7:%.*]] = call i1 @llvm.vector.reduce.or.v4i1(<4 x i1> [[TMP6]])
974 ; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT4]], 64
975 ; CHECK-NEXT: [[VEC_IND_NEXT]] = add <4 x i64> [[VEC_IND]], splat (i64 4)
976 ; CHECK-NEXT: [[TMP9:%.*]] = or i1 [[TMP7]], [[TMP8]]
977 ; CHECK-NEXT: br i1 [[TMP9]], label [[MIDDLE_SPLIT:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP24:![0-9]+]]
978 ; CHECK: middle.split:
979 ; CHECK-NEXT: br i1 [[TMP7]], label [[VECTOR_EARLY_EXIT:%.*]], label [[MIDDLE_BLOCK:%.*]]
980 ; CHECK: vector.early.exit:
981 ; CHECK-NEXT: [[FIRST_ACTIVE_LANE:%.*]] = call i64 @llvm.experimental.cttz.elts.i64.v4i1(<4 x i1> [[TMP6]], i1 true)
982 ; CHECK-NEXT: [[EARLY_EXIT_VALUE:%.*]] = extractelement <4 x i64> [[VEC_IND]], i64 [[FIRST_ACTIVE_LANE]]
983 ; CHECK-NEXT: br label [[LOOP_EARLY_EXIT:%.*]]
984 ; CHECK: middle.block:
985 ; CHECK-NEXT: [[TMP10:%.*]] = extractelement <4 x i64> [[VEC_IND]], i32 3
986 ; CHECK-NEXT: br i1 true, label [[LOOP_END:%.*]], label [[SCALAR_PH]]
988 ; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 67, [[MIDDLE_BLOCK]] ], [ 3, [[ENTRY:%.*]] ]
989 ; CHECK-NEXT: br label [[LOOP:%.*]]
991 ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], [[LOOP_INC:%.*]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
992 ; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[P1]], i64 [[INDEX]]
993 ; CHECK-NEXT: [[LD1:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
994 ; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i8, ptr [[P2]], i64 [[INDEX]]
995 ; CHECK-NEXT: [[LD2:%.*]] = load i8, ptr [[ARRAYIDX1]], align 1
996 ; CHECK-NEXT: [[CMP3:%.*]] = icmp eq i8 [[LD1]], [[LD2]]
997 ; CHECK-NEXT: br i1 [[CMP3]], label [[LOOP_INC]], label [[LOOP_EARLY_EXIT]]
999 ; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 1
1000 ; CHECK-NEXT: [[EXITCOND:%.*]] = icmp ne i64 [[INDEX_NEXT]], 67
1001 ; CHECK-NEXT: br i1 [[EXITCOND]], label [[LOOP]], label [[LOOP_END]], !llvm.loop [[LOOP25:![0-9]+]]
1002 ; CHECK: loop.early.exit:
1003 ; CHECK-NEXT: [[INDEX_LCSSA:%.*]] = phi i64 [ [[INDEX]], [[LOOP]] ], [ [[EARLY_EXIT_VALUE]], [[VECTOR_EARLY_EXIT]] ]
1004 ; CHECK-NEXT: ret i64 [[INDEX_LCSSA]]
1006 ; CHECK-NEXT: [[INDEX_LCSSA1:%.*]] = phi i64 [ [[INDEX]], [[LOOP_INC]] ], [ [[TMP10]], [[MIDDLE_BLOCK]] ]
1007 ; CHECK-NEXT: ret i64 [[INDEX_LCSSA1]]
entry:
  %p1 = alloca [1024 x i8]
  %p2 = alloca [1024 x i8]
  call void @init_mem(ptr %p1, i64 1024)
  call void @init_mem(ptr %p2, i64 1024)
  br label %loop

loop:
  %index = phi i64 [ %index.next, %loop.inc ], [ 3, %entry ]
  %arrayidx = getelementptr inbounds i8, ptr %p1, i64 %index
  %ld1 = load i8, ptr %arrayidx, align 1
  %arrayidx1 = getelementptr inbounds i8, ptr %p2, i64 %index
  %ld2 = load i8, ptr %arrayidx1, align 1
  %cmp3 = icmp eq i8 %ld1, %ld2
  br i1 %cmp3, label %loop.inc, label %loop.early.exit

loop.inc:
  %index.next = add i64 %index, 1
  %exitcond = icmp ne i64 %index.next, 67
  br i1 %exitcond, label %loop, label %loop.end

loop.early.exit:
  ret i64 %index

loop.end:
  ret i64 %index
}
1038 define i64 @diff_exit_block_post_inc_use1() {
1039 ; CHECK-LABEL: define i64 @diff_exit_block_post_inc_use1() {
1040 ; CHECK-NEXT: entry:
1041 ; CHECK-NEXT: [[P1:%.*]] = alloca [1024 x i8], align 1
1042 ; CHECK-NEXT: [[P2:%.*]] = alloca [1024 x i8], align 1
1043 ; CHECK-NEXT: call void @init_mem(ptr [[P1]], i64 1024)
1044 ; CHECK-NEXT: call void @init_mem(ptr [[P2]], i64 1024)
1045 ; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
1047 ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
1048 ; CHECK: vector.body:
1049 ; CHECK-NEXT: [[INDEX1:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT3:%.*]], [[VECTOR_BODY]] ]
1050 ; CHECK-NEXT: [[VEC_IND:%.*]] = phi <4 x i64> [ <i64 3, i64 4, i64 5, i64 6>, [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
1051 ; CHECK-NEXT: [[OFFSET_IDX:%.*]] = add i64 3, [[INDEX1]]
1052 ; CHECK-NEXT: [[TMP0:%.*]] = add i64 [[OFFSET_IDX]], 0
1053 ; CHECK-NEXT: [[TMP1:%.*]] = add i64 [[OFFSET_IDX]], 1
1054 ; CHECK-NEXT: [[TMP2:%.*]] = add i64 [[OFFSET_IDX]], 2
1055 ; CHECK-NEXT: [[TMP3:%.*]] = add i64 [[OFFSET_IDX]], 3
1056 ; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds i8, ptr [[P1]], i64 [[TMP0]]
1057 ; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds i8, ptr [[TMP4]], i32 0
1058 ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i8>, ptr [[TMP5]], align 1
1059 ; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i8, ptr [[P2]], i64 [[TMP0]]
1060 ; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds i8, ptr [[TMP6]], i32 0
1061 ; CHECK-NEXT: [[WIDE_LOAD2:%.*]] = load <4 x i8>, ptr [[TMP7]], align 1
1062 ; CHECK-NEXT: [[TMP8:%.*]] = icmp eq <4 x i8> [[WIDE_LOAD]], [[WIDE_LOAD2]]
1063 ; CHECK-NEXT: [[TMP9:%.*]] = add i64 [[TMP0]], 1
1064 ; CHECK-NEXT: [[TMP10:%.*]] = add i64 [[TMP1]], 1
1065 ; CHECK-NEXT: [[TMP11:%.*]] = add i64 [[TMP2]], 1
1066 ; CHECK-NEXT: [[TMP12:%.*]] = add i64 [[TMP3]], 1
1067 ; CHECK-NEXT: [[INDEX_NEXT3]] = add nuw i64 [[INDEX1]], 4
1068 ; CHECK-NEXT: [[TMP13:%.*]] = xor <4 x i1> [[TMP8]], splat (i1 true)
1069 ; CHECK-NEXT: [[TMP14:%.*]] = call i1 @llvm.vector.reduce.or.v4i1(<4 x i1> [[TMP13]])
1070 ; CHECK-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_NEXT3]], 64
1071 ; CHECK-NEXT: [[VEC_IND_NEXT]] = add <4 x i64> [[VEC_IND]], splat (i64 4)
1072 ; CHECK-NEXT: [[TMP16:%.*]] = or i1 [[TMP14]], [[TMP15]]
1073 ; CHECK-NEXT: br i1 [[TMP16]], label [[MIDDLE_SPLIT:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP26:![0-9]+]]
1074 ; CHECK: middle.split:
1075 ; CHECK-NEXT: br i1 [[TMP14]], label [[VECTOR_EARLY_EXIT:%.*]], label [[MIDDLE_BLOCK:%.*]]
1076 ; CHECK: vector.early.exit:
1077 ; CHECK-NEXT: [[FIRST_ACTIVE_LANE:%.*]] = call i64 @llvm.experimental.cttz.elts.i64.v4i1(<4 x i1> [[TMP13]], i1 true)
1078 ; CHECK-NEXT: [[EARLY_EXIT_VALUE:%.*]] = extractelement <4 x i64> [[VEC_IND]], i64 [[FIRST_ACTIVE_LANE]]
1079 ; CHECK-NEXT: br label [[LOOP_EARLY_EXIT:%.*]]
1080 ; CHECK: middle.block:
1081 ; CHECK-NEXT: br i1 true, label [[LOOP_END:%.*]], label [[SCALAR_PH]]
1083 ; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 67, [[MIDDLE_BLOCK]] ], [ 3, [[ENTRY:%.*]] ]
1084 ; CHECK-NEXT: br label [[LOOP:%.*]]
1086 ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], [[LOOP_INC:%.*]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
1087 ; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[P1]], i64 [[INDEX]]
1088 ; CHECK-NEXT: [[LD1:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
1089 ; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i8, ptr [[P2]], i64 [[INDEX]]
1090 ; CHECK-NEXT: [[LD2:%.*]] = load i8, ptr [[ARRAYIDX1]], align 1
1091 ; CHECK-NEXT: [[CMP3:%.*]] = icmp eq i8 [[LD1]], [[LD2]]
1092 ; CHECK-NEXT: br i1 [[CMP3]], label [[LOOP_INC]], label [[LOOP_EARLY_EXIT]]
1093 ; CHECK: loop.inc:
1094 ; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 1
1095 ; CHECK-NEXT: [[EXITCOND:%.*]] = icmp ne i64 [[INDEX_NEXT]], 67
1096 ; CHECK-NEXT: br i1 [[EXITCOND]], label [[LOOP]], label [[LOOP_END]], !llvm.loop [[LOOP27:![0-9]+]]
1097 ; CHECK: loop.early.exit:
1098 ; CHECK-NEXT: [[RETVAL1:%.*]] = phi i64 [ [[INDEX]], [[LOOP]] ], [ [[EARLY_EXIT_VALUE]], [[VECTOR_EARLY_EXIT]] ]
1099 ; CHECK-NEXT: ret i64 [[RETVAL1]]
1100 ; CHECK: loop.end:
1101 ; CHECK-NEXT: [[RETVAL2:%.*]] = phi i64 [ [[INDEX_NEXT]], [[LOOP_INC]] ], [ [[TMP12]], [[MIDDLE_BLOCK]] ]
1102 ; CHECK-NEXT: ret i64 [[RETVAL2]]
1104 entry:
1105 %p1 = alloca [1024 x i8]
1106 %p2 = alloca [1024 x i8]
1107 call void @init_mem(ptr %p1, i64 1024)
1108 call void @init_mem(ptr %p2, i64 1024)
1109 br label %loop
1111 loop:
1112 %index = phi i64 [ %index.next, %loop.inc ], [ 3, %entry ]
1113 %arrayidx = getelementptr inbounds i8, ptr %p1, i64 %index
1114 %ld1 = load i8, ptr %arrayidx, align 1
1115 %arrayidx1 = getelementptr inbounds i8, ptr %p2, i64 %index
1116 %ld2 = load i8, ptr %arrayidx1, align 1
1117 %cmp3 = icmp eq i8 %ld1, %ld2
1118 br i1 %cmp3, label %loop.inc, label %loop.early.exit
1120 loop.inc:
1121 %index.next = add i64 %index, 1
1122 %exitcond = icmp ne i64 %index.next, 67
1123 br i1 %exitcond, label %loop, label %loop.end
1125 loop.early.exit:
1126 %retval1 = phi i64 [ %index, %loop ]
1127 ret i64 %retval1
1129 loop.end:
1130 %retval2 = phi i64 [ %index.next, %loop.inc ]
1131 ret i64 %retval2
1132 }
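; In the test below the IV increment is computed at the top of the loop, before the
; early-exit compare, so the early exit block returns the post-incremented value
; %index.next while the normal (counted) exit returns %index.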
1135 define i64 @diff_exit_block_post_inc_use2() {
1136 ; CHECK-LABEL: define i64 @diff_exit_block_post_inc_use2() {
1137 ; CHECK-NEXT: entry:
1138 ; CHECK-NEXT: [[P1:%.*]] = alloca [1024 x i8], align 1
1139 ; CHECK-NEXT: [[P2:%.*]] = alloca [1024 x i8], align 1
1140 ; CHECK-NEXT: call void @init_mem(ptr [[P1]], i64 1024)
1141 ; CHECK-NEXT: call void @init_mem(ptr [[P2]], i64 1024)
1142 ; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
1143 ; CHECK: vector.ph:
1144 ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
1145 ; CHECK: vector.body:
1146 ; CHECK-NEXT: [[INDEX1:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT3:%.*]], [[VECTOR_BODY]] ]
1147 ; CHECK-NEXT: [[VEC_IND:%.*]] = phi <4 x i64> [ <i64 3, i64 4, i64 5, i64 6>, [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
1148 ; CHECK-NEXT: [[OFFSET_IDX:%.*]] = add i64 3, [[INDEX1]]
1149 ; CHECK-NEXT: [[TMP0:%.*]] = add i64 [[OFFSET_IDX]], 0
1150 ; CHECK-NEXT: [[TMP1:%.*]] = add i64 [[OFFSET_IDX]], 1
1151 ; CHECK-NEXT: [[TMP2:%.*]] = add i64 [[OFFSET_IDX]], 2
1152 ; CHECK-NEXT: [[TMP3:%.*]] = add i64 [[OFFSET_IDX]], 3
1153 ; CHECK-NEXT: [[TMP4:%.*]] = add i64 [[TMP0]], 1
1154 ; CHECK-NEXT: [[TMP5:%.*]] = add i64 [[TMP1]], 1
1155 ; CHECK-NEXT: [[TMP6:%.*]] = add i64 [[TMP2]], 1
1156 ; CHECK-NEXT: [[TMP7:%.*]] = add i64 [[TMP3]], 1
1157 ; CHECK-NEXT: [[TMP8:%.*]] = insertelement <4 x i64> poison, i64 [[TMP4]], i32 0
1158 ; CHECK-NEXT: [[TMP9:%.*]] = insertelement <4 x i64> [[TMP8]], i64 [[TMP5]], i32 1
1159 ; CHECK-NEXT: [[TMP10:%.*]] = insertelement <4 x i64> [[TMP9]], i64 [[TMP6]], i32 2
1160 ; CHECK-NEXT: [[TMP11:%.*]] = insertelement <4 x i64> [[TMP10]], i64 [[TMP7]], i32 3
1161 ; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds i8, ptr [[P1]], i64 [[TMP0]]
1162 ; CHECK-NEXT: [[TMP13:%.*]] = getelementptr inbounds i8, ptr [[TMP12]], i32 0
1163 ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i8>, ptr [[TMP13]], align 1
1164 ; CHECK-NEXT: [[TMP14:%.*]] = getelementptr inbounds i8, ptr [[P2]], i64 [[TMP0]]
1165 ; CHECK-NEXT: [[TMP15:%.*]] = getelementptr inbounds i8, ptr [[TMP14]], i32 0
1166 ; CHECK-NEXT: [[WIDE_LOAD2:%.*]] = load <4 x i8>, ptr [[TMP15]], align 1
1167 ; CHECK-NEXT: [[TMP16:%.*]] = icmp eq <4 x i8> [[WIDE_LOAD]], [[WIDE_LOAD2]]
1168 ; CHECK-NEXT: [[INDEX_NEXT3]] = add nuw i64 [[INDEX1]], 4
1169 ; CHECK-NEXT: [[TMP17:%.*]] = xor <4 x i1> [[TMP16]], splat (i1 true)
1170 ; CHECK-NEXT: [[TMP18:%.*]] = call i1 @llvm.vector.reduce.or.v4i1(<4 x i1> [[TMP17]])
1171 ; CHECK-NEXT: [[TMP19:%.*]] = icmp eq i64 [[INDEX_NEXT3]], 64
1172 ; CHECK-NEXT: [[VEC_IND_NEXT]] = add <4 x i64> [[VEC_IND]], splat (i64 4)
1173 ; CHECK-NEXT: [[TMP20:%.*]] = or i1 [[TMP18]], [[TMP19]]
1174 ; CHECK-NEXT: br i1 [[TMP20]], label [[MIDDLE_SPLIT:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP28:![0-9]+]]
1175 ; CHECK: middle.split:
1176 ; CHECK-NEXT: br i1 [[TMP18]], label [[VECTOR_EARLY_EXIT:%.*]], label [[MIDDLE_BLOCK:%.*]]
1177 ; CHECK: vector.early.exit:
1178 ; CHECK-NEXT: [[FIRST_ACTIVE_LANE:%.*]] = call i64 @llvm.experimental.cttz.elts.i64.v4i1(<4 x i1> [[TMP17]], i1 true)
1179 ; CHECK-NEXT: [[EARLY_EXIT_VALUE:%.*]] = extractelement <4 x i64> [[TMP11]], i64 [[FIRST_ACTIVE_LANE]]
1180 ; CHECK-NEXT: br label [[LOOP_EARLY_EXIT:%.*]]
1181 ; CHECK: middle.block:
1182 ; CHECK-NEXT: [[TMP21:%.*]] = extractelement <4 x i64> [[VEC_IND]], i32 3
1183 ; CHECK-NEXT: br i1 true, label [[LOOP_END:%.*]], label [[SCALAR_PH]]
1184 ; CHECK: scalar.ph:
1185 ; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 67, [[MIDDLE_BLOCK]] ], [ 3, [[ENTRY:%.*]] ]
1186 ; CHECK-NEXT: br label [[LOOP:%.*]]
1187 ; CHECK: loop:
1188 ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], [[LOOP_INC:%.*]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
1189 ; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 1
1190 ; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[P1]], i64 [[INDEX]]
1191 ; CHECK-NEXT: [[LD1:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
1192 ; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i8, ptr [[P2]], i64 [[INDEX]]
1193 ; CHECK-NEXT: [[LD2:%.*]] = load i8, ptr [[ARRAYIDX1]], align 1
1194 ; CHECK-NEXT: [[CMP3:%.*]] = icmp eq i8 [[LD1]], [[LD2]]
1195 ; CHECK-NEXT: br i1 [[CMP3]], label [[LOOP_INC]], label [[LOOP_EARLY_EXIT]]
1196 ; CHECK: loop.inc:
1197 ; CHECK-NEXT: [[EXITCOND:%.*]] = icmp ne i64 [[INDEX_NEXT]], 67
1198 ; CHECK-NEXT: br i1 [[EXITCOND]], label [[LOOP]], label [[LOOP_END]], !llvm.loop [[LOOP29:![0-9]+]]
1199 ; CHECK: loop.early.exit:
1200 ; CHECK-NEXT: [[RETVAL1:%.*]] = phi i64 [ [[INDEX_NEXT]], [[LOOP]] ], [ [[EARLY_EXIT_VALUE]], [[VECTOR_EARLY_EXIT]] ]
1201 ; CHECK-NEXT: ret i64 [[RETVAL1]]
1202 ; CHECK: loop.end:
1203 ; CHECK-NEXT: [[RETVAL2:%.*]] = phi i64 [ [[INDEX]], [[LOOP_INC]] ], [ [[TMP21]], [[MIDDLE_BLOCK]] ]
1204 ; CHECK-NEXT: ret i64 [[RETVAL2]]
1206 entry:
1207 %p1 = alloca [1024 x i8]
1208 %p2 = alloca [1024 x i8]
1209 call void @init_mem(ptr %p1, i64 1024)
1210 call void @init_mem(ptr %p2, i64 1024)
1211 br label %loop
1213 loop:
1214 %index = phi i64 [ %index.next, %loop.inc ], [ 3, %entry ]
1215 %index.next = add i64 %index, 1
1216 %arrayidx = getelementptr inbounds i8, ptr %p1, i64 %index
1217 %ld1 = load i8, ptr %arrayidx, align 1
1218 %arrayidx1 = getelementptr inbounds i8, ptr %p2, i64 %index
1219 %ld2 = load i8, ptr %arrayidx1, align 1
1220 %cmp3 = icmp eq i8 %ld1, %ld2
1221 br i1 %cmp3, label %loop.inc, label %loop.early.exit
1223 loop.inc:
1224 %exitcond = icmp ne i64 %index.next, 67
1225 br i1 %exitcond, label %loop, label %loop.end
1227 loop.early.exit:
1228 %retval1 = phi i64 [ %index.next, %loop ]
1229 ret i64 %retval1
1231 loop.end:
1232 %retval2 = phi i64 [ %index, %loop.inc ]
1233 ret i64 %retval2
1234 }
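; The next early-exiting loop calls the llvm.sqrt.f32 intrinsic before the exit
; compare; the call is safe to execute speculatively and the checks show it widened
; to llvm.sqrt.v4f32 in the vector body.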
1237 define i64 @loop_contains_safe_call() {
1238 ; CHECK-LABEL: define i64 @loop_contains_safe_call() {
1239 ; CHECK-NEXT: entry:
1240 ; CHECK-NEXT: [[P1:%.*]] = alloca [1024 x i8], align 1
1241 ; CHECK-NEXT: [[P2:%.*]] = alloca [1024 x i8], align 1
1242 ; CHECK-NEXT: call void @init_mem(ptr [[P1]], i64 1024)
1243 ; CHECK-NEXT: call void @init_mem(ptr [[P2]], i64 1024)
1244 ; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
1245 ; CHECK: vector.ph:
1246 ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
1247 ; CHECK: vector.body:
1248 ; CHECK-NEXT: [[INDEX1:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT2:%.*]], [[VECTOR_BODY]] ]
1249 ; CHECK-NEXT: [[VEC_IND:%.*]] = phi <4 x i64> [ <i64 3, i64 4, i64 5, i64 6>, [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
1250 ; CHECK-NEXT: [[OFFSET_IDX:%.*]] = add i64 3, [[INDEX1]]
1251 ; CHECK-NEXT: [[TMP0:%.*]] = add i64 [[OFFSET_IDX]], 0
1252 ; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds float, ptr [[P1]], i64 [[TMP0]]
1253 ; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds float, ptr [[TMP1]], i32 0
1254 ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x float>, ptr [[TMP2]], align 1
1255 ; CHECK-NEXT: [[TMP3:%.*]] = call fast <4 x float> @llvm.sqrt.v4f32(<4 x float> [[WIDE_LOAD]])
1256 ; CHECK-NEXT: [[TMP4:%.*]] = fcmp fast ult <4 x float> [[TMP3]], splat (float 3.000000e+00)
1257 ; CHECK-NEXT: [[INDEX_NEXT2]] = add nuw i64 [[INDEX1]], 4
1258 ; CHECK-NEXT: [[TMP5:%.*]] = xor <4 x i1> [[TMP4]], splat (i1 true)
1259 ; CHECK-NEXT: [[TMP6:%.*]] = call i1 @llvm.vector.reduce.or.v4i1(<4 x i1> [[TMP5]])
1260 ; CHECK-NEXT: [[TMP7:%.*]] = icmp eq i64 [[INDEX_NEXT2]], 64
1261 ; CHECK-NEXT: [[VEC_IND_NEXT]] = add <4 x i64> [[VEC_IND]], splat (i64 4)
1262 ; CHECK-NEXT: [[TMP8:%.*]] = or i1 [[TMP6]], [[TMP7]]
1263 ; CHECK-NEXT: br i1 [[TMP8]], label [[MIDDLE_SPLIT:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP30:![0-9]+]]
1264 ; CHECK: middle.split:
1265 ; CHECK-NEXT: br i1 [[TMP6]], label [[VECTOR_EARLY_EXIT:%.*]], label [[MIDDLE_BLOCK:%.*]]
1266 ; CHECK: vector.early.exit:
1267 ; CHECK-NEXT: [[FIRST_ACTIVE_LANE:%.*]] = call i64 @llvm.experimental.cttz.elts.i64.v4i1(<4 x i1> [[TMP5]], i1 true)
1268 ; CHECK-NEXT: [[EARLY_EXIT_VALUE:%.*]] = extractelement <4 x i64> [[VEC_IND]], i64 [[FIRST_ACTIVE_LANE]]
1269 ; CHECK-NEXT: br label [[LOOP_END:%.*]]
1270 ; CHECK: middle.block:
1271 ; CHECK-NEXT: br i1 true, label [[LOOP_END]], label [[SCALAR_PH]]
1272 ; CHECK: scalar.ph:
1273 ; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 67, [[MIDDLE_BLOCK]] ], [ 3, [[ENTRY:%.*]] ]
1274 ; CHECK-NEXT: br label [[LOOP:%.*]]
1275 ; CHECK: loop:
1276 ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], [[LOOP_INC:%.*]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
1277 ; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[P1]], i64 [[INDEX]]
1278 ; CHECK-NEXT: [[LD1:%.*]] = load float, ptr [[ARRAYIDX]], align 1
1279 ; CHECK-NEXT: [[SQRT:%.*]] = tail call fast float @llvm.sqrt.f32(float [[LD1]])
1280 ; CHECK-NEXT: [[CMP:%.*]] = fcmp fast ult float [[SQRT]], 3.000000e+00
1281 ; CHECK-NEXT: br i1 [[CMP]], label [[LOOP_INC]], label [[LOOP_END]]
1282 ; CHECK: loop.inc:
1283 ; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 1
1284 ; CHECK-NEXT: [[EXITCOND:%.*]] = icmp ne i64 [[INDEX_NEXT]], 67
1285 ; CHECK-NEXT: br i1 [[EXITCOND]], label [[LOOP]], label [[LOOP_END]], !llvm.loop [[LOOP31:![0-9]+]]
1286 ; CHECK: loop.end:
1287 ; CHECK-NEXT: [[RETVAL:%.*]] = phi i64 [ [[INDEX]], [[LOOP]] ], [ 67, [[LOOP_INC]] ], [ 67, [[MIDDLE_BLOCK]] ], [ [[EARLY_EXIT_VALUE]], [[VECTOR_EARLY_EXIT]] ]
1288 ; CHECK-NEXT: ret i64 [[RETVAL]]
1290 entry:
1291 %p1 = alloca [1024 x i8]
1292 %p2 = alloca [1024 x i8]
1293 call void @init_mem(ptr %p1, i64 1024)
1294 call void @init_mem(ptr %p2, i64 1024)
1295 br label %loop
1297 loop:
1298 %index = phi i64 [ %index.next, %loop.inc ], [ 3, %entry ]
1299 %arrayidx = getelementptr inbounds float, ptr %p1, i64 %index
1300 %ld1 = load float, ptr %arrayidx, align 1
1301 %sqrt = tail call fast float @llvm.sqrt.f32(float %ld1)
1302 %cmp = fcmp fast ult float %sqrt, 3.0e+00
1303 br i1 %cmp, label %loop.inc, label %loop.end
1305 loop.inc:
1306 %index.next = add i64 %index, 1
1307 %exitcond = icmp ne i64 %index.next, 67
1308 br i1 %exitcond, label %loop, label %loop.end
1310 loop.end:
1311 %retval = phi i64 [ %index, %loop ], [ 67, %loop.inc ]
1312 ret i64 %retval
1313 }
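; The next early-exiting loop contains a udiv by the non-zero constant 20000, which
; cannot trap and is widened to a vector udiv in the vector body.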
1316 define i64 @loop_contains_safe_div() {
1317 ; CHECK-LABEL: define i64 @loop_contains_safe_div() {
1318 ; CHECK-NEXT: entry:
1319 ; CHECK-NEXT: [[P1:%.*]] = alloca [1024 x i8], align 1
1320 ; CHECK-NEXT: [[P2:%.*]] = alloca [1024 x i8], align 1
1321 ; CHECK-NEXT: call void @init_mem(ptr [[P1]], i64 1024)
1322 ; CHECK-NEXT: call void @init_mem(ptr [[P2]], i64 1024)
1323 ; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
1324 ; CHECK: vector.ph:
1325 ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
1326 ; CHECK: vector.body:
1327 ; CHECK-NEXT: [[INDEX1:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT2:%.*]], [[VECTOR_BODY]] ]
1328 ; CHECK-NEXT: [[VEC_IND:%.*]] = phi <4 x i64> [ <i64 3, i64 4, i64 5, i64 6>, [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
1329 ; CHECK-NEXT: [[OFFSET_IDX:%.*]] = add i64 3, [[INDEX1]]
1330 ; CHECK-NEXT: [[TMP0:%.*]] = add i64 [[OFFSET_IDX]], 0
1331 ; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i32, ptr [[P1]], i64 [[TMP0]]
1332 ; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds i32, ptr [[TMP1]], i32 0
1333 ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP2]], align 1
1334 ; CHECK-NEXT: [[TMP3:%.*]] = udiv <4 x i32> [[WIDE_LOAD]], splat (i32 20000)
1335 ; CHECK-NEXT: [[TMP4:%.*]] = icmp eq <4 x i32> [[TMP3]], splat (i32 1)
1336 ; CHECK-NEXT: [[INDEX_NEXT2]] = add nuw i64 [[INDEX1]], 4
1337 ; CHECK-NEXT: [[TMP5:%.*]] = xor <4 x i1> [[TMP4]], splat (i1 true)
1338 ; CHECK-NEXT: [[TMP6:%.*]] = call i1 @llvm.vector.reduce.or.v4i1(<4 x i1> [[TMP5]])
1339 ; CHECK-NEXT: [[TMP7:%.*]] = icmp eq i64 [[INDEX_NEXT2]], 64
1340 ; CHECK-NEXT: [[VEC_IND_NEXT]] = add <4 x i64> [[VEC_IND]], splat (i64 4)
1341 ; CHECK-NEXT: [[TMP8:%.*]] = or i1 [[TMP6]], [[TMP7]]
1342 ; CHECK-NEXT: br i1 [[TMP8]], label [[MIDDLE_SPLIT:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP32:![0-9]+]]
1343 ; CHECK: middle.split:
1344 ; CHECK-NEXT: br i1 [[TMP6]], label [[VECTOR_EARLY_EXIT:%.*]], label [[MIDDLE_BLOCK:%.*]]
1345 ; CHECK: vector.early.exit:
1346 ; CHECK-NEXT: [[FIRST_ACTIVE_LANE:%.*]] = call i64 @llvm.experimental.cttz.elts.i64.v4i1(<4 x i1> [[TMP5]], i1 true)
1347 ; CHECK-NEXT: [[EARLY_EXIT_VALUE:%.*]] = extractelement <4 x i64> [[VEC_IND]], i64 [[FIRST_ACTIVE_LANE]]
1348 ; CHECK-NEXT: br label [[LOOP_END:%.*]]
1349 ; CHECK: middle.block:
1350 ; CHECK-NEXT: br i1 true, label [[LOOP_END]], label [[SCALAR_PH]]
1351 ; CHECK: scalar.ph:
1352 ; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 67, [[MIDDLE_BLOCK]] ], [ 3, [[ENTRY:%.*]] ]
1353 ; CHECK-NEXT: br label [[LOOP:%.*]]
1354 ; CHECK: loop:
1355 ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], [[LOOP_INC:%.*]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
1356 ; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[P1]], i64 [[INDEX]]
1357 ; CHECK-NEXT: [[LD1:%.*]] = load i32, ptr [[ARRAYIDX]], align 1
1358 ; CHECK-NEXT: [[DIV:%.*]] = udiv i32 [[LD1]], 20000
1359 ; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[DIV]], 1
1360 ; CHECK-NEXT: br i1 [[CMP]], label [[LOOP_INC]], label [[LOOP_END]]
1361 ; CHECK: loop.inc:
1362 ; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 1
1363 ; CHECK-NEXT: [[EXITCOND:%.*]] = icmp ne i64 [[INDEX_NEXT]], 67
1364 ; CHECK-NEXT: br i1 [[EXITCOND]], label [[LOOP]], label [[LOOP_END]], !llvm.loop [[LOOP33:![0-9]+]]
1365 ; CHECK: loop.end:
1366 ; CHECK-NEXT: [[RETVAL:%.*]] = phi i64 [ [[INDEX]], [[LOOP]] ], [ 67, [[LOOP_INC]] ], [ 67, [[MIDDLE_BLOCK]] ], [ [[EARLY_EXIT_VALUE]], [[VECTOR_EARLY_EXIT]] ]
1367 ; CHECK-NEXT: ret i64 [[RETVAL]]
1369 entry:
1370 %p1 = alloca [1024 x i8]
1371 %p2 = alloca [1024 x i8]
1372 call void @init_mem(ptr %p1, i64 1024)
1373 call void @init_mem(ptr %p2, i64 1024)
1374 br label %loop
1376 loop:
1377 %index = phi i64 [ %index.next, %loop.inc ], [ 3, %entry ]
1378 %arrayidx = getelementptr inbounds i32, ptr %p1, i64 %index
1379 %ld1 = load i32, ptr %arrayidx, align 1
1380 %div = udiv i32 %ld1, 20000
1381 %cmp = icmp eq i32 %div, 1
1382 br i1 %cmp, label %loop.inc, label %loop.end
1384 loop.inc:
1385 %index.next = add i64 %index, 1
1386 %exitcond = icmp ne i64 %index.next, 67
1387 br i1 %exitcond, label %loop, label %loop.end
1389 loop.end:
1390 %retval = phi i64 [ %index, %loop ], [ 67, %loop.inc ]
1391 ret i64 %retval
1392 }
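; In the next test a load of %p2 sits in loop.inc, i.e. after the early exit. The
; argument is marked align 8 dereferenceable(1024), which is what allows that load
; to be performed unconditionally in the vector body.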
1395 define i64 @loop_contains_load_after_early_exit(ptr dereferenceable(1024) align(8) %p2) {
1396 ; CHECK-LABEL: define i64 @loop_contains_load_after_early_exit(
1397 ; CHECK-SAME: ptr align 8 dereferenceable(1024) [[P2:%.*]]) {
1398 ; CHECK-NEXT: entry:
1399 ; CHECK-NEXT: [[P1:%.*]] = alloca [1024 x i8], align 1
1400 ; CHECK-NEXT: call void @init_mem(ptr [[P1]], i64 1024)
1401 ; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
1402 ; CHECK: vector.ph:
1403 ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
1404 ; CHECK: vector.body:
1405 ; CHECK-NEXT: [[INDEX1:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT3:%.*]], [[VECTOR_BODY]] ]
1406 ; CHECK-NEXT: [[VEC_IND:%.*]] = phi <4 x i64> [ <i64 3, i64 4, i64 5, i64 6>, [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
1407 ; CHECK-NEXT: [[OFFSET_IDX:%.*]] = add i64 3, [[INDEX1]]
1408 ; CHECK-NEXT: [[TMP0:%.*]] = add i64 [[OFFSET_IDX]], 0
1409 ; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i32, ptr [[P1]], i64 [[TMP0]]
1410 ; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds i32, ptr [[TMP1]], i32 0
1411 ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP2]], align 1
1412 ; CHECK-NEXT: [[TMP3:%.*]] = icmp eq <4 x i32> [[WIDE_LOAD]], splat (i32 1)
1413 ; CHECK-NEXT: [[TMP4:%.*]] = getelementptr i64, ptr [[P2]], i64 [[TMP0]]
1414 ; CHECK-NEXT: [[TMP5:%.*]] = getelementptr i64, ptr [[TMP4]], i32 0
1415 ; CHECK-NEXT: [[WIDE_LOAD2:%.*]] = load <4 x i64>, ptr [[TMP5]], align 8
1416 ; CHECK-NEXT: [[INDEX_NEXT3]] = add nuw i64 [[INDEX1]], 4
1417 ; CHECK-NEXT: [[TMP6:%.*]] = xor <4 x i1> [[TMP3]], splat (i1 true)
1418 ; CHECK-NEXT: [[TMP7:%.*]] = call i1 @llvm.vector.reduce.or.v4i1(<4 x i1> [[TMP6]])
1419 ; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT3]], 64
1420 ; CHECK-NEXT: [[VEC_IND_NEXT]] = add <4 x i64> [[VEC_IND]], splat (i64 4)
1421 ; CHECK-NEXT: [[TMP9:%.*]] = or i1 [[TMP7]], [[TMP8]]
1422 ; CHECK-NEXT: br i1 [[TMP9]], label [[MIDDLE_SPLIT:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP34:![0-9]+]]
1423 ; CHECK: middle.split:
1424 ; CHECK-NEXT: [[TMP10:%.*]] = extractelement <4 x i64> [[WIDE_LOAD2]], i32 3
1425 ; CHECK-NEXT: br i1 [[TMP7]], label [[VECTOR_EARLY_EXIT:%.*]], label [[MIDDLE_BLOCK:%.*]]
1426 ; CHECK: vector.early.exit:
1427 ; CHECK-NEXT: [[FIRST_ACTIVE_LANE:%.*]] = call i64 @llvm.experimental.cttz.elts.i64.v4i1(<4 x i1> [[TMP6]], i1 true)
1428 ; CHECK-NEXT: [[EARLY_EXIT_VALUE:%.*]] = extractelement <4 x i64> [[VEC_IND]], i64 [[FIRST_ACTIVE_LANE]]
1429 ; CHECK-NEXT: br label [[LOOP_END:%.*]]
1430 ; CHECK: middle.block:
1431 ; CHECK-NEXT: br i1 true, label [[LOOP_END]], label [[SCALAR_PH]]
1432 ; CHECK: scalar.ph:
1433 ; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 67, [[MIDDLE_BLOCK]] ], [ 3, [[ENTRY:%.*]] ]
1434 ; CHECK-NEXT: br label [[LOOP:%.*]]
1435 ; CHECK: loop:
1436 ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], [[LOOP_INC:%.*]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
1437 ; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[P1]], i64 [[INDEX]]
1438 ; CHECK-NEXT: [[LD1:%.*]] = load i32, ptr [[ARRAYIDX]], align 1
1439 ; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[LD1]], 1
1440 ; CHECK-NEXT: br i1 [[CMP]], label [[LOOP_INC]], label [[LOOP_END]]
1441 ; CHECK: loop.inc:
1442 ; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i64, ptr [[P2]], i64 [[INDEX]]
1443 ; CHECK-NEXT: [[LD2:%.*]] = load i64, ptr [[ARRAYIDX2]], align 8
1444 ; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 1
1445 ; CHECK-NEXT: [[EXITCOND:%.*]] = icmp ne i64 [[INDEX_NEXT]], 67
1446 ; CHECK-NEXT: br i1 [[EXITCOND]], label [[LOOP]], label [[LOOP_END]], !llvm.loop [[LOOP35:![0-9]+]]
1447 ; CHECK: loop.end:
1448 ; CHECK-NEXT: [[RETVAL:%.*]] = phi i64 [ [[INDEX]], [[LOOP]] ], [ [[LD2]], [[LOOP_INC]] ], [ [[TMP10]], [[MIDDLE_BLOCK]] ], [ [[EARLY_EXIT_VALUE]], [[VECTOR_EARLY_EXIT]] ]
1449 ; CHECK-NEXT: ret i64 [[RETVAL]]
1451 entry:
1452 %p1 = alloca [1024 x i8]
1453 call void @init_mem(ptr %p1, i64 1024)
1454 br label %loop
1456 loop:
1457 %index = phi i64 [ %index.next, %loop.inc ], [ 3, %entry ]
1458 %arrayidx = getelementptr inbounds i32, ptr %p1, i64 %index
1459 %ld1 = load i32, ptr %arrayidx, align 1
1460 %cmp = icmp eq i32 %ld1, 1
1461 br i1 %cmp, label %loop.inc, label %loop.end
1463 loop.inc:
1464 %arrayidx2 = getelementptr inbounds i64, ptr %p2, i64 %index
1465 %ld2 = load i64, ptr %arrayidx2, align 8
1466 %index.next = add i64 %index, 1
1467 %exitcond = icmp ne i64 %index.next, 67
1468 br i1 %exitcond, label %loop, label %loop.end
1470 loop.end:
1471 %retval = phi i64 [ %index, %loop ], [ %ld2, %loop.inc ]
1472 ret i64 %retval
1473 }
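; Reverse (counting-down) version of the byte-compare loop: the IV starts at 1023
; and steps by -1, so the vector body uses a decrementing vector IV and reversed
; (shuffled) loads.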
1476 define i64 @same_exit_block_pre_inc_use1_reverse() {
1477 ; CHECK-LABEL: define i64 @same_exit_block_pre_inc_use1_reverse() {
1478 ; CHECK-NEXT: entry:
1479 ; CHECK-NEXT: [[P1:%.*]] = alloca [1024 x i8], align 1
1480 ; CHECK-NEXT: [[P2:%.*]] = alloca [1024 x i8], align 1
1481 ; CHECK-NEXT: call void @init_mem(ptr [[P1]], i64 1024)
1482 ; CHECK-NEXT: call void @init_mem(ptr [[P2]], i64 1024)
1483 ; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
1484 ; CHECK: vector.ph:
1485 ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
1486 ; CHECK: vector.body:
1487 ; CHECK-NEXT: [[INDEX1:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT4:%.*]], [[VECTOR_BODY]] ]
1488 ; CHECK-NEXT: [[VEC_IND:%.*]] = phi <4 x i64> [ <i64 1023, i64 1022, i64 1021, i64 1020>, [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
1489 ; CHECK-NEXT: [[OFFSET_IDX:%.*]] = sub i64 1023, [[INDEX1]]
1490 ; CHECK-NEXT: [[TMP0:%.*]] = add i64 [[OFFSET_IDX]], 0
1491 ; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i8, ptr [[P1]], i64 [[TMP0]]
1492 ; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i32 0
1493 ; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds i8, ptr [[TMP2]], i32 -3
1494 ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i8>, ptr [[TMP3]], align 1
1495 ; CHECK-NEXT: [[REVERSE:%.*]] = shufflevector <4 x i8> [[WIDE_LOAD]], <4 x i8> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
1496 ; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds i8, ptr [[P2]], i64 [[TMP0]]
1497 ; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds i8, ptr [[TMP4]], i32 0
1498 ; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i8, ptr [[TMP5]], i32 -3
1499 ; CHECK-NEXT: [[WIDE_LOAD2:%.*]] = load <4 x i8>, ptr [[TMP6]], align 1
1500 ; CHECK-NEXT: [[REVERSE3:%.*]] = shufflevector <4 x i8> [[WIDE_LOAD2]], <4 x i8> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
1501 ; CHECK-NEXT: [[TMP7:%.*]] = icmp eq <4 x i8> [[REVERSE]], [[REVERSE3]]
1502 ; CHECK-NEXT: [[INDEX_NEXT4]] = add nuw i64 [[INDEX1]], 4
1503 ; CHECK-NEXT: [[TMP8:%.*]] = xor <4 x i1> [[TMP7]], splat (i1 true)
1504 ; CHECK-NEXT: [[TMP9:%.*]] = call i1 @llvm.vector.reduce.or.v4i1(<4 x i1> [[TMP8]])
1505 ; CHECK-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT4]], 1020
1506 ; CHECK-NEXT: [[VEC_IND_NEXT]] = add <4 x i64> [[VEC_IND]], splat (i64 -4)
1507 ; CHECK-NEXT: [[TMP11:%.*]] = or i1 [[TMP9]], [[TMP10]]
1508 ; CHECK-NEXT: br i1 [[TMP11]], label [[MIDDLE_SPLIT:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP36:![0-9]+]]
1509 ; CHECK: middle.split:
1510 ; CHECK-NEXT: br i1 [[TMP9]], label [[VECTOR_EARLY_EXIT:%.*]], label [[MIDDLE_BLOCK:%.*]]
1511 ; CHECK: vector.early.exit:
1512 ; CHECK-NEXT: [[FIRST_ACTIVE_LANE:%.*]] = call i64 @llvm.experimental.cttz.elts.i64.v4i1(<4 x i1> [[TMP8]], i1 true)
1513 ; CHECK-NEXT: [[EARLY_EXIT_VALUE:%.*]] = extractelement <4 x i64> [[VEC_IND]], i64 [[FIRST_ACTIVE_LANE]]
1514 ; CHECK-NEXT: br label [[LOOP_END:%.*]]
1515 ; CHECK: middle.block:
1516 ; CHECK-NEXT: br i1 false, label [[LOOP_END]], label [[SCALAR_PH]]
1517 ; CHECK: scalar.ph:
1518 ; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 3, [[MIDDLE_BLOCK]] ], [ 1023, [[ENTRY:%.*]] ]
1519 ; CHECK-NEXT: br label [[LOOP:%.*]]
1520 ; CHECK: loop:
1521 ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], [[LOOP_INC:%.*]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
1522 ; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[P1]], i64 [[INDEX]]
1523 ; CHECK-NEXT: [[LD1:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
1524 ; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i8, ptr [[P2]], i64 [[INDEX]]
1525 ; CHECK-NEXT: [[LD2:%.*]] = load i8, ptr [[ARRAYIDX1]], align 1
1526 ; CHECK-NEXT: [[CMP3:%.*]] = icmp eq i8 [[LD1]], [[LD2]]
1527 ; CHECK-NEXT: br i1 [[CMP3]], label [[LOOP_INC]], label [[LOOP_END]]
1528 ; CHECK: loop.inc:
1529 ; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], -1
1530 ; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDEX_NEXT]], 0
1531 ; CHECK-NEXT: br i1 [[EXITCOND]], label [[LOOP_END]], label [[LOOP]], !llvm.loop [[LOOP37:![0-9]+]]
1532 ; CHECK: loop.end:
1533 ; CHECK-NEXT: [[RETVAL:%.*]] = phi i64 [ [[INDEX]], [[LOOP]] ], [ 1024, [[LOOP_INC]] ], [ 1024, [[MIDDLE_BLOCK]] ], [ [[EARLY_EXIT_VALUE]], [[VECTOR_EARLY_EXIT]] ]
1534 ; CHECK-NEXT: ret i64 [[RETVAL]]
1536 entry:
1537 %p1 = alloca [1024 x i8]
1538 %p2 = alloca [1024 x i8]
1539 call void @init_mem(ptr %p1, i64 1024)
1540 call void @init_mem(ptr %p2, i64 1024)
1541 br label %loop
1543 loop:
1544 %index = phi i64 [ %index.next, %loop.inc ], [ 1023, %entry ]
1545 %arrayidx = getelementptr inbounds i8, ptr %p1, i64 %index
1546 %ld1 = load i8, ptr %arrayidx, align 1
1547 %arrayidx1 = getelementptr inbounds i8, ptr %p2, i64 %index
1548 %ld2 = load i8, ptr %arrayidx1, align 1
1549 %cmp3 = icmp eq i8 %ld1, %ld2
1550 br i1 %cmp3, label %loop.inc, label %loop.end
1552 loop.inc:
1553 %index.next = add i64 %index, -1
1554 %exitcond = icmp eq i64 %index.next, 0
1555 br i1 %exitcond, label %loop.end, label %loop
1557 loop.end:
1558 %retval = phi i64 [ %index, %loop ], [ 1024, %loop.inc ]
1559 ret i64 %retval
1560 }
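; Same byte-compare loop as before, but %p1 and %p2 are function arguments marked
; dereferenceable(1024) rather than local allocas.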
1563 define i64 @same_exit_block_pre_inc_use1_deref_ptrs(ptr dereferenceable(1024) %p1, ptr dereferenceable(1024) %p2) {
1564 ; CHECK-LABEL: define i64 @same_exit_block_pre_inc_use1_deref_ptrs(
1565 ; CHECK-SAME: ptr dereferenceable(1024) [[P1:%.*]], ptr dereferenceable(1024) [[P2:%.*]]) {
1566 ; CHECK-NEXT: entry:
1567 ; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
1568 ; CHECK: vector.ph:
1569 ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
1570 ; CHECK: vector.body:
1571 ; CHECK-NEXT: [[INDEX1:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT3:%.*]], [[VECTOR_BODY]] ]
1572 ; CHECK-NEXT: [[VEC_IND:%.*]] = phi <4 x i64> [ <i64 3, i64 4, i64 5, i64 6>, [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
1573 ; CHECK-NEXT: [[OFFSET_IDX:%.*]] = add i64 3, [[INDEX1]]
1574 ; CHECK-NEXT: [[TMP0:%.*]] = add i64 [[OFFSET_IDX]], 0
1575 ; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i8, ptr [[P1]], i64 [[TMP0]]
1576 ; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i32 0
1577 ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i8>, ptr [[TMP2]], align 1
1578 ; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds i8, ptr [[P2]], i64 [[TMP0]]
1579 ; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds i8, ptr [[TMP3]], i32 0
1580 ; CHECK-NEXT: [[WIDE_LOAD2:%.*]] = load <4 x i8>, ptr [[TMP4]], align 1
1581 ; CHECK-NEXT: [[TMP5:%.*]] = icmp eq <4 x i8> [[WIDE_LOAD]], [[WIDE_LOAD2]]
1582 ; CHECK-NEXT: [[INDEX_NEXT3]] = add nuw i64 [[INDEX1]], 4
1583 ; CHECK-NEXT: [[TMP6:%.*]] = xor <4 x i1> [[TMP5]], splat (i1 true)
1584 ; CHECK-NEXT: [[TMP7:%.*]] = call i1 @llvm.vector.reduce.or.v4i1(<4 x i1> [[TMP6]])
1585 ; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT3]], 64
1586 ; CHECK-NEXT: [[VEC_IND_NEXT]] = add <4 x i64> [[VEC_IND]], splat (i64 4)
1587 ; CHECK-NEXT: [[TMP9:%.*]] = or i1 [[TMP7]], [[TMP8]]
1588 ; CHECK-NEXT: br i1 [[TMP9]], label [[MIDDLE_SPLIT:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP38:![0-9]+]]
1589 ; CHECK: middle.split:
1590 ; CHECK-NEXT: br i1 [[TMP7]], label [[VECTOR_EARLY_EXIT:%.*]], label [[MIDDLE_BLOCK:%.*]]
1591 ; CHECK: vector.early.exit:
1592 ; CHECK-NEXT: [[FIRST_ACTIVE_LANE:%.*]] = call i64 @llvm.experimental.cttz.elts.i64.v4i1(<4 x i1> [[TMP6]], i1 true)
1593 ; CHECK-NEXT: [[EARLY_EXIT_VALUE:%.*]] = extractelement <4 x i64> [[VEC_IND]], i64 [[FIRST_ACTIVE_LANE]]
1594 ; CHECK-NEXT: br label [[LOOP_END:%.*]]
1595 ; CHECK: middle.block:
1596 ; CHECK-NEXT: br i1 true, label [[LOOP_END]], label [[SCALAR_PH]]
1597 ; CHECK: scalar.ph:
1598 ; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 67, [[MIDDLE_BLOCK]] ], [ 3, [[ENTRY:%.*]] ]
1599 ; CHECK-NEXT: br label [[LOOP:%.*]]
1600 ; CHECK: loop:
1601 ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], [[LOOP_INC:%.*]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
1602 ; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[P1]], i64 [[INDEX]]
1603 ; CHECK-NEXT: [[LD1:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
1604 ; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i8, ptr [[P2]], i64 [[INDEX]]
1605 ; CHECK-NEXT: [[LD2:%.*]] = load i8, ptr [[ARRAYIDX1]], align 1
1606 ; CHECK-NEXT: [[CMP3:%.*]] = icmp eq i8 [[LD1]], [[LD2]]
1607 ; CHECK-NEXT: br i1 [[CMP3]], label [[LOOP_INC]], label [[LOOP_END]]
1608 ; CHECK: loop.inc:
1609 ; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 1
1610 ; CHECK-NEXT: [[EXITCOND:%.*]] = icmp ne i64 [[INDEX_NEXT]], 67
1611 ; CHECK-NEXT: br i1 [[EXITCOND]], label [[LOOP]], label [[LOOP_END]], !llvm.loop [[LOOP39:![0-9]+]]
1612 ; CHECK: loop.end:
1613 ; CHECK-NEXT: [[RETVAL:%.*]] = phi i64 [ [[INDEX]], [[LOOP]] ], [ 67, [[LOOP_INC]] ], [ 67, [[MIDDLE_BLOCK]] ], [ [[EARLY_EXIT_VALUE]], [[VECTOR_EARLY_EXIT]] ]
1614 ; CHECK-NEXT: ret i64 [[RETVAL]]
1616 entry:
1617 br label %loop
1619 loop:
1620 %index = phi i64 [ %index.next, %loop.inc ], [ 3, %entry ]
1621 %arrayidx = getelementptr inbounds i8, ptr %p1, i64 %index
1622 %ld1 = load i8, ptr %arrayidx, align 1
1623 %arrayidx1 = getelementptr inbounds i8, ptr %p2, i64 %index
1624 %ld2 = load i8, ptr %arrayidx1, align 1
1625 %cmp3 = icmp eq i8 %ld1, %ld2
1626 br i1 %cmp3, label %loop.inc, label %loop.end
1628 loop.inc:
1629 %index.next = add i64 %index, 1
1630 %exitcond = icmp ne i64 %index.next, 67
1631 br i1 %exitcond, label %loop, label %loop.end
1633 loop.end:
1634 %retval = phi i64 [ %index, %loop ], [ 67, %loop.inc ]
1635 ret i64 %retval
1636 }
1639 declare i32 @foo(i32) readonly
1640 declare <vscale x 4 x i32> @foo_vec(<vscale x 4 x i32>)
1642 attributes #0 = { "vector-function-abi-variant"="_ZGVsNxv_foo(foo_vec)" }
1644 ; CHECK: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]]}
1645 ; CHECK: [[META1]] = !{!"llvm.loop.isvectorized", i32 1}
1646 ; CHECK: [[META2]] = !{!"llvm.loop.unroll.runtime.disable"}
1647 ; CHECK: [[LOOP3]] = distinct !{[[LOOP3]], [[META2]], [[META1]]}
1648 ; CHECK: [[LOOP4]] = distinct !{[[LOOP4]], [[META1]], [[META2]]}
1649 ; CHECK: [[LOOP5]] = distinct !{[[LOOP5]], [[META2]], [[META1]]}
1650 ; CHECK: [[LOOP6]] = distinct !{[[LOOP6]], [[META1]], [[META2]]}
1651 ; CHECK: [[LOOP7]] = distinct !{[[LOOP7]], [[META2]], [[META1]]}
1652 ; CHECK: [[LOOP8]] = distinct !{[[LOOP8]], [[META1]], [[META2]]}
1653 ; CHECK: [[LOOP9]] = distinct !{[[LOOP9]], [[META2]], [[META1]]}
1654 ; CHECK: [[LOOP10]] = distinct !{[[LOOP10]], [[META1]], [[META2]]}
1655 ; CHECK: [[LOOP11]] = distinct !{[[LOOP11]], [[META2]], [[META1]]}
1656 ; CHECK: [[LOOP12]] = distinct !{[[LOOP12]], [[META1]], [[META2]]}
1657 ; CHECK: [[LOOP13]] = distinct !{[[LOOP13]], [[META2]], [[META1]]}
1658 ; CHECK: [[LOOP14]] = distinct !{[[LOOP14]], [[META1]], [[META2]]}
1659 ; CHECK: [[LOOP15]] = distinct !{[[LOOP15]], [[META2]], [[META1]]}
1660 ; CHECK: [[LOOP16]] = distinct !{[[LOOP16]], [[META1]], [[META2]]}
1661 ; CHECK: [[LOOP17]] = distinct !{[[LOOP17]], [[META2]], [[META1]]}
1662 ; CHECK: [[LOOP18]] = distinct !{[[LOOP18]], [[META1]], [[META2]]}
1663 ; CHECK: [[LOOP19]] = distinct !{[[LOOP19]], [[META2]], [[META1]]}
1664 ; CHECK: [[LOOP20]] = distinct !{[[LOOP20]], [[META1]], [[META2]]}
1665 ; CHECK: [[LOOP21]] = distinct !{[[LOOP21]], [[META2]], [[META1]]}
1666 ; CHECK: [[LOOP22]] = distinct !{[[LOOP22]], [[META1]], [[META2]]}
1667 ; CHECK: [[LOOP23]] = distinct !{[[LOOP23]], [[META2]], [[META1]]}
1668 ; CHECK: [[LOOP24]] = distinct !{[[LOOP24]], [[META1]], [[META2]]}
1669 ; CHECK: [[LOOP25]] = distinct !{[[LOOP25]], [[META2]], [[META1]]}
1670 ; CHECK: [[LOOP26]] = distinct !{[[LOOP26]], [[META1]], [[META2]]}
1671 ; CHECK: [[LOOP27]] = distinct !{[[LOOP27]], [[META2]], [[META1]]}
1672 ; CHECK: [[LOOP28]] = distinct !{[[LOOP28]], [[META1]], [[META2]]}
1673 ; CHECK: [[LOOP29]] = distinct !{[[LOOP29]], [[META2]], [[META1]]}
1674 ; CHECK: [[LOOP30]] = distinct !{[[LOOP30]], [[META1]], [[META2]]}
1675 ; CHECK: [[LOOP31]] = distinct !{[[LOOP31]], [[META2]], [[META1]]}
1676 ; CHECK: [[LOOP32]] = distinct !{[[LOOP32]], [[META1]], [[META2]]}
1677 ; CHECK: [[LOOP33]] = distinct !{[[LOOP33]], [[META2]], [[META1]]}
1678 ; CHECK: [[LOOP34]] = distinct !{[[LOOP34]], [[META1]], [[META2]]}
1679 ; CHECK: [[LOOP35]] = distinct !{[[LOOP35]], [[META2]], [[META1]]}
1680 ; CHECK: [[LOOP36]] = distinct !{[[LOOP36]], [[META1]], [[META2]]}
1681 ; CHECK: [[LOOP37]] = distinct !{[[LOOP37]], [[META2]], [[META1]]}
1682 ; CHECK: [[LOOP38]] = distinct !{[[LOOP38]], [[META1]], [[META2]]}
1683 ; CHECK: [[LOOP39]] = distinct !{[[LOOP39]], [[META2]], [[META1]]}