; REQUIRES: asserts
; RUN: opt < %s -loop-vectorize -force-vector-width=4 -force-vector-interleave=1 -instcombine -debug-only=loop-vectorize -disable-output -print-after=instcombine -enable-new-pm=0 2>&1 | FileCheck %s
; RUN: opt < %s -loop-vectorize -force-vector-width=4 -force-vector-interleave=1 -enable-interleaved-mem-accesses -instcombine -debug-only=loop-vectorize -disable-output -print-after=instcombine -enable-new-pm=0 2>&1 | FileCheck %s --check-prefix=INTER
; RUN: opt < %s -passes=loop-vectorize,instcombine -force-vector-width=4 -force-vector-interleave=1 -debug-only=loop-vectorize -disable-output -print-after=instcombine 2>&1 | FileCheck %s
; RUN: opt < %s -passes=loop-vectorize,instcombine -force-vector-width=4 -force-vector-interleave=1 -enable-interleaved-mem-accesses -debug-only=loop-vectorize -disable-output -print-after=instcombine 2>&1 | FileCheck %s --check-prefix=INTER

target datalayout = "e-m:e-i64:64-i128:128-n32:64-S128"

%pair = type { i32, i32 }

; CHECK-LABEL: consecutive_ptr_forward
;
; Check that a forward consecutive pointer is recognized as uniform and remains
; uniform after vectorization.
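;
; For reference, a roughly equivalent C loop (an illustrative reconstruction;
; this C source is not part of the original test):
;
;   int consecutive_ptr_forward(int *a, long n) {
;     int sum = 0;
;     long i = 0;
;     do {
;       sum += a[i]; // a[i] is addressed through one consecutive pointer
;       ++i;
;     } while (i < n);
;     return sum;
;   }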
;
; CHECK: LV: Found uniform instruction: %tmp1 = getelementptr inbounds i32, i32* %a, i64 %i
;
; CHECK: %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
; CHECK-NOT: getelementptr
; CHECK: getelementptr inbounds i32, i32* %a, i64 %index
; CHECK-NOT: getelementptr
; CHECK: br i1 {{.*}}, label %middle.block, label %vector.body

define i32 @consecutive_ptr_forward(i32* %a, i64 %n) {
entry:
  br label %for.body

for.body:
  %i = phi i64 [ %i.next, %for.body ], [ 0, %entry ]
  %tmp0 = phi i32 [ %tmp3, %for.body ], [ 0, %entry ]
  %tmp1 = getelementptr inbounds i32, i32* %a, i64 %i
  %tmp2 = load i32, i32* %tmp1, align 8
  %tmp3 = add i32 %tmp0, %tmp2
  %i.next = add nuw nsw i64 %i, 1
  %cond = icmp slt i64 %i.next, %n
  br i1 %cond, label %for.body, label %for.end

for.end:
  %tmp4 = phi i32 [ %tmp3, %for.body ]
  ret i32 %tmp4
}

; CHECK-LABEL: consecutive_ptr_reverse
;
; Check that a reverse consecutive pointer is recognized as uniform and remains
; uniform after vectorization.
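;
; A roughly equivalent C loop (an illustrative reconstruction; not part of the
; original test):
;
;   int consecutive_ptr_reverse(int *a, long n) {
;     int sum = 0;
;     long i = n;
;     do {
;       sum += a[i]; // a[i] walks backward from a[n]
;       --i;
;     } while (i > 0);
;     return sum;
;   }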
;
; CHECK: LV: Found uniform instruction: %tmp1 = getelementptr inbounds i32, i32* %a, i64 %i
;
; CHECK: %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
; CHECK: %offset.idx = sub i64 %n, %index
; CHECK-NOT: getelementptr
; CHECK: %[[G0IDX:.+]] = add nsw i64 %offset.idx, -3
; CHECK: getelementptr inbounds i32, i32* %a, i64 %[[G0IDX]]
; CHECK-NOT: getelementptr
; CHECK: br i1 {{.*}}, label %middle.block, label %vector.body

define i32 @consecutive_ptr_reverse(i32* %a, i64 %n) {
entry:
  br label %for.body

for.body:
  %i = phi i64 [ %i.next, %for.body ], [ %n, %entry ]
  %tmp0 = phi i32 [ %tmp3, %for.body ], [ 0, %entry ]
  %tmp1 = getelementptr inbounds i32, i32* %a, i64 %i
  %tmp2 = load i32, i32* %tmp1, align 8
  %tmp3 = add i32 %tmp0, %tmp2
  %i.next = add nuw nsw i64 %i, -1
  %cond = icmp sgt i64 %i.next, 0
  br i1 %cond, label %for.body, label %for.end

for.end:
  %tmp4 = phi i32 [ %tmp3, %for.body ]
  ret i32 %tmp4
}

; CHECK-LABEL: interleaved_access_forward
; INTER-LABEL: interleaved_access_forward
;
; Check that a consecutive-like pointer used by a forward interleaved group is
; recognized as uniform and remains uniform after vectorization. When
; interleaved memory accesses aren't enabled, the pointer should not be
; recognized as uniform, and it should not be uniform after vectorization.
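;
; A roughly equivalent C loop (an illustrative reconstruction; not part of the
; original test):
;
;   struct pair { int x, y; };
;   int interleaved_access_forward(struct pair *p, long n) {
;     int sum = 0;
;     long i = 0;
;     do {
;       sum += p[i].x + p[i].y; // the x and y accesses form one interleaved group
;       ++i;
;     } while (i < n);
;     return sum;
;   }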
;
; CHECK-NOT: LV: Found uniform instruction: %tmp1 = getelementptr inbounds %pair, %pair* %p, i64 %i, i32 0
; CHECK-NOT: LV: Found uniform instruction: %tmp2 = getelementptr inbounds %pair, %pair* %p, i64 %i, i32 1
;
; CHECK: %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
; CHECK: %[[I1:.+]] = or i64 %index, 1
; CHECK: %[[I2:.+]] = or i64 %index, 2
; CHECK: %[[I3:.+]] = or i64 %index, 3
; CHECK: getelementptr inbounds %pair, %pair* %p, i64 %index, i32 0
; CHECK: getelementptr inbounds %pair, %pair* %p, i64 %[[I1]], i32 0
; CHECK: getelementptr inbounds %pair, %pair* %p, i64 %[[I2]], i32 0
; CHECK: getelementptr inbounds %pair, %pair* %p, i64 %[[I3]], i32 0
; CHECK: getelementptr inbounds %pair, %pair* %p, i64 %index, i32 1
; CHECK: getelementptr inbounds %pair, %pair* %p, i64 %[[I1]], i32 1
; CHECK: getelementptr inbounds %pair, %pair* %p, i64 %[[I2]], i32 1
; CHECK: getelementptr inbounds %pair, %pair* %p, i64 %[[I3]], i32 1
; CHECK: br i1 {{.*}}, label %middle.block, label %vector.body
;
; INTER: LV: Found uniform instruction: %tmp1 = getelementptr inbounds %pair, %pair* %p, i64 %i, i32 0
; INTER: LV: Found uniform instruction: %tmp2 = getelementptr inbounds %pair, %pair* %p, i64 %i, i32 1
;
; INTER: %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
; INTER-NOT: getelementptr
; INTER: getelementptr inbounds %pair, %pair* %p, i64 %index, i32 0
; INTER-NOT: getelementptr
; INTER: br i1 {{.*}}, label %middle.block, label %vector.body

define i32 @interleaved_access_forward(%pair* %p, i64 %n) {
entry:
  br label %for.body

for.body:
  %i = phi i64 [ %i.next, %for.body ], [ 0, %entry ]
  %tmp0 = phi i32 [ %tmp6, %for.body ], [ 0, %entry ]
  %tmp1 = getelementptr inbounds %pair, %pair* %p, i64 %i, i32 0
  %tmp2 = getelementptr inbounds %pair, %pair* %p, i64 %i, i32 1
  %tmp3 = load i32, i32* %tmp1, align 8
  %tmp4 = load i32, i32* %tmp2, align 8
  %tmp5 = add i32 %tmp3, %tmp4
  %tmp6 = add i32 %tmp0, %tmp5
  %i.next = add nuw nsw i64 %i, 1
  %cond = icmp slt i64 %i.next, %n
  br i1 %cond, label %for.body, label %for.end

for.end:
  %tmp14 = phi i32 [ %tmp6, %for.body ]
  ret i32 %tmp14
}

; CHECK-LABEL: interleaved_access_reverse
; INTER-LABEL: interleaved_access_reverse
;
; Check that a consecutive-like pointer used by a reverse interleaved group is
; recognized as uniform and remains uniform after vectorization. When
; interleaved memory accesses aren't enabled, the pointer should not be
; recognized as uniform, and it should not be uniform after vectorization.
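;
; A roughly equivalent C loop (an illustrative reconstruction; not part of the
; original test):
;
;   int interleaved_access_reverse(struct pair *p, long n) {
;     int sum = 0;
;     long i = n;
;     do {
;       sum += p[i].x + p[i].y; // the same interleaved group, walked backward
;       --i;
;     } while (i > 0);
;     return sum;
;   }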
;
; CHECK-NOT: LV: Found uniform instruction: %tmp1 = getelementptr inbounds %pair, %pair* %p, i64 %i, i32 0
; CHECK-NOT: LV: Found uniform instruction: %tmp2 = getelementptr inbounds %pair, %pair* %p, i64 %i, i32 1
;
; CHECK: %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
; CHECK: %offset.idx = sub i64 %n, %index
; CHECK: %[[I1:.+]] = add i64 %offset.idx, -1
; CHECK: %[[I2:.+]] = add i64 %offset.idx, -2
; CHECK: %[[I3:.+]] = add i64 %offset.idx, -3
; CHECK: getelementptr inbounds %pair, %pair* %p, i64 %offset.idx, i32 0
; CHECK: getelementptr inbounds %pair, %pair* %p, i64 %[[I1]], i32 0
; CHECK: getelementptr inbounds %pair, %pair* %p, i64 %[[I2]], i32 0
; CHECK: getelementptr inbounds %pair, %pair* %p, i64 %[[I3]], i32 0
; CHECK: getelementptr inbounds %pair, %pair* %p, i64 %offset.idx, i32 1
; CHECK: getelementptr inbounds %pair, %pair* %p, i64 %[[I1]], i32 1
; CHECK: getelementptr inbounds %pair, %pair* %p, i64 %[[I2]], i32 1
; CHECK: getelementptr inbounds %pair, %pair* %p, i64 %[[I3]], i32 1
; CHECK: br i1 {{.*}}, label %middle.block, label %vector.body
;
; INTER: LV: Found uniform instruction: %tmp1 = getelementptr inbounds %pair, %pair* %p, i64 %i, i32 0
; INTER: LV: Found uniform instruction: %tmp2 = getelementptr inbounds %pair, %pair* %p, i64 %i, i32 1
;
; INTER: %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
; INTER: %offset.idx = sub i64 %n, %index
; INTER-NOT: getelementptr
; INTER: %[[G0:.+]] = getelementptr inbounds %pair, %pair* %p, i64 %offset.idx, i32 0
; INTER: getelementptr inbounds i32, i32* %[[G0]], i64 -6
; INTER-NOT: getelementptr
; INTER: br i1 {{.*}}, label %middle.block, label %vector.body

define i32 @interleaved_access_reverse(%pair* %p, i64 %n) {
entry:
  br label %for.body

for.body:
  %i = phi i64 [ %i.next, %for.body ], [ %n, %entry ]
  %tmp0 = phi i32 [ %tmp6, %for.body ], [ 0, %entry ]
  %tmp1 = getelementptr inbounds %pair, %pair* %p, i64 %i, i32 0
  %tmp2 = getelementptr inbounds %pair, %pair* %p, i64 %i, i32 1
  %tmp3 = load i32, i32* %tmp1, align 8
  %tmp4 = load i32, i32* %tmp2, align 8
  %tmp5 = add i32 %tmp3, %tmp4
  %tmp6 = add i32 %tmp0, %tmp5
  %i.next = add nuw nsw i64 %i, -1
  %cond = icmp sgt i64 %i.next, 0
  br i1 %cond, label %for.body, label %for.end

for.end:
  %tmp14 = phi i32 [ %tmp6, %for.body ]
  ret i32 %tmp14
}

; INTER-LABEL: predicated_store
;
; Check that a consecutive-like pointer used by a forward interleaved group and
; scalarized store is not recognized as uniform and is not uniform after
; vectorization. The store is scalarized because it's in a predicated block.
; Even though the load in this example is vectorized and only uses the pointer
; as if it were uniform, the store is scalarized, making the pointer
; non-uniform.
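;
; A roughly equivalent C loop (an illustrative reconstruction; not part of the
; original test):
;
;   void predicated_store(struct pair *p, int x, long n) {
;     long i = 0;
;     do {
;       if (p[i].x == x)
;         p[i].x = p[i].x; // the store happens only under the predicate
;       ++i;
;     } while (i < n);
;   }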
;
; INTER-NOT: LV: Found uniform instruction: %tmp0 = getelementptr inbounds %pair, %pair* %p, i64 %i, i32 0
;
; INTER: %index = phi i64 [ 0, %vector.ph ], [ %index.next, {{.*}} ]
; INTER: %[[G0:.+]] = getelementptr inbounds %pair, %pair* %p, i64 %index, i32 0
; INTER: %[[B0:.+]] = bitcast i32* %[[G0]] to <8 x i32>*
; INTER: %wide.vec = load <8 x i32>, <8 x i32>* %[[B0]], align 8
; INTER: %[[I1:.+]] = or i64 %index, 1
; INTER: getelementptr inbounds %pair, %pair* %p, i64 %[[I1]], i32 0
; INTER: %[[I2:.+]] = or i64 %index, 2
; INTER: getelementptr inbounds %pair, %pair* %p, i64 %[[I2]], i32 0
; INTER: %[[I3:.+]] = or i64 %index, 3
; INTER: getelementptr inbounds %pair, %pair* %p, i64 %[[I3]], i32 0
; INTER: br i1 {{.*}}, label %middle.block, label %vector.body

define void @predicated_store(%pair *%p, i32 %x, i64 %n) {
entry:
  br label %for.body

for.body:
  %i = phi i64 [ %i.next, %if.merge ], [ 0, %entry ]
  %tmp0 = getelementptr inbounds %pair, %pair* %p, i64 %i, i32 0
  %tmp1 = load i32, i32* %tmp0, align 8
  %tmp2 = icmp eq i32 %tmp1, %x
  br i1 %tmp2, label %if.then, label %if.merge

if.then:
  store i32 %tmp1, i32* %tmp0, align 8
  br label %if.merge

if.merge:
  %i.next = add nuw nsw i64 %i, 1
  %cond = icmp slt i64 %i.next, %n
  br i1 %cond, label %for.body, label %for.end

for.end:
  ret void
}

; CHECK-LABEL: irregular_type
;
; Check that a consecutive pointer used by a scalarized store is not recognized
; as uniform and is not uniform after vectorization. The store is scalarized
; because the stored type may require padding.
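;
; A roughly equivalent C loop (an illustrative reconstruction; not part of the
; original test; long double maps to x86_fp80 under this datalayout):
;
;   void irregular_type(long double *a, long n) {
;     long i = 0;
;     do {
;       a[i] = 1.0L; // x86_fp80 stores are scalarized because of padding
;       ++i;
;     } while (i < n);
;   }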
;
; CHECK-NOT: LV: Found uniform instruction: %tmp1 = getelementptr inbounds x86_fp80, x86_fp80* %a, i64 %i
;
; CHECK: %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
; CHECK: %[[I1:.+]] = or i64 %index, 1
; CHECK: %[[I2:.+]] = or i64 %index, 2
; CHECK: %[[I3:.+]] = or i64 %index, 3
; CHECK: getelementptr inbounds x86_fp80, x86_fp80* %a, i64 %index
; CHECK: getelementptr inbounds x86_fp80, x86_fp80* %a, i64 %[[I1]]
; CHECK: getelementptr inbounds x86_fp80, x86_fp80* %a, i64 %[[I2]]
; CHECK: getelementptr inbounds x86_fp80, x86_fp80* %a, i64 %[[I3]]
; CHECK: br i1 {{.*}}, label %middle.block, label %vector.body

define void @irregular_type(x86_fp80* %a, i64 %n) {
entry:
  br label %for.body

for.body:
  %i = phi i64 [ 0, %entry ], [ %i.next, %for.body ]
  %tmp0 = sitofp i32 1 to x86_fp80
  %tmp1 = getelementptr inbounds x86_fp80, x86_fp80* %a, i64 %i
  store x86_fp80 %tmp0, x86_fp80* %tmp1, align 16
  %i.next = add i64 %i, 1
  %cond = icmp slt i64 %i.next, %n
  br i1 %cond, label %for.body, label %for.end

for.end:
  ret void
}

; CHECK-LABEL: pointer_iv_uniform
;
; Check that a pointer induction variable is recognized as uniform and remains
; uniform after vectorization.
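;
; A roughly equivalent C loop (an illustrative reconstruction; not part of the
; original test):
;
;   void pointer_iv_uniform(int *a, int x, long n) {
;     int *p = a;
;     long i = 0;
;     do {
;       *p++ = x; // p is a pointer induction variable with unit stride
;       ++i;
;     } while (i < n);
;   }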
;
; CHECK: LV: Found uniform instruction: %p = phi i32* [ %tmp03, %for.body ], [ %a, %entry ]
;
; CHECK: %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
; CHECK-NOT: getelementptr
; CHECK: %next.gep = getelementptr i32, i32* %a, i64 %index
; CHECK-NOT: getelementptr
; CHECK: br i1 {{.*}}, label %middle.block, label %vector.body

define void @pointer_iv_uniform(i32* %a, i32 %x, i64 %n) {
entry:
  br label %for.body

for.body:
  %i = phi i64 [ %i.next, %for.body ], [ 0, %entry ]
  %p = phi i32* [ %tmp03, %for.body ], [ %a, %entry ]
  store i32 %x, i32* %p, align 8
  %tmp03 = getelementptr inbounds i32, i32* %p, i32 1
  %i.next = add nuw nsw i64 %i, 1
  %cond = icmp slt i64 %i.next, %n
  br i1 %cond, label %for.body, label %for.end

for.end:
  ret void
}

; INTER-LABEL: pointer_iv_non_uniform_0
;
; Check that a pointer induction variable with a non-uniform user is not
; recognized as uniform and is not uniform after vectorization. The pointer
; induction variable is used by getelementptr instructions that are non-uniform
; due to scalarization of the stores.
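;
; A roughly equivalent C loop (an illustrative reconstruction; not part of the
; original test):
;
;   void pointer_iv_non_uniform_0(int *a, long n) {
;     int *p = a;
;     long i = 0;
;     do {
;       p[2] = p[4] - p[0]; // scalarized stores keep these GEPs non-uniform
;       p[3] = p[1] - p[1];
;       p += 4;             // the pointer IV advances by four i32s per iteration
;       ++i;
;     } while (i < n);
;   }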
;
; INTER-NOT: LV: Found uniform instruction: %p = phi i32* [ %tmp03, %for.body ], [ %a, %entry ]
;
; INTER: %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
; INTER: %[[I0:.+]] = shl i64 %index, 2
; INTER: %next.gep = getelementptr i32, i32* %a, i64 %[[I0]]
; INTER: %[[S1:.+]] = shl i64 %index, 2
; INTER: %[[I1:.+]] = or i64 %[[S1]], 4
; INTER: %next.gep2 = getelementptr i32, i32* %a, i64 %[[I1]]
; INTER: %[[S2:.+]] = shl i64 %index, 2
; INTER: %[[I2:.+]] = or i64 %[[S2]], 8
; INTER: %next.gep3 = getelementptr i32, i32* %a, i64 %[[I2]]
; INTER: %[[S3:.+]] = shl i64 %index, 2
; INTER: %[[I3:.+]] = or i64 %[[S3]], 12
; INTER: %next.gep4 = getelementptr i32, i32* %a, i64 %[[I3]]
; INTER: br i1 {{.*}}, label %middle.block, label %vector.body

define void @pointer_iv_non_uniform_0(i32* %a, i64 %n) {
entry:
  br label %for.body

for.body:
  %i = phi i64 [ %i.next, %for.body ], [ 0, %entry ]
  %p = phi i32* [ %tmp03, %for.body ], [ %a, %entry ]
  %tmp00 = load i32, i32* %p, align 8
  %tmp01 = getelementptr inbounds i32, i32* %p, i32 1
  %tmp02 = load i32, i32* %tmp01, align 8
  %tmp03 = getelementptr inbounds i32, i32* %p, i32 4
  %tmp04 = load i32, i32* %tmp03, align 8
  %tmp05 = getelementptr inbounds i32, i32* %p, i32 5
  %tmp06 = load i32, i32* %tmp05, align 8
  %tmp07 = sub i32 %tmp04, %tmp00
  %tmp08 = sub i32 %tmp02, %tmp02
  %tmp09 = getelementptr inbounds i32, i32* %p, i32 2
  store i32 %tmp07, i32* %tmp09, align 8
  %tmp10 = getelementptr inbounds i32, i32* %p, i32 3
  store i32 %tmp08, i32* %tmp10, align 8
  %i.next = add nuw nsw i64 %i, 1
  %cond = icmp slt i64 %i.next, %n
  br i1 %cond, label %for.body, label %for.end

for.end:
  ret void
}

; CHECK-LABEL: pointer_iv_non_uniform_1
;
; Check that a pointer induction variable with a non-uniform user is not
; recognized as uniform and is not uniform after vectorization. The pointer
; induction variable is used by a store that will be scalarized.
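;
; A roughly equivalent C loop (an illustrative reconstruction; not part of the
; original test):
;
;   void pointer_iv_non_uniform_1(long double *a, long n) {
;     long double *p = a;
;     long i = 0;
;     do {
;       *p++ = 1.0L; // the x86_fp80 store is scalarized, so p is non-uniform
;       ++i;
;     } while (i < n);
;   }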
;
; CHECK-NOT: LV: Found uniform instruction: %p = phi x86_fp80* [%tmp1, %for.body], [%a, %entry]
;
; CHECK: %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
; CHECK: %next.gep = getelementptr x86_fp80, x86_fp80* %a, i64 %index
; CHECK: %[[I1:.+]] = or i64 %index, 1
; CHECK: %next.gep2 = getelementptr x86_fp80, x86_fp80* %a, i64 %[[I1]]
; CHECK: %[[I2:.+]] = or i64 %index, 2
; CHECK: %next.gep3 = getelementptr x86_fp80, x86_fp80* %a, i64 %[[I2]]
; CHECK: %[[I3:.+]] = or i64 %index, 3
; CHECK: %next.gep4 = getelementptr x86_fp80, x86_fp80* %a, i64 %[[I3]]
; CHECK: br i1 {{.*}}, label %middle.block, label %vector.body

define void @pointer_iv_non_uniform_1(x86_fp80* %a, i64 %n) {
entry:
  br label %for.body

for.body:
  %i = phi i64 [ %i.next, %for.body ], [ 0, %entry ]
  %p = phi x86_fp80* [%tmp1, %for.body], [%a, %entry]
  %tmp0 = sitofp i32 1 to x86_fp80
  store x86_fp80 %tmp0, x86_fp80* %p, align 16
  %tmp1 = getelementptr inbounds x86_fp80, x86_fp80* %p, i32 1
  %i.next = add i64 %i, 1
  %cond = icmp slt i64 %i.next, %n
  br i1 %cond, label %for.body, label %for.end

for.end:
  ret void
}

; CHECK-LABEL: pointer_iv_mixed
;
; Check multiple pointer induction variables where only one is recognized as
; uniform and remains uniform after vectorization. The other pointer induction
; variable is not recognized as uniform and is not uniform after vectorization
; because it is stored to memory.
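;
; A roughly equivalent C loop (an illustrative reconstruction; not part of the
; original test):
;
;   int pointer_iv_mixed(int *a, int **b, long n) {
;     int *p = a;
;     int **q = b;
;     int sum = 0;
;     long i = 0;
;     do {
;       sum += *p;
;       *q++ = p++; // p itself is stored to memory, so p cannot stay uniform
;       ++i;
;     } while (i < n);
;     return sum;
;   }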
;
; CHECK-NOT: LV: Found uniform instruction: %p = phi i32* [ %tmp3, %for.body ], [ %a, %entry ]
; CHECK: LV: Found uniform instruction: %q = phi i32** [ %tmp4, %for.body ], [ %b, %entry ]
;
; CHECK: %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
; CHECK: %next.gep = getelementptr i32, i32* %a, i64 %index
; CHECK: %[[I1:.+]] = or i64 %index, 1
; CHECK: %next.gep10 = getelementptr i32, i32* %a, i64 %[[I1]]
; CHECK: %[[I2:.+]] = or i64 %index, 2
; CHECK: %next.gep11 = getelementptr i32, i32* %a, i64 %[[I2]]
; CHECK: %[[I3:.+]] = or i64 %index, 3
; CHECK: %next.gep12 = getelementptr i32, i32* %a, i64 %[[I3]]
; CHECK: %[[V0:.+]] = insertelement <4 x i32*> poison, i32* %next.gep, i32 0
; CHECK: %[[V1:.+]] = insertelement <4 x i32*> %[[V0]], i32* %next.gep10, i32 1
; CHECK: %[[V2:.+]] = insertelement <4 x i32*> %[[V1]], i32* %next.gep11, i32 2
; CHECK: %[[V3:.+]] = insertelement <4 x i32*> %[[V2]], i32* %next.gep12, i32 3
; CHECK-NOT: getelementptr
; CHECK: %next.gep13 = getelementptr i32*, i32** %b, i64 %index
; CHECK-NOT: getelementptr
; CHECK: %[[B0:.+]] = bitcast i32** %next.gep13 to <4 x i32*>*
; CHECK: store <4 x i32*> %[[V3]], <4 x i32*>* %[[B0]], align 8
; CHECK: br i1 {{.*}}, label %middle.block, label %vector.body

define i32 @pointer_iv_mixed(i32* %a, i32** %b, i64 %n) {
entry:
  br label %for.body

for.body:
  %i = phi i64 [ %i.next, %for.body ], [ 0, %entry ]
  %p = phi i32* [ %tmp3, %for.body ], [ %a, %entry ]
  %q = phi i32** [ %tmp4, %for.body ], [ %b, %entry ]
  %tmp0 = phi i32 [ %tmp2, %for.body ], [ 0, %entry ]
  %tmp1 = load i32, i32* %p, align 8
  %tmp2 = add i32 %tmp1, %tmp0
  store i32* %p, i32** %q, align 8
  %tmp3 = getelementptr inbounds i32, i32* %p, i32 1
  %tmp4 = getelementptr inbounds i32*, i32** %q, i32 1
  %i.next = add nuw nsw i64 %i, 1
  %cond = icmp slt i64 %i.next, %n
  br i1 %cond, label %for.body, label %for.end

for.end:
  %tmp5 = phi i32 [ %tmp2, %for.body ]
  ret i32 %tmp5
}

; INTER-LABEL: bitcast_pointer_operand
;
; Check that a pointer operand having a user other than a memory access is
; recognized as uniform after vectorization. In this test case, %tmp1 is a
; bitcast that is used by a load and a getelementptr instruction (%tmp2). Once
; %tmp2 is marked uniform, %tmp1 should be marked uniform as well.
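;
; A roughly equivalent C loop (an illustrative reconstruction; not part of the
; original test):
;
;   void bitcast_pointer_operand(long *A, char *B, long n) {
;     long i = 0;
;     do {
;       char *c = (char *)&A[i]; // corresponds to the bitcast %tmp1
;       B[i] = c[3] ^ c[0];
;       ++i;
;     } while (i < n);
;   }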
;
; INTER: LV: Found uniform instruction: %cond = icmp slt i64 %i.next, %n
; INTER-NEXT: LV: Found uniform instruction: %tmp2 = getelementptr inbounds i8, i8* %tmp1, i64 3
; INTER-NEXT: LV: Found uniform instruction: %tmp6 = getelementptr inbounds i8, i8* %B, i64 %i
; INTER-NEXT: LV: Found uniform instruction: %tmp1 = bitcast i64* %tmp0 to i8*
; INTER-NEXT: LV: Found uniform instruction: %tmp0 = getelementptr inbounds i64, i64* %A, i64 %i
; INTER-NEXT: LV: Found uniform instruction: %i = phi i64 [ 0, %entry ], [ %i.next, %for.body ]
; INTER-NEXT: LV: Found uniform instruction: %i.next = add nuw nsw i64 %i, 1
; INTER: vector.body:
; INTER-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %vector.ph ], [ [[INDEX_NEXT:%.*]], %vector.body ]
; INTER-NEXT: [[TMP4:%.*]] = getelementptr inbounds i64, i64* %A, i64 [[INDEX]]
; INTER-NEXT: [[TMP5:%.*]] = bitcast i64* [[TMP4]] to <32 x i8>*
; INTER-NEXT: [[WIDE_VEC:%.*]] = load <32 x i8>, <32 x i8>* [[TMP5]], align 1
; INTER-NEXT: [[STRIDED_VEC:%.*]] = shufflevector <32 x i8> [[WIDE_VEC]], <32 x i8> poison, <4 x i32> <i32 0, i32 8, i32 16, i32 24>
; INTER-NEXT: [[STRIDED_VEC5:%.*]] = shufflevector <32 x i8> [[WIDE_VEC]], <32 x i8> poison, <4 x i32> <i32 3, i32 11, i32 19, i32 27>
; INTER-NEXT: [[TMP6:%.*]] = xor <4 x i8> [[STRIDED_VEC5]], [[STRIDED_VEC]]
; INTER-NEXT: [[TMP7:%.*]] = getelementptr inbounds i8, i8* %B, i64 [[INDEX]]
; INTER-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to <4 x i8>*
; INTER-NEXT: store <4 x i8> [[TMP6]], <4 x i8>* [[TMP8]], align 1
; INTER-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; INTER: br i1 {{.*}}, label %middle.block, label %vector.body

define void @bitcast_pointer_operand(i64* %A, i8* %B, i64 %n) {
entry:
  br label %for.body

for.body:
  %i = phi i64 [ 0, %entry ], [ %i.next, %for.body ]
  %tmp0 = getelementptr inbounds i64, i64* %A, i64 %i
  %tmp1 = bitcast i64* %tmp0 to i8*
  %tmp2 = getelementptr inbounds i8, i8* %tmp1, i64 3
  %tmp3 = load i8, i8* %tmp2, align 1
  %tmp4 = load i8, i8* %tmp1, align 1
  %tmp5 = xor i8 %tmp3, %tmp4
  %tmp6 = getelementptr inbounds i8, i8* %B, i64 %i
  store i8 %tmp5, i8* %tmp6
  %i.next = add nuw nsw i64 %i, 1
  %cond = icmp slt i64 %i.next, %n
  br i1 %cond, label %for.body, label %for.end

for.end:
  ret void
}