; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -basicaa -loop-vectorize -force-vector-interleave=1 -force-vector-width=4 -loop-vectorize-with-block-frequency -dce -instcombine -S | FileCheck %s

target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
target triple = "x86_64-apple-macosx10.8.0"

@b = common global [2048 x i32] zeroinitializer, align 16
@c = common global [2048 x i32] zeroinitializer, align 16
@a = common global [2048 x i32] zeroinitializer, align 16
@G = common global [32 x [1024 x i32]] zeroinitializer, align 16
@ub = common global [1024 x i32] zeroinitializer, align 16
@uc = common global [1024 x i32] zeroinitializer, align 16
@d = common global [2048 x i32] zeroinitializer, align 16
@fa = common global [1024 x float] zeroinitializer, align 16
@fb = common global [1024 x float] zeroinitializer, align 16
@ic = common global [1024 x i32] zeroinitializer, align 16
@da = common global [1024 x float] zeroinitializer, align 16
@db = common global [1024 x float] zeroinitializer, align 16
@dc = common global [1024 x float] zeroinitializer, align 16
@dd = common global [1024 x float] zeroinitializer, align 16
@dj = common global [1024 x i32] zeroinitializer, align 16

; We can optimize this test without a tail.
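;
; For reference, a rough C equivalent of the loop below, reconstructed from the
; IR (array sizes and names follow the globals above; this sketch is only
; illustrative and not part of the test):
;
;   extern int a[2048], b[2048], c[2048];
;   void example1(void) {
;     for (int i = 0; i < 256; i++)
;       a[i] = b[i] + c[i];   /* constant trip count, a multiple of VF=4 */
;   }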
define void @example1() optsize {
; CHECK-LABEL: @example1(
; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds [2048 x i32], [2048 x i32]* @b, i64 0, i64 [[INDEX]]
; CHECK-NEXT: [[TMP2:%.*]] = bitcast i32* [[TMP1]] to <4 x i32>*
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, <4 x i32>* [[TMP2]], align 16
; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds [2048 x i32], [2048 x i32]* @c, i64 0, i64 [[INDEX]]
; CHECK-NEXT: [[TMP4:%.*]] = bitcast i32* [[TMP3]] to <4 x i32>*
; CHECK-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x i32>, <4 x i32>* [[TMP4]], align 16
; CHECK-NEXT: [[TMP5:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1]], [[WIDE_LOAD]]
; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds [2048 x i32], [2048 x i32]* @a, i64 0, i64 [[INDEX]]
; CHECK-NEXT: [[TMP7:%.*]] = bitcast i32* [[TMP6]] to <4 x i32>*
; CHECK-NEXT: store <4 x i32> [[TMP5]], <4 x i32>* [[TMP7]], align 16
; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 4
; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], 256
; CHECK-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop !0
; CHECK: middle.block:
; CHECK-NEXT: br i1 true, label [[TMP10:%.*]], label [[SCALAR_PH]]
; CHECK-NEXT: br label [[TMP9:%.*]]
; CHECK: br i1 undef, label [[TMP10]], label [[TMP9]], !llvm.loop !2
  br label %1

; <label>:1 ; preds = %1, %0
  %indvars.iv = phi i64 [ 0, %0 ], [ %indvars.iv.next, %1 ]
  %2 = getelementptr inbounds [2048 x i32], [2048 x i32]* @b, i64 0, i64 %indvars.iv
  %3 = load i32, i32* %2, align 4
  %4 = getelementptr inbounds [2048 x i32], [2048 x i32]* @c, i64 0, i64 %indvars.iv
  %5 = load i32, i32* %4, align 4
  %6 = add nsw i32 %5, %3
  %7 = getelementptr inbounds [2048 x i32], [2048 x i32]* @a, i64 0, i64 %indvars.iv
  store i32 %6, i32* %7, align 4
  %indvars.iv.next = add i64 %indvars.iv, 1
  %lftr.wideiv = trunc i64 %indvars.iv.next to i32
  %exitcond = icmp eq i32 %lftr.wideiv, 256
  br i1 %exitcond, label %8, label %1

; <label>:8 ; preds = %1
  ret void
}

; Can vectorize in 'optsize' mode by masking the needed tail.
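;
; For reference, a rough C equivalent of the two loops below, reconstructed
; from the IR (illustrative only; the bitwise '&' mirrors the 'and' in the
; scalar body of the second loop):
;
;   extern int a[2048], b[2048], c[2048];
;   void example2(int n, int x) {
;     int i = 0;
;     for (i = 0; i < n; i++)     /* unknown trip count: the tail is masked */
;       b[i] = x;
;     while (n--)                 /* general loop exit condition */
;       a[i] = b[i] & c[i], i++;
;   }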
define void @example2(i32 %n, i32 %x) optsize {
; CHECK-LABEL: @example2(
; CHECK-NEXT: [[TMP1:%.*]] = icmp sgt i32 [[N:%.*]], 0
; CHECK-NEXT: br i1 [[TMP1]], label [[DOTLR_PH5_PREHEADER:%.*]], label [[DOTPREHEADER:%.*]]
; CHECK: .lr.ph5.preheader:
; CHECK-NEXT: [[TMP2:%.*]] = add i32 [[N]], -1
; CHECK-NEXT: [[TMP3:%.*]] = zext i32 [[TMP2]] to i64
; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK-NEXT: [[N_RND_UP:%.*]] = add nuw nsw i64 [[TMP3]], 4
; CHECK-NEXT: [[N_VEC:%.*]] = and i64 [[N_RND_UP]], 8589934588
; CHECK-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <4 x i64> undef, i64 [[TMP3]], i32 0
; CHECK-NEXT: [[BROADCAST_SPLAT2:%.*]] = shufflevector <4 x i64> [[BROADCAST_SPLATINSERT1]], <4 x i64> undef, <4 x i32> zeroinitializer
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[PRED_STORE_CONTINUE8:%.*]] ]
; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i64> undef, i64 [[INDEX]], i32 0
; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i64> [[BROADCAST_SPLATINSERT]], <4 x i64> undef, <4 x i32> zeroinitializer
; CHECK-NEXT: [[INDUCTION:%.*]] = add <4 x i64> [[BROADCAST_SPLAT]], <i64 0, i64 1, i64 2, i64 3>
; CHECK-NEXT: [[TMP5:%.*]] = or i64 [[INDEX]], 1
; CHECK-NEXT: [[TMP6:%.*]] = or i64 [[INDEX]], 2
; CHECK-NEXT: [[TMP7:%.*]] = or i64 [[INDEX]], 3
; CHECK-NEXT: [[TMP8:%.*]] = icmp ule <4 x i64> [[INDUCTION]], [[BROADCAST_SPLAT2]]
; CHECK-NEXT: [[TMP9:%.*]] = extractelement <4 x i1> [[TMP8]], i32 0
; CHECK-NEXT: br i1 [[TMP9]], label [[PRED_STORE_IF:%.*]], label [[PRED_STORE_CONTINUE:%.*]]
; CHECK: pred.store.if:
; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds [2048 x i32], [2048 x i32]* @b, i64 0, i64 [[INDEX]]
; CHECK-NEXT: store i32 [[X:%.*]], i32* [[TMP10]], align 16
; CHECK-NEXT: br label [[PRED_STORE_CONTINUE]]
; CHECK: pred.store.continue:
; CHECK-NEXT: [[TMP11:%.*]] = extractelement <4 x i1> [[TMP8]], i32 1
; CHECK-NEXT: br i1 [[TMP11]], label [[PRED_STORE_IF3:%.*]], label [[PRED_STORE_CONTINUE4:%.*]]
; CHECK: pred.store.if3:
; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds [2048 x i32], [2048 x i32]* @b, i64 0, i64 [[TMP5]]
; CHECK-NEXT: store i32 [[X]], i32* [[TMP12]], align 4
; CHECK-NEXT: br label [[PRED_STORE_CONTINUE4]]
; CHECK: pred.store.continue4:
; CHECK-NEXT: [[TMP13:%.*]] = extractelement <4 x i1> [[TMP8]], i32 2
; CHECK-NEXT: br i1 [[TMP13]], label [[PRED_STORE_IF5:%.*]], label [[PRED_STORE_CONTINUE6:%.*]]
; CHECK: pred.store.if5:
; CHECK-NEXT: [[TMP14:%.*]] = getelementptr inbounds [2048 x i32], [2048 x i32]* @b, i64 0, i64 [[TMP6]]
; CHECK-NEXT: store i32 [[X]], i32* [[TMP14]], align 8
; CHECK-NEXT: br label [[PRED_STORE_CONTINUE6]]
; CHECK: pred.store.continue6:
; CHECK-NEXT: [[TMP15:%.*]] = extractelement <4 x i1> [[TMP8]], i32 3
; CHECK-NEXT: br i1 [[TMP15]], label [[PRED_STORE_IF7:%.*]], label [[PRED_STORE_CONTINUE8]]
; CHECK: pred.store.if7:
; CHECK-NEXT: [[TMP16:%.*]] = getelementptr inbounds [2048 x i32], [2048 x i32]* @b, i64 0, i64 [[TMP7]]
; CHECK-NEXT: store i32 [[X]], i32* [[TMP16]], align 4
; CHECK-NEXT: br label [[PRED_STORE_CONTINUE8]]
; CHECK: pred.store.continue8:
; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 4
; CHECK-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop !4
; CHECK: middle.block:
; CHECK-NEXT: br i1 true, label [[DOT_PREHEADER_CRIT_EDGE:%.*]], label [[SCALAR_PH]]
; CHECK: ._crit_edge:
; CHECK-NEXT: ret void
  %1 = icmp sgt i32 %n, 0
  br i1 %1, label %.lr.ph5, label %.preheader

..preheader_crit_edge: ; preds = %.lr.ph5
  %phitmp = sext i32 %n to i64
  br label %.preheader

.preheader: ; preds = %..preheader_crit_edge, %0
  %i.0.lcssa = phi i64 [ %phitmp, %..preheader_crit_edge ], [ 0, %0 ]
  %2 = icmp eq i32 %n, 0
  br i1 %2, label %._crit_edge, label %.lr.ph

.lr.ph5: ; preds = %0, %.lr.ph5
  %indvars.iv6 = phi i64 [ %indvars.iv.next7, %.lr.ph5 ], [ 0, %0 ]
  %3 = getelementptr inbounds [2048 x i32], [2048 x i32]* @b, i64 0, i64 %indvars.iv6
  store i32 %x, i32* %3, align 4
  %indvars.iv.next7 = add i64 %indvars.iv6, 1
  %lftr.wideiv = trunc i64 %indvars.iv.next7 to i32
  %exitcond = icmp eq i32 %lftr.wideiv, %n
  br i1 %exitcond, label %..preheader_crit_edge, label %.lr.ph5

.lr.ph: ; preds = %.preheader, %.lr.ph
  %indvars.iv = phi i64 [ %indvars.iv.next, %.lr.ph ], [ %i.0.lcssa, %.preheader ]
  %.02 = phi i32 [ %4, %.lr.ph ], [ %n, %.preheader ]
  %4 = add nsw i32 %.02, -1
  %5 = getelementptr inbounds [2048 x i32], [2048 x i32]* @b, i64 0, i64 %indvars.iv
  %6 = load i32, i32* %5, align 4
  %7 = getelementptr inbounds [2048 x i32], [2048 x i32]* @c, i64 0, i64 %indvars.iv
  %8 = load i32, i32* %7, align 4
  %9 = and i32 %8, %6
  %10 = getelementptr inbounds [2048 x i32], [2048 x i32]* @a, i64 0, i64 %indvars.iv
  store i32 %9, i32* %10, align 4
  %indvars.iv.next = add i64 %indvars.iv, 1
  %11 = icmp eq i32 %4, 0
  br i1 %11, label %._crit_edge, label %.lr.ph

._crit_edge: ; preds = %.lr.ph, %.preheader
  ret void
}

; N is unknown, we need a tail. Can't vectorize because the loop has no primary
; induction.
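;
; For reference, a rough C equivalent of the loop below, reconstructed from the
; IR (illustrative only; 'restrict' mirrors the noalias arguments):
;
;   void example3(int n, int *restrict p, int *restrict q) {
;     while (n--)
;       *p++ = *q++;   /* count-down loop, no usable primary induction */
;   }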
;CHECK-LABEL: @example3(
;CHECK-NOT: <4 x i32>

define void @example3(i32 %n, i32* noalias nocapture %p, i32* noalias nocapture %q) optsize {
  %1 = icmp eq i32 %n, 0
  br i1 %1, label %._crit_edge, label %.lr.ph

.lr.ph: ; preds = %0, %.lr.ph
  %.05 = phi i32 [ %2, %.lr.ph ], [ %n, %0 ]
  %.014 = phi i32* [ %5, %.lr.ph ], [ %p, %0 ]
  %.023 = phi i32* [ %3, %.lr.ph ], [ %q, %0 ]
  %2 = add nsw i32 %.05, -1
  %3 = getelementptr inbounds i32, i32* %.023, i64 1
  %4 = load i32, i32* %.023, align 16
  %5 = getelementptr inbounds i32, i32* %.014, i64 1
  store i32 %4, i32* %.014, align 16
  %6 = icmp eq i32 %2, 0
  br i1 %6, label %._crit_edge, label %.lr.ph

._crit_edge: ; preds = %.lr.ph, %0
  ret void
}

; We can't vectorize this one because we need a runtime ptr check.
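;
; For reference, a rough C equivalent of the loop below, reconstructed from the
; IR (illustrative only; src and dst are allowed to alias here, which is why a
; runtime pointer check would be required):
;
;   void example23(unsigned short *src, int *dst) {
;     for (int i = 0; i < 256; i++)
;       *dst++ = *src++ << 7;
;   }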
;CHECK-LABEL: @example23(
;CHECK-NOT: <4 x i32>

define void @example23(i16* nocapture %src, i32* nocapture %dst) optsize {
  br label %1

; <label>:1 ; preds = %1, %0
  %.04 = phi i16* [ %src, %0 ], [ %2, %1 ]
  %.013 = phi i32* [ %dst, %0 ], [ %6, %1 ]
  %i.02 = phi i32 [ 0, %0 ], [ %7, %1 ]
  %2 = getelementptr inbounds i16, i16* %.04, i64 1
  %3 = load i16, i16* %.04, align 2
  %4 = zext i16 %3 to i32
  %5 = shl nuw nsw i32 %4, 7
  %6 = getelementptr inbounds i32, i32* %.013, i64 1
  store i32 %5, i32* %.013, align 4
  %7 = add nsw i32 %i.02, 1
  %exitcond = icmp eq i32 %7, 256
  br i1 %exitcond, label %8, label %1

; <label>:8 ; preds = %1
  ret void
}

; We CAN vectorize this example because the pointers are marked as noalias.
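;
; For reference, a rough C equivalent of the loop below, reconstructed from the
; IR (illustrative only; 'restrict' mirrors the noalias arguments, so no
; runtime aliasing check is needed):
;
;   void example23b(unsigned short *restrict src, int *restrict dst) {
;     for (int i = 0; i < 256; i++)
;       *dst++ = *src++ << 7;
;   }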
define void @example23b(i16* noalias nocapture %src, i32* noalias nocapture %dst) optsize {
; CHECK-LABEL: @example23b(
; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i16, i16* [[SRC:%.*]], i64 [[INDEX]]
; CHECK-NEXT: [[NEXT_GEP4:%.*]] = getelementptr i32, i32* [[DST:%.*]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP1:%.*]] = bitcast i16* [[NEXT_GEP]] to <4 x i16>*
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i16>, <4 x i16>* [[TMP1]], align 2
; CHECK-NEXT: [[TMP2:%.*]] = zext <4 x i16> [[WIDE_LOAD]] to <4 x i32>
; CHECK-NEXT: [[TMP3:%.*]] = shl nuw nsw <4 x i32> [[TMP2]], <i32 7, i32 7, i32 7, i32 7>
; CHECK-NEXT: [[TMP4:%.*]] = bitcast i32* [[NEXT_GEP4]] to <4 x i32>*
; CHECK-NEXT: store <4 x i32> [[TMP3]], <4 x i32>* [[TMP4]], align 4
; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 4
; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i64 [[INDEX_NEXT]], 256
; CHECK-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop !6
; CHECK: middle.block:
; CHECK-NEXT: br i1 true, label [[TMP7:%.*]], label [[SCALAR_PH]]
; CHECK-NEXT: br label [[TMP6:%.*]]
; CHECK: br i1 undef, label [[TMP7]], label [[TMP6]], !llvm.loop !7
  br label %1

; <label>:1 ; preds = %1, %0
  %.04 = phi i16* [ %src, %0 ], [ %2, %1 ]
  %.013 = phi i32* [ %dst, %0 ], [ %6, %1 ]
  %i.02 = phi i32 [ 0, %0 ], [ %7, %1 ]
  %2 = getelementptr inbounds i16, i16* %.04, i64 1
  %3 = load i16, i16* %.04, align 2
  %4 = zext i16 %3 to i32
  %5 = shl nuw nsw i32 %4, 7
  %6 = getelementptr inbounds i32, i32* %.013, i64 1
  store i32 %5, i32* %.013, align 4
  %7 = add nsw i32 %i.02, 1
  %exitcond = icmp eq i32 %7, 256
  br i1 %exitcond, label %8, label %1

; <label>:8 ; preds = %1
  ret void
}

; We CAN vectorize this example by folding the tail it entails.
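;
; For reference, a rough C equivalent of the loop below, reconstructed from the
; IR (illustrative only; the trip count of 257 is not a multiple of the vector
; width, so the tail is folded under a mask):
;
;   void example23c(unsigned short *restrict src, int *restrict dst) {
;     for (long i = 0; i < 257; i++)
;       *dst++ = *src++ << 7;
;   }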
define void @example23c(i16* noalias nocapture %src, i32* noalias nocapture %dst) optsize {
; CHECK-LABEL: @example23c(
; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[PRED_STORE_CONTINUE22:%.*]] ]
; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i64> undef, i64 [[INDEX]], i32 0
; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i64> [[BROADCAST_SPLATINSERT]], <4 x i64> undef, <4 x i32> zeroinitializer
; CHECK-NEXT: [[INDUCTION:%.*]] = add <4 x i64> [[BROADCAST_SPLAT]], <i64 0, i64 1, i64 2, i64 3>
; CHECK-NEXT: [[TMP1:%.*]] = icmp ult <4 x i64> [[INDUCTION]], <i64 257, i64 257, i64 257, i64 257>
; CHECK-NEXT: [[TMP2:%.*]] = extractelement <4 x i1> [[TMP1]], i32 0
; CHECK-NEXT: br i1 [[TMP2]], label [[PRED_LOAD_IF:%.*]], label [[PRED_LOAD_CONTINUE:%.*]]
; CHECK: pred.load.if:
; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i16, i16* [[SRC:%.*]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP3:%.*]] = load i16, i16* [[NEXT_GEP]], align 2
; CHECK-NEXT: br label [[PRED_LOAD_CONTINUE]]
; CHECK: pred.load.continue:
; CHECK-NEXT: [[TMP4:%.*]] = phi i16 [ undef, [[VECTOR_BODY]] ], [ [[TMP3]], [[PRED_LOAD_IF]] ]
; CHECK-NEXT: [[TMP5:%.*]] = extractelement <4 x i1> [[TMP1]], i32 1
; CHECK-NEXT: br i1 [[TMP5]], label [[PRED_LOAD_IF11:%.*]], label [[PRED_LOAD_CONTINUE12:%.*]]
; CHECK: pred.load.if11:
; CHECK-NEXT: [[TMP6:%.*]] = or i64 [[INDEX]], 1
; CHECK-NEXT: [[NEXT_GEP4:%.*]] = getelementptr i16, i16* [[SRC]], i64 [[TMP6]]
; CHECK-NEXT: [[TMP7:%.*]] = load i16, i16* [[NEXT_GEP4]], align 2
; CHECK-NEXT: br label [[PRED_LOAD_CONTINUE12]]
; CHECK: pred.load.continue12:
; CHECK-NEXT: [[TMP8:%.*]] = phi i16 [ undef, [[PRED_LOAD_CONTINUE]] ], [ [[TMP7]], [[PRED_LOAD_IF11]] ]
; CHECK-NEXT: [[TMP9:%.*]] = extractelement <4 x i1> [[TMP1]], i32 2
; CHECK-NEXT: br i1 [[TMP9]], label [[PRED_LOAD_IF13:%.*]], label [[PRED_LOAD_CONTINUE14:%.*]]
; CHECK: pred.load.if13:
; CHECK-NEXT: [[TMP10:%.*]] = or i64 [[INDEX]], 2
; CHECK-NEXT: [[NEXT_GEP5:%.*]] = getelementptr i16, i16* [[SRC]], i64 [[TMP10]]
; CHECK-NEXT: [[TMP11:%.*]] = load i16, i16* [[NEXT_GEP5]], align 2
; CHECK-NEXT: br label [[PRED_LOAD_CONTINUE14]]
; CHECK: pred.load.continue14:
; CHECK-NEXT: [[TMP12:%.*]] = phi i16 [ undef, [[PRED_LOAD_CONTINUE12]] ], [ [[TMP11]], [[PRED_LOAD_IF13]] ]
; CHECK-NEXT: [[TMP13:%.*]] = extractelement <4 x i1> [[TMP1]], i32 3
; CHECK-NEXT: br i1 [[TMP13]], label [[PRED_LOAD_IF15:%.*]], label [[PRED_LOAD_CONTINUE16:%.*]]
; CHECK: pred.load.if15:
; CHECK-NEXT: [[TMP14:%.*]] = or i64 [[INDEX]], 3
; CHECK-NEXT: [[NEXT_GEP6:%.*]] = getelementptr i16, i16* [[SRC]], i64 [[TMP14]]
; CHECK-NEXT: [[TMP15:%.*]] = load i16, i16* [[NEXT_GEP6]], align 2
; CHECK-NEXT: br label [[PRED_LOAD_CONTINUE16]]
; CHECK: pred.load.continue16:
; CHECK-NEXT: [[TMP16:%.*]] = phi i16 [ undef, [[PRED_LOAD_CONTINUE14]] ], [ [[TMP15]], [[PRED_LOAD_IF15]] ]
; CHECK-NEXT: [[TMP17:%.*]] = extractelement <4 x i1> [[TMP1]], i32 0
; CHECK-NEXT: br i1 [[TMP17]], label [[PRED_STORE_IF:%.*]], label [[PRED_STORE_CONTINUE:%.*]]
; CHECK: pred.store.if:
; CHECK-NEXT: [[TMP18:%.*]] = zext i16 [[TMP4]] to i32
; CHECK-NEXT: [[TMP19:%.*]] = shl nuw nsw i32 [[TMP18]], 7
; CHECK-NEXT: [[NEXT_GEP7:%.*]] = getelementptr i32, i32* [[DST:%.*]], i64 [[INDEX]]
; CHECK-NEXT: store i32 [[TMP19]], i32* [[NEXT_GEP7]], align 4
; CHECK-NEXT: br label [[PRED_STORE_CONTINUE]]
; CHECK: pred.store.continue:
; CHECK-NEXT: [[TMP20:%.*]] = extractelement <4 x i1> [[TMP1]], i32 1
; CHECK-NEXT: br i1 [[TMP20]], label [[PRED_STORE_IF17:%.*]], label [[PRED_STORE_CONTINUE18:%.*]]
; CHECK: pred.store.if17:
; CHECK-NEXT: [[TMP21:%.*]] = zext i16 [[TMP8]] to i32
; CHECK-NEXT: [[TMP22:%.*]] = shl nuw nsw i32 [[TMP21]], 7
; CHECK-NEXT: [[TMP23:%.*]] = or i64 [[INDEX]], 1
; CHECK-NEXT: [[NEXT_GEP8:%.*]] = getelementptr i32, i32* [[DST]], i64 [[TMP23]]
; CHECK-NEXT: store i32 [[TMP22]], i32* [[NEXT_GEP8]], align 4
; CHECK-NEXT: br label [[PRED_STORE_CONTINUE18]]
; CHECK: pred.store.continue18:
; CHECK-NEXT: [[TMP24:%.*]] = extractelement <4 x i1> [[TMP1]], i32 2
; CHECK-NEXT: br i1 [[TMP24]], label [[PRED_STORE_IF19:%.*]], label [[PRED_STORE_CONTINUE20:%.*]]
; CHECK: pred.store.if19:
; CHECK-NEXT: [[TMP25:%.*]] = zext i16 [[TMP12]] to i32
; CHECK-NEXT: [[TMP26:%.*]] = shl nuw nsw i32 [[TMP25]], 7
; CHECK-NEXT: [[TMP27:%.*]] = or i64 [[INDEX]], 2
; CHECK-NEXT: [[NEXT_GEP9:%.*]] = getelementptr i32, i32* [[DST]], i64 [[TMP27]]
; CHECK-NEXT: store i32 [[TMP26]], i32* [[NEXT_GEP9]], align 4
; CHECK-NEXT: br label [[PRED_STORE_CONTINUE20]]
; CHECK: pred.store.continue20:
; CHECK-NEXT: [[TMP28:%.*]] = extractelement <4 x i1> [[TMP1]], i32 3
; CHECK-NEXT: br i1 [[TMP28]], label [[PRED_STORE_IF21:%.*]], label [[PRED_STORE_CONTINUE22]]
; CHECK: pred.store.if21:
; CHECK-NEXT: [[TMP29:%.*]] = zext i16 [[TMP16]] to i32
; CHECK-NEXT: [[TMP30:%.*]] = shl nuw nsw i32 [[TMP29]], 7
; CHECK-NEXT: [[TMP31:%.*]] = or i64 [[INDEX]], 3
; CHECK-NEXT: [[NEXT_GEP10:%.*]] = getelementptr i32, i32* [[DST]], i64 [[TMP31]]
; CHECK-NEXT: store i32 [[TMP30]], i32* [[NEXT_GEP10]], align 4
; CHECK-NEXT: br label [[PRED_STORE_CONTINUE22]]
; CHECK: pred.store.continue22:
; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 4
; CHECK-NEXT: [[TMP32:%.*]] = icmp eq i64 [[INDEX_NEXT]], 260
; CHECK-NEXT: br i1 [[TMP32]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop !8
; CHECK: middle.block:
; CHECK-NEXT: br i1 true, label [[TMP34:%.*]], label [[SCALAR_PH]]
; CHECK-NEXT: br label [[TMP33:%.*]]
; CHECK: br i1 undef, label [[TMP34]], label [[TMP33]], !llvm.loop !9
  br label %1

; <label>:1 ; preds = %1, %0
  %.04 = phi i16* [ %src, %0 ], [ %2, %1 ]
  %.013 = phi i32* [ %dst, %0 ], [ %6, %1 ]
  %i.02 = phi i64 [ 0, %0 ], [ %7, %1 ]
  %2 = getelementptr inbounds i16, i16* %.04, i64 1
  %3 = load i16, i16* %.04, align 2
  %4 = zext i16 %3 to i32
  %5 = shl nuw nsw i32 %4, 7
  %6 = getelementptr inbounds i32, i32* %.013, i64 1
  store i32 %5, i32* %.013, align 4
  %7 = add nsw i64 %i.02, 1
  %exitcond = icmp eq i64 %7, 257
  br i1 %exitcond, label %8, label %1

; <label>:8 ; preds = %1
  ret void
}

; We CAN'T vectorize this example because it would entail a tail and an
; induction is used outside the loop.
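;
; For reference, a rough C equivalent of the loop below, reconstructed from the
; IR (illustrative only; the function presumably returns the final induction
; value, which is what makes the escaping induction a problem here):
;
;   long example23d(unsigned short *restrict src, int *restrict dst) {
;     long i = 0;
;     do {
;       *dst++ = *src++ << 7;
;     } while (++i != 257);
;     return i;
;   }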
define i64 @example23d(i16* noalias nocapture %src, i32* noalias nocapture %dst) optsize {
;CHECK-LABEL: @example23d(
;CHECK-NOT: <4 x i32>
  br label %1

; <label>:1 ; preds = %1, %0
  %.04 = phi i16* [ %src, %0 ], [ %2, %1 ]
  %.013 = phi i32* [ %dst, %0 ], [ %6, %1 ]
  %i.02 = phi i64 [ 0, %0 ], [ %7, %1 ]
  %2 = getelementptr inbounds i16, i16* %.04, i64 1
  %3 = load i16, i16* %.04, align 2
  %4 = zext i16 %3 to i32
  %5 = shl nuw nsw i32 %4, 7
  %6 = getelementptr inbounds i32, i32* %.013, i64 1
  store i32 %5, i32* %.013, align 4
  %7 = add nsw i64 %i.02, 1
  %exitcond = icmp eq i64 %7, 257
  br i1 %exitcond, label %8, label %1

; <label>:8 ; preds = %1
  ret i64 %7
}