; RUN: opt < %s -loop-vectorize -force-vector-width=4 -force-vector-interleave=1 -dce -instcombine -S | FileCheck %s
; RUN: opt < %s -loop-vectorize -force-vector-width=4 -force-vector-interleave=2 -dce -instcombine -S | FileCheck %s --check-prefix=UNROLL
; RUN: opt < %s -loop-vectorize -force-vector-width=4 -force-vector-interleave=2 -S | FileCheck %s --check-prefix=UNROLL-NO-IC
; RUN: opt < %s -loop-vectorize -force-vector-width=1 -force-vector-interleave=2 -S | FileCheck %s --check-prefix=UNROLL-NO-VF
; RUN: opt < %s -loop-vectorize -force-vector-width=4 -force-vector-interleave=1 -S | FileCheck %s --check-prefix=SINK-AFTER
; RUN: opt < %s -loop-vectorize -force-vector-width=4 -force-vector-interleave=1 -S | FileCheck %s --check-prefix=NO-SINK-AFTER

target datalayout = "e-m:e-i64:64-i128:128-n32:64-S128"
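
; This file tests vectorization of first-order recurrences: loops in which a
; value computed in one iteration is used in the next. As the CHECK lines below
; reflect, the expected strategy is to seed a recurrence vector with the
; initial scalar in its last lane, splice consecutive vectors with a
; shufflevector <3, 4, 5, 6> mask, and extract the last lane in middle.block to
; feed the scalar epilogue loop.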
; void recurrence_1(int *a, int *b, int n) {
;   for(int i = 0; i < n; i++)
;     b[i] = a[i] + a[i - 1];
; }
;
; CHECK-LABEL: @recurrence_1(
; CHECK: vector.ph:
; CHECK: %vector.recur.init = insertelement <4 x i32> undef, i32 %pre_load, i32 3
; CHECK: vector.body:
; CHECK: %vector.recur = phi <4 x i32> [ %vector.recur.init, %vector.ph ], [ [[L1:%[a-zA-Z0-9.]+]], %vector.body ]
; CHECK: [[L1]] = load <4 x i32>
; CHECK: {{.*}} = shufflevector <4 x i32> %vector.recur, <4 x i32> [[L1]], <4 x i32> <i32 3, i32 4, i32 5, i32 6>
; CHECK: middle.block:
; CHECK: %vector.recur.extract = extractelement <4 x i32> [[L1]], i32 3
; CHECK: scalar.ph:
; CHECK: %scalar.recur.init = phi i32 [ %vector.recur.extract, %middle.block ], [ %pre_load, %vector.memcheck ], [ %pre_load, %for.preheader ]
; CHECK: scalar.body:
; CHECK: %scalar.recur = phi i32 [ %scalar.recur.init, %scalar.ph ], [ {{.*}}, %scalar.body ]
;
; UNROLL-LABEL: @recurrence_1(
; UNROLL: vector.body:
; UNROLL: %vector.recur = phi <4 x i32> [ %vector.recur.init, %vector.ph ], [ [[L2:%[a-zA-Z0-9.]+]], %vector.body ]
; UNROLL: [[L1:%[a-zA-Z0-9.]+]] = load <4 x i32>
; UNROLL: [[L2]] = load <4 x i32>
; UNROLL: {{.*}} = shufflevector <4 x i32> %vector.recur, <4 x i32> [[L1]], <4 x i32> <i32 3, i32 4, i32 5, i32 6>
; UNROLL: {{.*}} = shufflevector <4 x i32> [[L1]], <4 x i32> [[L2]], <4 x i32> <i32 3, i32 4, i32 5, i32 6>
; UNROLL: middle.block:
; UNROLL: %vector.recur.extract = extractelement <4 x i32> [[L2]], i32 3

define void @recurrence_1(i32* nocapture readonly %a, i32* nocapture %b, i32 %n) {
entry:
  br label %for.preheader

for.preheader:
  %arrayidx.phi.trans.insert = getelementptr inbounds i32, i32* %a, i64 0
  %pre_load = load i32, i32* %arrayidx.phi.trans.insert
  br label %scalar.body

scalar.body:
  %0 = phi i32 [ %pre_load, %for.preheader ], [ %1, %scalar.body ]
  %indvars.iv = phi i64 [ 0, %for.preheader ], [ %indvars.iv.next, %scalar.body ]
  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
  %arrayidx32 = getelementptr inbounds i32, i32* %a, i64 %indvars.iv.next
  %1 = load i32, i32* %arrayidx32
  %arrayidx34 = getelementptr inbounds i32, i32* %b, i64 %indvars.iv
  %add35 = add i32 %1, %0
  store i32 %add35, i32* %arrayidx34
  %lftr.wideiv = trunc i64 %indvars.iv.next to i32
  %exitcond = icmp eq i32 %lftr.wideiv, %n
  br i1 %exitcond, label %for.exit, label %scalar.body

for.exit:
  ret void
}

; int recurrence_2(int *a, int n) {
;   int minmax;
;   for (int i = 0; i < n; ++i)
;     minmax = min(minmax, max(a[i] - a[i-1], 0));
;   return minmax;
; }
;
; CHECK-LABEL: @recurrence_2(
; CHECK: vector.ph:
; CHECK: %vector.recur.init = insertelement <4 x i32> undef, i32 %.pre, i32 3
; CHECK: vector.body:
; CHECK: %vector.recur = phi <4 x i32> [ %vector.recur.init, %vector.ph ], [ [[L1:%[a-zA-Z0-9.]+]], %vector.body ]
; CHECK: [[L1]] = load <4 x i32>
; CHECK: {{.*}} = shufflevector <4 x i32> %vector.recur, <4 x i32> [[L1]], <4 x i32> <i32 3, i32 4, i32 5, i32 6>
; CHECK: middle.block:
; CHECK: %vector.recur.extract = extractelement <4 x i32> [[L1]], i32 3
; CHECK: scalar.ph:
; CHECK: %scalar.recur.init = phi i32 [ %vector.recur.extract, %middle.block ], [ %.pre, %for.preheader ]
; CHECK: scalar.body:
; CHECK: %scalar.recur = phi i32 [ %scalar.recur.init, %scalar.ph ], [ {{.*}}, %scalar.body ]
;
; UNROLL-LABEL: @recurrence_2(
; UNROLL: vector.body:
; UNROLL: %vector.recur = phi <4 x i32> [ %vector.recur.init, %vector.ph ], [ [[L2:%[a-zA-Z0-9.]+]], %vector.body ]
; UNROLL: [[L1:%[a-zA-Z0-9.]+]] = load <4 x i32>
; UNROLL: [[L2]] = load <4 x i32>
; UNROLL: {{.*}} = shufflevector <4 x i32> %vector.recur, <4 x i32> [[L1]], <4 x i32> <i32 3, i32 4, i32 5, i32 6>
; UNROLL: {{.*}} = shufflevector <4 x i32> [[L1]], <4 x i32> [[L2]], <4 x i32> <i32 3, i32 4, i32 5, i32 6>
; UNROLL: middle.block:
; UNROLL: %vector.recur.extract = extractelement <4 x i32> [[L2]], i32 3

define i32 @recurrence_2(i32* nocapture readonly %a, i32 %n) {
entry:
  %cmp27 = icmp sgt i32 %n, 0
  br i1 %cmp27, label %for.preheader, label %for.cond.cleanup

for.preheader:
  %arrayidx2.phi.trans.insert = getelementptr inbounds i32, i32* %a, i64 -1
  %.pre = load i32, i32* %arrayidx2.phi.trans.insert, align 4
  br label %scalar.body

for.cond.cleanup.loopexit:
  %minmax.0.cond.lcssa = phi i32 [ %minmax.0.cond, %scalar.body ]
  br label %for.cond.cleanup

for.cond.cleanup:
  %minmax.0.lcssa = phi i32 [ undef, %entry ], [ %minmax.0.cond.lcssa, %for.cond.cleanup.loopexit ]
  ret i32 %minmax.0.lcssa

scalar.body:
  %0 = phi i32 [ %.pre, %for.preheader ], [ %1, %scalar.body ]
  %indvars.iv = phi i64 [ 0, %for.preheader ], [ %indvars.iv.next, %scalar.body ]
  %minmax.028 = phi i32 [ undef, %for.preheader ], [ %minmax.0.cond, %scalar.body ]
  %arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
  %1 = load i32, i32* %arrayidx, align 4
  %sub3 = sub nsw i32 %1, %0
  %cmp4 = icmp sgt i32 %sub3, 0
  %cond = select i1 %cmp4, i32 %sub3, i32 0
  %cmp5 = icmp slt i32 %minmax.028, %cond
  %minmax.0.cond = select i1 %cmp5, i32 %minmax.028, i32 %cond
  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
  %lftr.wideiv = trunc i64 %indvars.iv.next to i32
  %exitcond = icmp eq i32 %lftr.wideiv, %n
  br i1 %exitcond, label %for.cond.cleanup.loopexit, label %scalar.body
}

; void recurrence_3(short *a, double *b, int n, float f, short p) {
;   b[0] = (double)a[0] - f * (double)p;
;   for (int i = 1; i < n; i++)
;     b[i] = (double)a[i] - f * (double)a[i - 1];
; }
;
; CHECK-LABEL: @recurrence_3(
; CHECK: vector.ph:
; CHECK: %vector.recur.init = insertelement <4 x i16> undef, i16 %0, i32 3
; CHECK: vector.body:
; CHECK: %vector.recur = phi <4 x i16> [ %vector.recur.init, %vector.ph ], [ [[L1:%[a-zA-Z0-9.]+]], %vector.body ]
; CHECK: [[L1]] = load <4 x i16>
; CHECK: [[SHUF:%[a-zA-Z0-9.]+]] = shufflevector <4 x i16> %vector.recur, <4 x i16> [[L1]], <4 x i32> <i32 3, i32 4, i32 5, i32 6>
; Check also that the casts were not moved needlessly.
; CHECK: sitofp <4 x i16> [[L1]] to <4 x double>
; CHECK: sitofp <4 x i16> [[SHUF]] to <4 x double>
; CHECK: middle.block:
; CHECK: %vector.recur.extract = extractelement <4 x i16> [[L1]], i32 3
; CHECK: scalar.ph:
; CHECK: %scalar.recur.init = phi i16 [ %vector.recur.extract, %middle.block ], [ %0, %vector.memcheck ], [ %0, %for.preheader ]
; CHECK: scalar.body:
; CHECK: %scalar.recur = phi i16 [ %scalar.recur.init, %scalar.ph ], [ {{.*}}, %scalar.body ]
;
; UNROLL-LABEL: @recurrence_3(
; UNROLL: vector.body:
; UNROLL: %vector.recur = phi <4 x i16> [ %vector.recur.init, %vector.ph ], [ [[L2:%[a-zA-Z0-9.]+]], %vector.body ]
; UNROLL: [[L1:%[a-zA-Z0-9.]+]] = load <4 x i16>
; UNROLL: [[L2]] = load <4 x i16>
; UNROLL: {{.*}} = shufflevector <4 x i16> %vector.recur, <4 x i16> [[L1]], <4 x i32> <i32 3, i32 4, i32 5, i32 6>
; UNROLL: {{.*}} = shufflevector <4 x i16> [[L1]], <4 x i16> [[L2]], <4 x i32> <i32 3, i32 4, i32 5, i32 6>
; UNROLL: middle.block:
; UNROLL: %vector.recur.extract = extractelement <4 x i16> [[L2]], i32 3

define void @recurrence_3(i16* nocapture readonly %a, double* nocapture %b, i32 %n, float %f, i16 %p) {
entry:
  %0 = load i16, i16* %a, align 2
  %conv = sitofp i16 %0 to double
  %conv1 = fpext float %f to double
  %conv2 = sitofp i16 %p to double
  %mul = fmul fast double %conv2, %conv1
  %sub = fsub fast double %conv, %mul
  store double %sub, double* %b, align 8
  %cmp25 = icmp sgt i32 %n, 1
  br i1 %cmp25, label %for.preheader, label %for.end

for.preheader:
  br label %scalar.body

scalar.body:
  %1 = phi i16 [ %0, %for.preheader ], [ %2, %scalar.body ]
  %advars.iv = phi i64 [ %advars.iv.next, %scalar.body ], [ 1, %for.preheader ]
  %arrayidx5 = getelementptr inbounds i16, i16* %a, i64 %advars.iv
  %2 = load i16, i16* %arrayidx5, align 2
  %conv6 = sitofp i16 %2 to double
  %conv11 = sitofp i16 %1 to double
  %mul12 = fmul fast double %conv11, %conv1
  %sub13 = fsub fast double %conv6, %mul12
  %arrayidx15 = getelementptr inbounds double, double* %b, i64 %advars.iv
  store double %sub13, double* %arrayidx15, align 8
  %advars.iv.next = add nuw nsw i64 %advars.iv, 1
  %lftr.wideiv = trunc i64 %advars.iv.next to i32
  %exitcond = icmp eq i32 %lftr.wideiv, %n
  br i1 %exitcond, label %for.end.loopexit, label %scalar.body

for.end.loopexit:
  br label %for.end

for.end:
  ret void
}

; void PR26734(short *a, int *b, int *c, int d, short *e) {
;   for (; d != 21; d++) {
;     *b &= *c;
;     *e = *a - 6;
;     *c = *e;
;   }
; }
;
; CHECK-LABEL: @PR26734(
; CHECK-NOT: vector.ph:

define void @PR26734(i16* %a, i32* %b, i32* %c, i32 %d, i16* %e) {
entry:
  %cmp4 = icmp eq i32 %d, 21
  br i1 %cmp4, label %entry.for.end_crit_edge, label %for.body.lr.ph

entry.for.end_crit_edge:
  %.pre = load i32, i32* %b, align 4
  br label %for.end

for.body.lr.ph:
  %0 = load i16, i16* %a, align 2
  %sub = add i16 %0, -6
  %conv2 = sext i16 %sub to i32
  %c.promoted = load i32, i32* %c, align 4
  %b.promoted = load i32, i32* %b, align 4
  br label %for.body

for.body:
  %inc7 = phi i32 [ %d, %for.body.lr.ph ], [ %inc, %for.body ]
  %and6 = phi i32 [ %b.promoted, %for.body.lr.ph ], [ %and, %for.body ]
  %conv25 = phi i32 [ %c.promoted, %for.body.lr.ph ], [ %conv2, %for.body ]
  %and = and i32 %and6, %conv25
  %inc = add nsw i32 %inc7, 1
  %cmp = icmp eq i32 %inc, 21
  br i1 %cmp, label %for.cond.for.end_crit_edge, label %for.body

for.cond.for.end_crit_edge:
  %and.lcssa = phi i32 [ %and, %for.body ]
  store i32 %conv2, i32* %c, align 4
  store i32 %and.lcssa, i32* %b, align 4
  store i16 %sub, i16* %e, align 2
  br label %for.end

for.end:
  ret void
}

; int PR27246() {
;   unsigned int e, n;
;   for (int i = 1; i < 49; ++i) {
;     for (int k = i; k > 1; --k)
;       e = k;
;     n = e;
;   }
;   return n;
; }
;
; CHECK-LABEL: @PR27246(
; CHECK-NOT: vector.ph:

define i32 @PR27246() {
entry:
  br label %for.cond1.preheader

for.cond1.preheader:
  %i.016 = phi i32 [ 1, %entry ], [ %inc, %for.cond.cleanup3 ]
  %e.015 = phi i32 [ undef, %entry ], [ %e.1.lcssa, %for.cond.cleanup3 ]
  br label %for.cond1

for.cond.cleanup:
  %e.1.lcssa.lcssa = phi i32 [ %e.1.lcssa, %for.cond.cleanup3 ]
  ret i32 %e.1.lcssa.lcssa

for.cond1:
  %e.1 = phi i32 [ %k.0, %for.cond1 ], [ %e.015, %for.cond1.preheader ]
  %k.0 = phi i32 [ %dec, %for.cond1 ], [ %i.016, %for.cond1.preheader ]
  %cmp2 = icmp sgt i32 %k.0, 1
  %dec = add nsw i32 %k.0, -1
  br i1 %cmp2, label %for.cond1, label %for.cond.cleanup3

for.cond.cleanup3:
  %e.1.lcssa = phi i32 [ %e.1, %for.cond1 ]
  %inc = add nuw nsw i32 %i.016, 1
  %exitcond = icmp eq i32 %inc, 49
  br i1 %exitcond, label %for.cond.cleanup, label %for.cond1.preheader
}

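; PR30183: the loop below steps its induction variable by 2, so the recurring
; load is scalarized rather than widened; check that the recurrence vectors are
; assembled from the scalar loads with in-order insertelements before the
; splicing shuffles.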
; UNROLL-NO-IC-LABEL: @PR30183(
; UNROLL-NO-IC: vector.ph:
; UNROLL-NO-IC: [[VECTOR_RECUR_INIT:%.*]] = insertelement <4 x i32> undef, i32 [[PRE_LOAD:%.*]], i32 3
; UNROLL-NO-IC-NEXT: br label %vector.body
; UNROLL-NO-IC: vector.body:
; UNROLL-NO-IC-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %vector.ph ], [ [[INDEX_NEXT:%.*]], %vector.body ]
; UNROLL-NO-IC-NEXT: [[VECTOR_RECUR:%.*]] = phi <4 x i32> [ [[VECTOR_RECUR_INIT]], %vector.ph ], [ [[TMP42:%.*]], %vector.body ]
; UNROLL-NO-IC: [[TMP27:%.*]] = load i32, i32* {{.*}}
; UNROLL-NO-IC-NEXT: [[TMP28:%.*]] = load i32, i32* {{.*}}
; UNROLL-NO-IC-NEXT: [[TMP29:%.*]] = load i32, i32* {{.*}}
; UNROLL-NO-IC-NEXT: [[TMP30:%.*]] = load i32, i32* {{.*}}
; UNROLL-NO-IC-NEXT: [[TMP35:%.*]] = insertelement <4 x i32> undef, i32 [[TMP27]], i32 0
; UNROLL-NO-IC-NEXT: [[TMP36:%.*]] = insertelement <4 x i32> [[TMP35]], i32 [[TMP28]], i32 1
; UNROLL-NO-IC-NEXT: [[TMP37:%.*]] = insertelement <4 x i32> [[TMP36]], i32 [[TMP29]], i32 2
; UNROLL-NO-IC-NEXT: [[TMP38:%.*]] = insertelement <4 x i32> [[TMP37]], i32 [[TMP30]], i32 3
; UNROLL-NO-IC-NEXT: [[TMP31:%.*]] = load i32, i32* {{.*}}
; UNROLL-NO-IC-NEXT: [[TMP32:%.*]] = load i32, i32* {{.*}}
; UNROLL-NO-IC-NEXT: [[TMP33:%.*]] = load i32, i32* {{.*}}
; UNROLL-NO-IC-NEXT: [[TMP34:%.*]] = load i32, i32* {{.*}}
; UNROLL-NO-IC-NEXT: [[TMP39:%.*]] = insertelement <4 x i32> undef, i32 [[TMP31]], i32 0
; UNROLL-NO-IC-NEXT: [[TMP40:%.*]] = insertelement <4 x i32> [[TMP39]], i32 [[TMP32]], i32 1
; UNROLL-NO-IC-NEXT: [[TMP41:%.*]] = insertelement <4 x i32> [[TMP40]], i32 [[TMP33]], i32 2
; UNROLL-NO-IC-NEXT: [[TMP42]] = insertelement <4 x i32> [[TMP41]], i32 [[TMP34]], i32 3
; UNROLL-NO-IC-NEXT: [[TMP43:%.*]] = shufflevector <4 x i32> [[VECTOR_RECUR]], <4 x i32> [[TMP38]], <4 x i32> <i32 3, i32 4, i32 5, i32 6>
; UNROLL-NO-IC-NEXT: [[TMP44:%.*]] = shufflevector <4 x i32> [[TMP38]], <4 x i32> [[TMP42]], <4 x i32> <i32 3, i32 4, i32 5, i32 6>
; UNROLL-NO-IC-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 8
; UNROLL-NO-IC: br i1 {{.*}}, label %middle.block, label %vector.body

define void @PR30183(i32 %pre_load, i32* %a, i32* %b, i64 %n) {
entry:
  br label %scalar.body

scalar.body:
  %i = phi i64 [ 0, %entry ], [ %i.next, %scalar.body ]
  %tmp0 = phi i32 [ %pre_load, %entry ], [ %tmp2, %scalar.body ]
  %i.next = add nuw nsw i64 %i, 2
  %tmp1 = getelementptr inbounds i32, i32* %a, i64 %i.next
  %tmp2 = load i32, i32* %tmp1
  %cond = icmp eq i64 %i.next, %n
  br i1 %cond, label %for.end, label %scalar.body

for.end:
  ret void
}

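; Check the recurrence when its previous value folds to a constant: the
; recurrence vector becomes a constant splat (<1, 1, 1, 1>) and only the
; initial value needs to be placed in the last lane of the init vector.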
; UNROLL-NO-IC-LABEL: @constant_folded_previous_value(
; UNROLL-NO-IC: vector.body:
; UNROLL-NO-IC: [[VECTOR_RECUR:%.*]] = phi <4 x i64> [ <i64 undef, i64 undef, i64 undef, i64 0>, %vector.ph ], [ <i64 1, i64 1, i64 1, i64 1>, %vector.body ]
; UNROLL-NO-IC-NEXT: [[TMP0:%.*]] = shufflevector <4 x i64> [[VECTOR_RECUR]], <4 x i64> <i64 1, i64 1, i64 1, i64 1>, <4 x i32> <i32 3, i32 4, i32 5, i32 6>
; UNROLL-NO-IC: br i1 {{.*}}, label %middle.block, label %vector.body

define void @constant_folded_previous_value() {
entry:
  br label %scalar.body

scalar.body:
  %i = phi i64 [ 0, %entry ], [ %i.next, %scalar.body ]
  %tmp2 = phi i64 [ 0, %entry ], [ %tmp3, %scalar.body ]
  %tmp3 = add i64 0, 1
  %i.next = add nuw nsw i64 %i, 1
  %cond = icmp eq i64 %i.next, undef
  br i1 %cond, label %for.end, label %scalar.body

for.end:
  ret void
}

; We vectorize this first-order recurrence by generating two extracts for the
; phi `val.phi`: one at the last index and another at the second-to-last index.
; We need these two extracts because the first-order recurrence phi is used
; outside the loop, so we require the phi itself and not its update (addx).
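;
; A rough C equivalent of the loop below, reconstructed here only for
; illustration (the test provides the IR directly; the variable names are
; hypothetical):
;
;   int extract_second_last_iteration(int *cval, int x) {
;     int val = 0;
;     for (int i = 0; ; ++i) {
;       int prev = val;   // recurrence value from the previous iteration
;       val = i + x;
;       if (i == 95)
;         return prev;    // the phi itself, i.e. the second-to-last update
;     }
;   }
;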
; UNROLL-NO-IC-LABEL: extract_second_last_iteration
; UNROLL-NO-IC: vector.body
; UNROLL-NO-IC: %step.add = add <4 x i32> %vec.ind, <i32 4, i32 4, i32 4, i32 4>
; UNROLL-NO-IC: %[[L1:.+]] = add <4 x i32> %vec.ind, %broadcast.splat
; UNROLL-NO-IC: %[[L2:.+]] = add <4 x i32> %step.add, %broadcast.splat
; UNROLL-NO-IC: %index.next = add i32 %index, 8
; UNROLL-NO-IC: icmp eq i32 %index.next, 96
; UNROLL-NO-IC: middle.block
; UNROLL-NO-IC: icmp eq i32 96, 96
; UNROLL-NO-IC: %vector.recur.extract = extractelement <4 x i32> %[[L2]], i32 3
; UNROLL-NO-IC: %vector.recur.extract.for.phi = extractelement <4 x i32> %[[L2]], i32 2
; UNROLL-NO-IC: for.end
; UNROLL-NO-IC: %val.phi.lcssa = phi i32 [ %scalar.recur, %for.body ], [ %vector.recur.extract.for.phi, %middle.block ]
; Check the case when unrolled but not vectorized.
; UNROLL-NO-VF-LABEL: extract_second_last_iteration
; UNROLL-NO-VF: vector.body:
; UNROLL-NO-VF: %induction = add i32 %index, 0
; UNROLL-NO-VF: %induction1 = add i32 %index, 1
; UNROLL-NO-VF: %[[L1:.+]] = add i32 %induction, %x
; UNROLL-NO-VF: %[[L2:.+]] = add i32 %induction1, %x
; UNROLL-NO-VF: %index.next = add i32 %index, 2
; UNROLL-NO-VF: icmp eq i32 %index.next, 96
; UNROLL-NO-VF: for.end:
; UNROLL-NO-VF: %val.phi.lcssa = phi i32 [ %scalar.recur, %for.body ], [ %[[L1]], %middle.block ]
define i32 @extract_second_last_iteration(i32* %cval, i32 %x) {
entry:
  br label %for.body

for.body:
  %inc.phi = phi i32 [ 0, %entry ], [ %inc, %for.body ]
  %val.phi = phi i32 [ 0, %entry ], [ %addx, %for.body ]
  %inc = add i32 %inc.phi, 1
  %bc = zext i32 %inc.phi to i64
  %addx = add i32 %inc.phi, %x
  %cmp = icmp eq i32 %inc.phi, 95
  br i1 %cmp, label %for.end, label %for.body

for.end:
  ret i32 %val.phi
}

; We vectorize this first-order recurrence with a set of insertelements for
; each unrolled part. Make sure these insertelements are generated in order,
; because the shuffle of the first-order recurrence will be added after the
; insertelement of the last part (UF - 1), assuming the latter appears after
; the insertelements of all other parts.
;
; int PR33613(double *b, double j, int d) {
;   int a = 0;
;   for(int i = 0; i < 10240; i++, b+=25) {
;     double f = b[d]; // Scalarize to form insertelements
;     if (j * f)
;       a++;
;     j = f;
;   }
;   return a;
; }
;
; UNROLL-NO-IC-LABEL: @PR33613(
; UNROLL-NO-IC: vector.body:
; UNROLL-NO-IC: [[VECTOR_RECUR:%.*]] = phi <4 x double>
; UNROLL-NO-IC: shufflevector <4 x double> [[VECTOR_RECUR]], <4 x double> {{.*}}, <4 x i32> <i32 3, i32 4, i32 5, i32 6>
; UNROLL-NO-IC-NEXT: shufflevector <4 x double> {{.*}}, <4 x double> {{.*}}, <4 x i32> <i32 3, i32 4, i32 5, i32 6>
; UNROLL-NO-IC-NOT: insertelement <4 x double>
; UNROLL-NO-IC: middle.block:

define i32 @PR33613(double* %b, double %j, i32 %d) {
entry:
  %idxprom = sext i32 %d to i64
  br label %for.body

for.cond.cleanup:
  %a.1.lcssa = phi i32 [ %a.1, %for.body ]
  ret i32 %a.1.lcssa

for.body:
  %b.addr.012 = phi double* [ %b, %entry ], [ %add.ptr, %for.body ]
  %i.011 = phi i32 [ 0, %entry ], [ %inc1, %for.body ]
  %a.010 = phi i32 [ 0, %entry ], [ %a.1, %for.body ]
  %j.addr.09 = phi double [ %j, %entry ], [ %0, %for.body ]
  %arrayidx = getelementptr inbounds double, double* %b.addr.012, i64 %idxprom
  %0 = load double, double* %arrayidx, align 8
  %mul = fmul double %j.addr.09, %0
  %tobool = fcmp une double %mul, 0.000000e+00
  %inc = zext i1 %tobool to i32
  %a.1 = add nsw i32 %a.010, %inc
  %inc1 = add nuw nsw i32 %i.011, 1
  %add.ptr = getelementptr inbounds double, double* %b.addr.012, i64 25
  %exitcond = icmp eq i32 %inc1, 10240
  br i1 %exitcond, label %for.cond.cleanup, label %for.body
}

; void sink_after(short *a, int n, int *b) {
;   for(int i = 0; i < n; i++)
;     b[i] = (a[i] * a[i + 1]);
; }
;
; SINK-AFTER-LABEL: sink_after
; Check that the sext sank after the load in the vector loop.
; SINK-AFTER: vector.body
; SINK-AFTER: %vector.recur = phi <4 x i16> [ %vector.recur.init, %vector.ph ], [ %wide.load, %vector.body ]
; SINK-AFTER: %wide.load = load <4 x i16>
; SINK-AFTER: %[[VSHUF:.+]] = shufflevector <4 x i16> %vector.recur, <4 x i16> %wide.load, <4 x i32> <i32 3, i32 4, i32 5, i32 6>
; SINK-AFTER: %[[VCONV:.+]] = sext <4 x i16> %[[VSHUF]] to <4 x i32>
; SINK-AFTER: %[[VCONV3:.+]] = sext <4 x i16> %wide.load to <4 x i32>
; SINK-AFTER: mul nsw <4 x i32> %[[VCONV3]], %[[VCONV]]

define void @sink_after(i16* %a, i32* %b, i64 %n) {
entry:
  %.pre = load i16, i16* %a
  br label %for.body

for.body:
  %0 = phi i16 [ %.pre, %entry ], [ %1, %for.body ]
  %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
  %conv = sext i16 %0 to i32
  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
  %arrayidx2 = getelementptr inbounds i16, i16* %a, i64 %indvars.iv.next
  %1 = load i16, i16* %arrayidx2
  %conv3 = sext i16 %1 to i32
  %mul = mul nsw i32 %conv3, %conv
  %arrayidx5 = getelementptr inbounds i32, i32* %b, i64 %indvars.iv
  store i32 %mul, i32* %arrayidx5
  %exitcond = icmp eq i64 %indvars.iv.next, %n
  br i1 %exitcond, label %for.end, label %for.body

for.end:
  ret void
}

; PR34711: three consecutive instructions where the first will be widened,
; the second is a cast that will be widened and needs to sink after the third,
; and the third is a first-order-recurring load that will be replicated rather
; than widened. Although the cast and the first instruction will both be
; widened and are originally adjacent to each other, make sure the replicated
; load ends up appearing between them.
;
; void PR34711(short[2] *a, int *b, int *c, int n) {
;   for(int i = 0; i < n; i++) {
;     c[i] = 7;
;     b[i] = (a[i][0] * a[i][1]);
;   }
; }
;
; SINK-AFTER-LABEL: @PR34711
; Check that the sext sank after the load in the vector loop.
; SINK-AFTER: vector.body
; SINK-AFTER: %vector.recur = phi <4 x i16> [ %vector.recur.init, %vector.ph ], [ {{.*}}, %vector.body ]
; SINK-AFTER: %[[VSHUF:.+]] = shufflevector <4 x i16> %vector.recur, <4 x i16> %{{.*}}, <4 x i32> <i32 3, i32 4, i32 5, i32 6>
; SINK-AFTER: %[[VCONV:.+]] = sext <4 x i16> %[[VSHUF]] to <4 x i32>
; SINK-AFTER: %[[VCONV3:.+]] = sext <4 x i16> {{.*}} to <4 x i32>
; SINK-AFTER: mul nsw <4 x i32> %[[VCONV3]], %[[VCONV]]

define void @PR34711([2 x i16]* %a, i32* %b, i32* %c, i64 %n) {
entry:
  %pre.index = getelementptr inbounds [2 x i16], [2 x i16]* %a, i64 0, i64 0
  %.pre = load i16, i16* %pre.index
  br label %for.body

for.body:
  %0 = phi i16 [ %.pre, %entry ], [ %1, %for.body ]
  %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
  %arraycidx = getelementptr inbounds i32, i32* %c, i64 %indvars.iv
  %cur.index = getelementptr inbounds [2 x i16], [2 x i16]* %a, i64 %indvars.iv, i64 1
  store i32 7, i32* %arraycidx   ; 1st instruction, to be widened.
  %conv = sext i16 %0 to i32     ; 2nd, cast to be widened and sunk after the 3rd.
  %1 = load i16, i16* %cur.index ; 3rd, first-order-recurring load, replicated rather than widened.
  %conv3 = sext i16 %1 to i32
  %mul = mul nsw i32 %conv3, %conv
  %arrayidx5 = getelementptr inbounds i32, i32* %b, i64 %indvars.iv
  store i32 %mul, i32* %arrayidx5
  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
  %exitcond = icmp eq i64 %indvars.iv.next, %n
  br i1 %exitcond, label %for.end, label %for.body

for.end:
  ret void
}

; void no_sink_after(short *a, int n, int *b) {
;   for(int i = 0; i < n; i++)
;     b[i] = ((a[i] + 2) * a[i + 1]);
; }
;
; NO-SINK-AFTER-LABEL: no_sink_after
; NO-SINK-AFTER-NOT: vector.ph:

define void @no_sink_after(i16* %a, i32* %b, i64 %n) {
entry:
  %.pre = load i16, i16* %a
  br label %for.body

for.body:
  %0 = phi i16 [ %.pre, %entry ], [ %1, %for.body ]
  %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
  %conv = sext i16 %0 to i32
  %add = add nsw i32 %conv, 2
  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
  %arrayidx2 = getelementptr inbounds i16, i16* %a, i64 %indvars.iv.next
  %1 = load i16, i16* %arrayidx2
  %conv3 = sext i16 %1 to i32
  %mul = mul nsw i32 %add, %conv3
  %arrayidx5 = getelementptr inbounds i32, i32* %b, i64 %indvars.iv
  store i32 %mul, i32* %arrayidx5
  %exitcond = icmp eq i64 %indvars.iv.next, %n
  br i1 %exitcond, label %for.end, label %for.body

for.end:
  ret void
}