; int A[1024], B[1024];
;
; void foo(int iCount, int c, int jCount)
; {
;
;   int i, j;
;
; #pragma clang loop vectorize(enable) vectorize_width(4)
;   for (i = 0; i < iCount; i++) {
;     A[i] = c;
;     for (j = 0; j < jCount; j++) {
;       A[i] += B[j] + i;
;     }
;   }
; }
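;
; This test exercises the VPlan native path (-enable-vplan-native-path):
; the annotated *outer* i-loop is vectorized with VF = 4, so the inner
; j-loop executes with vector operands (widened gathers/scatters) rather
; than being vectorized itself.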
; RUN: opt -S -loop-vectorize -enable-vplan-native-path < %s | FileCheck %s
; CHECK: %[[ZeroTripChk:.*]] = icmp sgt i32 %jCount, 0
; CHECK-LABEL: vector.ph:
; CHECK: %[[CVal0:.*]] = insertelement <4 x i32> undef, i32 %c, i32 0
; CHECK-NEXT: %[[CSplat:.*]] = shufflevector <4 x i32> %[[CVal0]], <4 x i32> undef, <4 x i32> zeroinitializer
; CHECK: %[[ZVal0:.*]] = insertelement <4 x i1> undef, i1 %[[ZeroTripChk]], i32 0
; CHECK-NEXT: %[[ZSplat:.*]] = shufflevector <4 x i1> %[[ZVal0]], <4 x i1> undef, <4 x i32> zeroinitializer
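;
; In the vector preheader both the invariant store value %c and the
; inner-loop zero-trip check (jCount > 0) are splat across all four lanes.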
; CHECK-LABEL: vector.body:
; CHECK: %[[Ind:.*]] = phi i64 [ 0, %vector.ph ], [ %[[IndNext:.*]], %[[ForInc:.*]] ]
; CHECK: %[[VecInd:.*]] = phi <4 x i64> [ <i64 0, i64 1, i64 2, i64 3>, %vector.ph ], [ %[[VecIndNext:.*]], %[[ForInc]] ]
; CHECK: %[[AAddr:.*]] = getelementptr inbounds [1024 x i32], [1024 x i32]* @A, i64 0, <4 x i64> %[[VecInd]]
; CHECK: call void @llvm.masked.scatter.v4i32.v4p0i32(<4 x i32> %[[CSplat]], <4 x i32*> %[[AAddr]], i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>)
; CHECK: %[[ZCmpExtr:.*]] = extractelement <4 x i1> %[[ZSplat]], i32 0
; CHECK: br i1 %[[ZCmpExtr]], label %[[InnerForPh:.*]], label %[[OuterInc:.*]]
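;
; The outer induction variable is widened to <4 x i64>, A[i] = c becomes a
; masked scatter to four consecutive elements of @A, and the uniform
; zero-trip check is extracted from lane 0 to guard the inner loop.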
; CHECK: [[InnerForPh]]:
; CHECK: %[[WideAVal:.*]] = call <4 x i32> @llvm.masked.gather.v4i32.v4p0i32(<4 x i32*> %[[AAddr]], i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i32> undef)
; CHECK: %[[VecIndTr:.*]] = trunc <4 x i64> %[[VecInd]] to <4 x i32>
; CHECK: br label %[[InnerForBody:.*]]
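;
; The inner-loop preheader reloads A[i..i+3] with a masked gather (the
; register-promoted accumulator) and truncates the widened induction for
; the "+ i" term.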
; CHECK: [[InnerForBody]]:
; CHECK: %[[InnerInd:.*]] = phi <4 x i64> [ %[[InnerIndNext:.*]], %[[InnerForBody]] ], [ zeroinitializer, %[[InnerForPh]] ]
; CHECK: %[[AccumPhi:.*]] = phi <4 x i32> [ %[[AccumPhiNext:.*]], %[[InnerForBody]] ], [ %[[WideAVal]], %[[InnerForPh]] ]
; CHECK: %[[BAddr:.*]] = getelementptr inbounds [1024 x i32], [1024 x i32]* @B, i64 0, <4 x i64> %[[InnerInd]]
; CHECK: %[[WideBVal:.*]] = call <4 x i32> @llvm.masked.gather.v4i32.v4p0i32(<4 x i32*> %[[BAddr]], i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i32> undef)
; CHECK: %[[Add1:.*]] = add nsw <4 x i32> %[[WideBVal]], %[[VecIndTr]]
; CHECK: %[[AccumPhiNext]] = add nsw <4 x i32> %[[Add1]], %[[AccumPhi]]
; CHECK: %[[InnerIndNext]] = add nuw nsw <4 x i64> %[[InnerInd]], <i64 1, i64 1, i64 1, i64 1>
; CHECK: %[[InnerVecCond:.*]] = icmp eq <4 x i64> %[[InnerIndNext]], {{.*}}
; CHECK: %[[InnerCond:.+]] = extractelement <4 x i1> %[[InnerVecCond]], i32 0
; CHECK: br i1 %[[InnerCond]], label %[[InnerCrit:.*]], label %[[InnerForBody]]
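;
; The inner loop keeps a <4 x i32> accumulator phi and gathers B[j] (the
; same j in every lane); because the inner trip count is uniform, the exit
; condition is tested on lane 0 only.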
; CHECK: [[InnerCrit]]:
; CHECK: %[[StorePhi:.*]] = phi <4 x i32> [ %[[AccumPhiNext]], %[[InnerForBody]] ]
; CHECK: call void @llvm.masked.scatter.v4i32.v4p0i32(<4 x i32> %[[StorePhi]], <4 x i32*> %[[AAddr]], i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>)
; CHECK: br label %[[ForInc]]
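;
; On inner-loop exit the accumulated values are scattered back to A[i..i+3].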
; CHECK: [[ForInc]]:
; CHECK: %[[IndNext]] = add i64 %[[Ind]], 4
; CHECK: %[[VecIndNext]] = add <4 x i64> %[[VecInd]], <i64 4, i64 4, i64 4, i64 4>
; CHECK: %[[Cmp:.*]] = icmp eq i64 %[[IndNext]], {{.*}}
; CHECK: br i1 %[[Cmp]], label %middle.block, label %vector.body
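;
; The outer latch advances the scalar and vector inductions by VF = 4 and
; exits to middle.block, where any remaining iterations fall through to the
; scalar loop.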
@A = common global [1024 x i32] zeroinitializer, align 16
@B = common global [1024 x i32] zeroinitializer, align 16
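; Scalar input: the nested loop from the C source above, with the
; vectorization hints attached to the outer loop via !llvm.loop metadata.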
; Function Attrs: norecurse nounwind uwtable
define void @foo(i32 %iCount, i32 %c, i32 %jCount) {
entry:
  %cmp22 = icmp sgt i32 %iCount, 0
  br i1 %cmp22, label %for.body.lr.ph, label %for.end11

for.body.lr.ph:                                   ; preds = %entry
  %cmp220 = icmp sgt i32 %jCount, 0
  %wide.trip.count = zext i32 %jCount to i64
  %wide.trip.count27 = zext i32 %iCount to i64
  br label %for.body

for.body:                                         ; preds = %for.inc9, %for.body.lr.ph
  %indvars.iv25 = phi i64 [ 0, %for.body.lr.ph ], [ %indvars.iv.next26, %for.inc9 ]
  %arrayidx = getelementptr inbounds [1024 x i32], [1024 x i32]* @A, i64 0, i64 %indvars.iv25
  store i32 %c, i32* %arrayidx, align 4
  br i1 %cmp220, label %for.body3.lr.ph, label %for.inc9
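; A[i] is promoted to a register (%arrayidx.promoted / %1) across the inner
; loop; the final sum is stored back to A[i] on the critical edge below.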
for.body3.lr.ph:                                  ; preds = %for.body
  %arrayidx.promoted = load i32, i32* %arrayidx, align 4
  %0 = trunc i64 %indvars.iv25 to i32
  br label %for.body3

for.body3:                                        ; preds = %for.body3, %for.body3.lr.ph
  %indvars.iv = phi i64 [ 0, %for.body3.lr.ph ], [ %indvars.iv.next, %for.body3 ]
  %1 = phi i32 [ %arrayidx.promoted, %for.body3.lr.ph ], [ %add8, %for.body3 ]
  %arrayidx5 = getelementptr inbounds [1024 x i32], [1024 x i32]* @B, i64 0, i64 %indvars.iv
  %2 = load i32, i32* %arrayidx5, align 4
  %add = add nsw i32 %2, %0
  %add8 = add nsw i32 %add, %1
  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
  %exitcond = icmp eq i64 %indvars.iv.next, %wide.trip.count
  br i1 %exitcond, label %for.cond1.for.inc9_crit_edge, label %for.body3

for.cond1.for.inc9_crit_edge:                     ; preds = %for.body3
  store i32 %add8, i32* %arrayidx, align 4
  br label %for.inc9

for.inc9:                                         ; preds = %for.cond1.for.inc9_crit_edge, %for.body
  %indvars.iv.next26 = add nuw nsw i64 %indvars.iv25, 1
  %exitcond28 = icmp eq i64 %indvars.iv.next26, %wide.trip.count27
  br i1 %exitcond28, label %for.end11, label %for.body, !llvm.loop !1

for.end11:                                        ; preds = %for.inc9, %entry
  ret void
}
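; Loop metadata carrying the hints from "#pragma clang loop
; vectorize(enable) vectorize_width(4)", attached to the outer-loop latch.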
!1 = distinct !{!1, !2, !3}
!2 = !{!"llvm.loop.vectorize.width", i32 4}
!3 = !{!"llvm.loop.vectorize.enable", i1 true}