; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -loop-vectorize -mcpu=corei7-avx -S -vectorizer-min-trip-count=21 | FileCheck %s

target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux"
;
; The source code for the test:
;
; void foo(float* restrict A, float* restrict B)
; {
;   for (int i = 0; i < 20; ++i) A[i] += B[i];
; }
;

; This loop will be vectorized even though the trip count is below the
; threshold, because vectorization is explicitly forced in metadata.
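;
; (Illustrative, not part of the original test.) Forcing vectorization through
; metadata is what Clang typically emits for a loop annotated with, e.g.:
;
;   #pragma clang loop vectorize(enable)
;   for (int i = 0; i < 20; ++i) A[i] += B[i];
;
; The pragma corresponds to the "llvm.loop.vectorize.enable" hint attached to
; the loop metadata below.
;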
define void @vectorized(float* noalias nocapture %A, float* noalias nocapture readonly %B) {
; CHECK-LABEL: @vectorized(
; CHECK-NEXT: entry:
; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <8 x i64> undef, i64 [[INDEX]], i32 0
; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <8 x i64> [[BROADCAST_SPLATINSERT]], <8 x i64> undef, <8 x i32> zeroinitializer
; CHECK-NEXT: [[INDUCTION:%.*]] = add <8 x i64> [[BROADCAST_SPLAT]], <i64 0, i64 1, i64 2, i64 3, i64 4, i64 5, i64 6, i64 7>
; CHECK-NEXT: [[TMP0:%.*]] = add i64 [[INDEX]], 0
; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds float, float* [[B:%.*]], i64 [[TMP0]]
; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds float, float* [[TMP1]], i32 0
; CHECK-NEXT: [[TMP3:%.*]] = bitcast float* [[TMP2]] to <8 x float>*
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <8 x float>, <8 x float>* [[TMP3]], align 4
; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds float, float* [[A:%.*]], i64 [[TMP0]]
; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds float, float* [[TMP4]], i32 0
; CHECK-NEXT: [[TMP6:%.*]] = bitcast float* [[TMP5]] to <8 x float>*
; CHECK-NEXT: [[WIDE_LOAD1:%.*]] = load <8 x float>, <8 x float>* [[TMP6]], align 4
; CHECK-NEXT: [[TMP7:%.*]] = fadd fast <8 x float> [[WIDE_LOAD]], [[WIDE_LOAD1]]
; CHECK-NEXT: [[TMP8:%.*]] = bitcast float* [[TMP5]] to <8 x float>*
; CHECK-NEXT: store <8 x float> [[TMP7]], <8 x float>* [[TMP8]], align 4
; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 8
; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], 16
; CHECK-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop !1
; CHECK: middle.block:
; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 20, 16
; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]]
; CHECK: scalar.ph:
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 16, [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, float* [[B]], i64 [[INDVARS_IV]]
; CHECK-NEXT: [[TMP10:%.*]] = load float, float* [[ARRAYIDX]], align 4, !llvm.access.group !0
; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds float, float* [[A]], i64 [[INDVARS_IV]]
; CHECK-NEXT: [[TMP11:%.*]] = load float, float* [[ARRAYIDX2]], align 4, !llvm.access.group !0
; CHECK-NEXT: [[ADD:%.*]] = fadd fast float [[TMP10]], [[TMP11]]
; CHECK-NEXT: store float [[ADD]], float* [[ARRAYIDX2]], align 4, !llvm.access.group !0
; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 20
; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop !4
; CHECK: for.end:
; CHECK-NEXT: ret void
;
entry:
  br label %for.body

for.body:
  %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
  %arrayidx = getelementptr inbounds float, float* %B, i64 %indvars.iv
  %0 = load float, float* %arrayidx, align 4, !llvm.access.group !11
  %arrayidx2 = getelementptr inbounds float, float* %A, i64 %indvars.iv
  %1 = load float, float* %arrayidx2, align 4, !llvm.access.group !11
  %add = fadd fast float %0, %1
  store float %add, float* %arrayidx2, align 4, !llvm.access.group !11
  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
  %exitcond = icmp eq i64 %indvars.iv.next, 20
  br i1 %exitcond, label %for.end, label %for.body, !llvm.loop !1

for.end:
  ret void
}

!1 = !{!1, !2, !{!"llvm.loop.parallel_accesses", !11}}
!2 = !{!"llvm.loop.vectorize.enable", i1 true}
!11 = distinct !{}

;
; This loop will be vectorized as the trip count is below the threshold but no
; scalar iterations are needed thanks to folding its tail.
;
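;
; Illustrative sketch (not part of the original test, C-like pseudocode): with
; the tail folded, the vector body runs ceil(20/8) = 3 iterations and a
; per-lane mask disables the lanes past the last element, roughly:
;
;   for (int i = 0; i < 24; i += 8)
;     for (int lane = 0; lane < 8; ++lane)
;       if (i + lane <= 19)        /* mask: compare against trip count - 1 */
;         A[i + lane] += B[i + lane];
;
; This corresponds to the "icmp ule ... i64 19" mask and the llvm.masked.store
; call in the checks below.
;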
define void @vectorized1(float* noalias nocapture %A, float* noalias nocapture readonly %B) {
; CHECK-LABEL: @vectorized1(
; CHECK-NEXT: entry:
; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <8 x i64> undef, i64 [[INDEX]], i32 0
; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <8 x i64> [[BROADCAST_SPLATINSERT]], <8 x i64> undef, <8 x i32> zeroinitializer
; CHECK-NEXT: [[INDUCTION:%.*]] = add <8 x i64> [[BROADCAST_SPLAT]], <i64 0, i64 1, i64 2, i64 3, i64 4, i64 5, i64 6, i64 7>
; CHECK-NEXT: [[TMP0:%.*]] = add i64 [[INDEX]], 0
; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds float, float* [[B:%.*]], i64 [[TMP0]]
; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds float, float* [[TMP1]], i32 0
; CHECK-NEXT: [[TMP3:%.*]] = bitcast float* [[TMP2]] to <8 x float>*
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <8 x float>, <8 x float>* [[TMP3]], align 4
; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds float, float* [[A:%.*]], i64 [[TMP0]]
; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds float, float* [[TMP4]], i32 0
; CHECK-NEXT: [[TMP6:%.*]] = bitcast float* [[TMP5]] to <8 x float>*
; CHECK-NEXT: [[WIDE_LOAD1:%.*]] = load <8 x float>, <8 x float>* [[TMP6]], align 4
; CHECK-NEXT: [[TMP7:%.*]] = fadd fast <8 x float> [[WIDE_LOAD]], [[WIDE_LOAD1]]
; CHECK-NEXT: [[TMP8:%.*]] = icmp ule <8 x i64> [[INDUCTION]], <i64 19, i64 19, i64 19, i64 19, i64 19, i64 19, i64 19, i64 19>
; CHECK-NEXT: [[TMP9:%.*]] = bitcast float* [[TMP5]] to <8 x float>*
; CHECK-NEXT: call void @llvm.masked.store.v8f32.p0v8f32(<8 x float> [[TMP7]], <8 x float>* [[TMP9]], i32 4, <8 x i1> [[TMP8]])
; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 8
; CHECK-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], 24
; CHECK-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop !7
; CHECK: middle.block:
; CHECK-NEXT: br i1 true, label [[FOR_END:%.*]], label [[SCALAR_PH]]
; CHECK: for.end:
; CHECK-NEXT: ret void
;
entry:
  br label %for.body

for.body:
  %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
  %arrayidx = getelementptr inbounds float, float* %B, i64 %indvars.iv
  %0 = load float, float* %arrayidx, align 4, !llvm.access.group !13
  %arrayidx2 = getelementptr inbounds float, float* %A, i64 %indvars.iv
  %1 = load float, float* %arrayidx2, align 4, !llvm.access.group !13
  %add = fadd fast float %0, %1
  store float %add, float* %arrayidx2, align 4, !llvm.access.group !13
  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
  %exitcond = icmp eq i64 %indvars.iv.next, 20
  br i1 %exitcond, label %for.end, label %for.body, !llvm.loop !3

for.end:
  ret void
}

!3 = !{!3, !{!"llvm.loop.parallel_accesses", !13}}
!13 = distinct !{}

;
; This loop will be vectorized as the trip count is below the threshold but no
; scalar iterations are needed.
;
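;
; Worked numbers (illustrative): with VF = 8 and a trip count of 16, the vector
; loop runs exactly 16 / 8 = 2 iterations with no remainder, so the middle
; block's "icmp eq i64 16, 16" comparison below is always true and the scalar
; epilogue loop is never executed.
;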
define void @vectorized2(float* noalias nocapture %A, float* noalias nocapture readonly %B) {
; CHECK-LABEL: @vectorized2(
; CHECK-NEXT: entry:
; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <8 x i64> undef, i64 [[INDEX]], i32 0
; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <8 x i64> [[BROADCAST_SPLATINSERT]], <8 x i64> undef, <8 x i32> zeroinitializer
; CHECK-NEXT: [[INDUCTION:%.*]] = add <8 x i64> [[BROADCAST_SPLAT]], <i64 0, i64 1, i64 2, i64 3, i64 4, i64 5, i64 6, i64 7>
; CHECK-NEXT: [[TMP0:%.*]] = add i64 [[INDEX]], 0
; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds float, float* [[B:%.*]], i64 [[TMP0]]
; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds float, float* [[TMP1]], i32 0
; CHECK-NEXT: [[TMP3:%.*]] = bitcast float* [[TMP2]] to <8 x float>*
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <8 x float>, <8 x float>* [[TMP3]], align 4
; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds float, float* [[A:%.*]], i64 [[TMP0]]
; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds float, float* [[TMP4]], i32 0
; CHECK-NEXT: [[TMP6:%.*]] = bitcast float* [[TMP5]] to <8 x float>*
; CHECK-NEXT: [[WIDE_LOAD1:%.*]] = load <8 x float>, <8 x float>* [[TMP6]], align 4
; CHECK-NEXT: [[TMP7:%.*]] = fadd fast <8 x float> [[WIDE_LOAD]], [[WIDE_LOAD1]]
; CHECK-NEXT: [[TMP8:%.*]] = bitcast float* [[TMP5]] to <8 x float>*
; CHECK-NEXT: store <8 x float> [[TMP7]], <8 x float>* [[TMP8]], align 4
; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 8
; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], 16
; CHECK-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop !10
; CHECK: middle.block:
; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 16, 16
; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]]
; CHECK: scalar.ph:
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 16, [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, float* [[B]], i64 [[INDVARS_IV]]
; CHECK-NEXT: [[TMP10:%.*]] = load float, float* [[ARRAYIDX]], align 4, !llvm.access.group !6
; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds float, float* [[A]], i64 [[INDVARS_IV]]
; CHECK-NEXT: [[TMP11:%.*]] = load float, float* [[ARRAYIDX2]], align 4, !llvm.access.group !6
; CHECK-NEXT: [[ADD:%.*]] = fadd fast float [[TMP10]], [[TMP11]]
; CHECK-NEXT: store float [[ADD]], float* [[ARRAYIDX2]], align 4, !llvm.access.group !6
; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 16
; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop !11
; CHECK: for.end:
; CHECK-NEXT: ret void
;
entry:
  br label %for.body

for.body:
  %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
  %arrayidx = getelementptr inbounds float, float* %B, i64 %indvars.iv
  %0 = load float, float* %arrayidx, align 4, !llvm.access.group !13
  %arrayidx2 = getelementptr inbounds float, float* %A, i64 %indvars.iv
  %1 = load float, float* %arrayidx2, align 4, !llvm.access.group !13
  %add = fadd fast float %0, %1
  store float %add, float* %arrayidx2, align 4, !llvm.access.group !13
  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
  %exitcond = icmp eq i64 %indvars.iv.next, 16
  br i1 %exitcond, label %for.end, label %for.body, !llvm.loop !4