; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -passes=loop-vectorize,dce,instcombine -scalable-vectorization=on -force-target-instruction-cost=1 -force-target-supports-scalable-vectors < %s -S | FileCheck %s
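
; Notes on the RUN line: dce and instcombine run after the vectorizer to tidy
; up its output, -scalable-vectorization=on permits scalable vectorization
; factors, -force-target-instruction-cost=1 gives every instruction unit cost,
; and -force-target-supports-scalable-vectors lets the generic target accept
; <vscale x N x ty> types.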
; Test that we can add on the induction variable
;   for (long long i = 0; i < n; i++) {
;     a[i] = b[i] + i;
;   }
; with an unroll factor (interleave count) of 2.
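;
; With VF = (vscale x 2) and an interleave count of 2, each vector iteration
; below handles two <vscale x 2 x i64> parts: the scalar index advances by
; 4 * vscale, and the second part's induction equals the first plus a splat
; of 2 * vscale.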

define void @add_ind64_unrolled(ptr noalias nocapture %a, ptr noalias nocapture readonly %b, i64 %n) {
; CHECK-LABEL: @add_ind64_unrolled(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP1:%.*]] = shl i64 [[TMP0]], 2
; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N:%.*]], [[TMP1]]
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP3:%.*]] = shl i64 [[TMP2]], 2
; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP8:%.*]] = shl i64 [[TMP4]], 1
; CHECK-NEXT: [[TMP5:%.*]] = shl i64 [[TMP4]], 2
; CHECK-NEXT: [[TMP6:%.*]] = call <vscale x 2 x i64> @llvm.stepvector.nxv2i64()
; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[TMP8]], i64 0
; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_IND:%.*]] = phi <vscale x 2 x i64> [ [[TMP6]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[STEP_ADD:%.*]] = add <vscale x 2 x i64> [[VEC_IND]], [[DOTSPLAT]]
; CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds i64, ptr [[B:%.*]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP10:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[DOTIDX:%.*]] = shl i64 [[TMP10]], 4
; CHECK-NEXT: [[TMP11:%.*]] = getelementptr inbounds i8, ptr [[TMP9]], i64 [[DOTIDX]]
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 2 x i64>, ptr [[TMP9]], align 8
; CHECK-NEXT: [[WIDE_LOAD2:%.*]] = load <vscale x 2 x i64>, ptr [[TMP11]], align 8
; CHECK-NEXT: [[TMP12:%.*]] = add nsw <vscale x 2 x i64> [[WIDE_LOAD]], [[VEC_IND]]
; CHECK-NEXT: [[TMP13:%.*]] = add nsw <vscale x 2 x i64> [[WIDE_LOAD2]], [[STEP_ADD]]
; CHECK-NEXT: [[TMP14:%.*]] = getelementptr inbounds i64, ptr [[A:%.*]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP15:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[DOTIDX3:%.*]] = shl i64 [[TMP15]], 4
; CHECK-NEXT: [[TMP16:%.*]] = getelementptr inbounds i8, ptr [[TMP14]], i64 [[DOTIDX3]]
; CHECK-NEXT: store <vscale x 2 x i64> [[TMP12]], ptr [[TMP14]], align 8
; CHECK-NEXT: store <vscale x 2 x i64> [[TMP13]], ptr [[TMP16]], align 8
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
; CHECK-NEXT: [[VEC_IND_NEXT]] = add <vscale x 2 x i64> [[STEP_ADD]], [[DOTSPLAT]]
; CHECK-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N_MOD_VF]], 0
; CHECK-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]
; CHECK: scalar.ph:
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[I_08:%.*]] = phi i64 [ [[INC:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[B]], i64 [[I_08]]
; CHECK-NEXT: [[TMP18:%.*]] = load i64, ptr [[ARRAYIDX]], align 8
; CHECK-NEXT: [[ADD:%.*]] = add nsw i64 [[TMP18]], [[I_08]]
; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[I_08]]
; CHECK-NEXT: store i64 [[ADD]], ptr [[ARRAYIDX1]], align 8
; CHECK-NEXT: [[INC]] = add nuw nsw i64 [[I_08]], 1
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INC]], [[N]]
; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[EXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK: exit:
; CHECK-NEXT: ret void
;
entry:
  br label %for.body

for.body: ; preds = %entry, %for.body
  %i.08 = phi i64 [ %inc, %for.body ], [ 0, %entry ]
  %arrayidx = getelementptr inbounds i64, ptr %b, i64 %i.08
  %0 = load i64, ptr %arrayidx, align 8
  %add = add nsw i64 %0, %i.08
  %arrayidx1 = getelementptr inbounds i64, ptr %a, i64 %i.08
  store i64 %add, ptr %arrayidx1, align 8
  %inc = add nuw nsw i64 %i.08, 1
  %exitcond.not = icmp eq i64 %inc, %n
  br i1 %exitcond.not, label %exit, label %for.body, !llvm.loop !0

exit: ; preds = %for.body
  ret void
}

; Same as above, except we test with a vectorisation factor of (1, scalable)
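; by setting llvm.loop.vectorize.width to 1 in !9 while keeping scalable
; vectorization enabled; the index below steps by 2 * vscale and the two
; interleaved parts are one vscale apart.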

define void @add_ind64_unrolled_nxv1i64(ptr noalias nocapture %a, ptr noalias nocapture readonly %b, i64 %n) {
; CHECK-LABEL: @add_ind64_unrolled_nxv1i64(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP1:%.*]] = shl i64 [[TMP0]], 1
; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N:%.*]], [[TMP1]]
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP3:%.*]] = shl i64 [[TMP2]], 1
; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP5:%.*]] = shl i64 [[TMP4]], 1
; CHECK-NEXT: [[TMP6:%.*]] = call <vscale x 1 x i64> @llvm.stepvector.nxv1i64()
; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 1 x i64> poison, i64 [[TMP4]], i64 0
; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 1 x i64> [[DOTSPLATINSERT]], <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_IND:%.*]] = phi <vscale x 1 x i64> [ [[TMP6]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[STEP_ADD:%.*]] = add <vscale x 1 x i64> [[VEC_IND]], [[DOTSPLAT]]
; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds i64, ptr [[B:%.*]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds i64, ptr [[TMP8]], i64 [[TMP9]]
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 1 x i64>, ptr [[TMP8]], align 8
; CHECK-NEXT: [[WIDE_LOAD2:%.*]] = load <vscale x 1 x i64>, ptr [[TMP10]], align 8
; CHECK-NEXT: [[TMP11:%.*]] = add nsw <vscale x 1 x i64> [[WIDE_LOAD]], [[VEC_IND]]
; CHECK-NEXT: [[TMP12:%.*]] = add nsw <vscale x 1 x i64> [[WIDE_LOAD2]], [[STEP_ADD]]
; CHECK-NEXT: [[TMP13:%.*]] = getelementptr inbounds i64, ptr [[A:%.*]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP14:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP15:%.*]] = getelementptr inbounds i64, ptr [[TMP13]], i64 [[TMP14]]
; CHECK-NEXT: store <vscale x 1 x i64> [[TMP11]], ptr [[TMP13]], align 8
; CHECK-NEXT: store <vscale x 1 x i64> [[TMP12]], ptr [[TMP15]], align 8
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
; CHECK-NEXT: [[VEC_IND_NEXT]] = add <vscale x 1 x i64> [[STEP_ADD]], [[DOTSPLAT]]
; CHECK-NEXT: [[TMP16:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP16]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N_MOD_VF]], 0
; CHECK-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]
; CHECK: scalar.ph:
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[I_08:%.*]] = phi i64 [ [[INC:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[B]], i64 [[I_08]]
; CHECK-NEXT: [[TMP17:%.*]] = load i64, ptr [[ARRAYIDX]], align 8
; CHECK-NEXT: [[ADD:%.*]] = add nsw i64 [[TMP17]], [[I_08]]
; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[I_08]]
; CHECK-NEXT: store i64 [[ADD]], ptr [[ARRAYIDX1]], align 8
; CHECK-NEXT: [[INC]] = add nuw nsw i64 [[I_08]], 1
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INC]], [[N]]
; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[EXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
; CHECK: exit:
; CHECK-NEXT: ret void
;
entry:
  br label %for.body

for.body: ; preds = %entry, %for.body
  %i.08 = phi i64 [ %inc, %for.body ], [ 0, %entry ]
  %arrayidx = getelementptr inbounds i64, ptr %b, i64 %i.08
  %0 = load i64, ptr %arrayidx, align 8
  %add = add nsw i64 %0, %i.08
  %arrayidx1 = getelementptr inbounds i64, ptr %a, i64 %i.08
  store i64 %add, ptr %arrayidx1, align 8
  %inc = add nuw nsw i64 %i.08, 1
  %exitcond.not = icmp eq i64 %inc, %n
  br i1 %exitcond.not, label %exit, label %for.body, !llvm.loop !9

exit: ; preds = %for.body
  ret void
}

; Test that we can vectorize a separate induction variable (not used for the branch)
;   int r = 0;
;   for (long long i = 0; i < n; i++) {
;     a[i] = r;
;     r += 2;
;   }
; with an unroll factor (interleave count) of 1.
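;
; Since r starts at 0 and steps by 2, the starting vector below is
; 2 * stepvector (a shift left by splat (i32 1)), and the vector induction
; advances by a splat of 2 * (4 * vscale) each iteration.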

define void @add_unique_ind32(ptr noalias nocapture %a, i64 %n) {
; CHECK-LABEL: @add_unique_ind32(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP1:%.*]] = shl i64 [[TMP0]], 2
; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N:%.*]], [[TMP1]]
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP3:%.*]] = shl i64 [[TMP2]], 2
; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
; CHECK-NEXT: [[DOTCAST:%.*]] = trunc i64 [[N_VEC]] to i32
; CHECK-NEXT: [[IND_END:%.*]] = shl i32 [[DOTCAST]], 1
; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP5:%.*]] = shl i64 [[TMP4]], 2
; CHECK-NEXT: [[TMP6:%.*]] = call <vscale x 4 x i32> @llvm.stepvector.nxv4i32()
; CHECK-NEXT: [[TMP7:%.*]] = shl <vscale x 4 x i32> [[TMP6]], splat (i32 1)
; CHECK-NEXT: [[TMP8:%.*]] = trunc i64 [[TMP5]] to i32
; CHECK-NEXT: [[TMP9:%.*]] = shl i32 [[TMP8]], 1
; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[TMP9]], i64 0
; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[DOTSPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_IND:%.*]] = phi <vscale x 4 x i32> [ [[TMP7]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[INDEX]]
; CHECK-NEXT: store <vscale x 4 x i32> [[VEC_IND]], ptr [[TMP10]], align 4
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
; CHECK-NEXT: [[VEC_IND_NEXT]] = add <vscale x 4 x i32> [[VEC_IND]], [[DOTSPLAT]]
; CHECK-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N_MOD_VF]], 0
; CHECK-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]
; CHECK: scalar.ph:
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT: [[BC_RESUME_VAL1:%.*]] = phi i32 [ [[IND_END]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY]] ]
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[I_08:%.*]] = phi i64 [ [[INC:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
; CHECK-NEXT: [[R_07:%.*]] = phi i32 [ [[ADD:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL1]], [[SCALAR_PH]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[I_08]]
; CHECK-NEXT: store i32 [[R_07]], ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[ADD]] = add nuw nsw i32 [[R_07]], 2
; CHECK-NEXT: [[INC]] = add nuw nsw i64 [[I_08]], 1
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INC]], [[N]]
; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[EXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
; CHECK: exit:
; CHECK-NEXT: ret void
;
entry:
  br label %for.body

for.body: ; preds = %entry, %for.body
  %i.08 = phi i64 [ %inc, %for.body ], [ 0, %entry ]
  %r.07 = phi i32 [ %add, %for.body ], [ 0, %entry ]
  %arrayidx = getelementptr inbounds i32, ptr %a, i64 %i.08
  store i32 %r.07, ptr %arrayidx, align 4
  %add = add nuw nsw i32 %r.07, 2
  %inc = add nuw nsw i64 %i.08, 1
  %exitcond.not = icmp eq i64 %inc, %n
  br i1 %exitcond.not, label %exit, label %for.body, !llvm.loop !6

exit: ; preds = %for.body
  ret void
}

; Test that we can vectorize a separate FP induction variable (not used for the branch)
;   float r = 0;
;   for (long long i = 0; i < n; i++) {
;     a[i] = r;
;     r += 2;
;   }
; with an unroll factor (interleave count) of 1.
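;
; Same shape as above with a float induction: the start vector is
; uitofp(stepvector) * 2.0 and the per-iteration increment is a splat of
; uitofp(4 * vscale) * 2.0.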

define void @add_unique_indf32(ptr noalias nocapture %a, i64 %n) {
; CHECK-LABEL: @add_unique_indf32(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP1:%.*]] = shl i64 [[TMP0]], 2
; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N:%.*]], [[TMP1]]
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP3:%.*]] = shl i64 [[TMP2]], 2
; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
; CHECK-NEXT: [[DOTCAST:%.*]] = sitofp i64 [[N_VEC]] to float
; CHECK-NEXT: [[TMP4:%.*]] = fmul float [[DOTCAST]], 2.000000e+00
; CHECK-NEXT: [[IND_END:%.*]] = fadd float [[TMP4]], 0.000000e+00
; CHECK-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP6:%.*]] = shl i64 [[TMP5]], 2
; CHECK-NEXT: [[TMP7:%.*]] = call <vscale x 4 x i32> @llvm.stepvector.nxv4i32()
; CHECK-NEXT: [[TMP8:%.*]] = uitofp <vscale x 4 x i32> [[TMP7]] to <vscale x 4 x float>
; CHECK-NEXT: [[TMP9:%.*]] = fmul <vscale x 4 x float> [[TMP8]], splat (float 2.000000e+00)
; CHECK-NEXT: [[INDUCTION:%.*]] = fadd <vscale x 4 x float> [[TMP9]], zeroinitializer
; CHECK-NEXT: [[TMP12:%.*]] = uitofp i64 [[TMP6]] to float
; CHECK-NEXT: [[TMP13:%.*]] = fmul float [[TMP12]], 2.000000e+00
; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 4 x float> poison, float [[TMP13]], i64 0
; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 4 x float> [[DOTSPLATINSERT]], <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_IND:%.*]] = phi <vscale x 4 x float> [ [[INDUCTION]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP14:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], i64 [[INDEX]]
; CHECK-NEXT: store <vscale x 4 x float> [[VEC_IND]], ptr [[TMP14]], align 4
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP6]]
; CHECK-NEXT: [[VEC_IND_NEXT]] = fadd <vscale x 4 x float> [[VEC_IND]], [[DOTSPLAT]]
; CHECK-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP15]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N_MOD_VF]], 0
; CHECK-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]
; CHECK: scalar.ph:
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT: [[BC_RESUME_VAL1:%.*]] = phi float [ [[IND_END]], [[MIDDLE_BLOCK]] ], [ 0.000000e+00, [[ENTRY]] ]
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[I_08:%.*]] = phi i64 [ [[INC:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
; CHECK-NEXT: [[R_07:%.*]] = phi float [ [[ADD:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL1]], [[SCALAR_PH]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[I_08]]
; CHECK-NEXT: store float [[R_07]], ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[ADD]] = fadd float [[R_07]], 2.000000e+00
; CHECK-NEXT: [[INC]] = add nuw nsw i64 [[I_08]], 1
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INC]], [[N]]
; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[EXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
; CHECK: exit:
; CHECK-NEXT: ret void
;
entry:
  br label %for.body

for.body: ; preds = %entry, %for.body
  %i.08 = phi i64 [ %inc, %for.body ], [ 0, %entry ]
  %r.07 = phi float [ %add, %for.body ], [ 0.000000e+00, %entry ]
  %arrayidx = getelementptr inbounds float, ptr %a, i64 %i.08
  store float %r.07, ptr %arrayidx, align 4
  %add = fadd float %r.07, 2.000000e+00
  %inc = add nuw nsw i64 %i.08, 1
  %exitcond.not = icmp eq i64 %inc, %n
  br i1 %exitcond.not, label %exit, label %for.body, !llvm.loop !6

exit: ; preds = %for.body
  ret void
}

!0 = distinct !{!0, !1, !2, !3, !4, !5}
!1 = !{!"llvm.loop.mustprogress"}
!2 = !{!"llvm.loop.vectorize.width", i32 2}
!3 = !{!"llvm.loop.vectorize.scalable.enable", i1 true}
!4 = !{!"llvm.loop.interleave.count", i32 2}
!5 = !{!"llvm.loop.vectorize.enable", i1 true}
!6 = distinct !{!6, !1, !7, !3, !8, !5}
!7 = !{!"llvm.loop.vectorize.width", i32 4}
!8 = !{!"llvm.loop.interleave.count", i32 1}
!9 = distinct !{!9, !1, !10, !3, !4, !5}
!10 = !{!"llvm.loop.vectorize.width", i32 1}
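
; !0 requests VF = (vscale x 2) with interleave count 2 (add_ind64_unrolled),
; !6 requests VF = (vscale x 4) with interleave count 1 (add_unique_ind32 and
; add_unique_indf32), and !9 requests VF = (vscale x 1) with interleave count
; 2 (add_ind64_unrolled_nxv1i64); all three enable scalable vectorization via
; llvm.loop.vectorize.scalable.enable.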