; RUN: opt < %s -passes=loop-vectorize -prefer-predicate-over-epilogue=scalar-epilogue -pass-remarks=loop-vectorize -pass-remarks-analysis=loop-vectorize \
; RUN:   -pass-remarks-missed=loop-vectorize -mtriple aarch64-unknown-linux-gnu -mattr=+sve,+bf16 -S 2>%t | FileCheck %s -check-prefix=CHECK
; RUN: cat %t | FileCheck %s -check-prefix=CHECK-REMARK

; Reduction can be vectorized

; ADD

; CHECK-REMARK: vectorized loop (vectorization width: vscale x 8, interleaved count: 2)
define i32 @add(ptr nocapture %a, ptr nocapture readonly %b, i64 %n) {
; CHECK-LABEL: @add
; CHECK: vector.body:
; CHECK: %[[LOAD1:.*]] = load <vscale x 8 x i32>
; CHECK: %[[LOAD2:.*]] = load <vscale x 8 x i32>
; CHECK: %[[ADD1:.*]] = add <vscale x 8 x i32> %[[LOAD1]]
; CHECK: %[[ADD2:.*]] = add <vscale x 8 x i32> %[[LOAD2]]
; CHECK: middle.block:
; CHECK: %[[ADD:.*]] = add <vscale x 8 x i32> %[[ADD2]], %[[ADD1]]
; CHECK-NEXT: call i32 @llvm.vector.reduce.add.nxv8i32(<vscale x 8 x i32> %[[ADD]])
entry:
  br label %for.body

for.body:                                         ; preds = %entry, %for.body
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
  %sum.07 = phi i32 [ 2, %entry ], [ %add, %for.body ]
  %arrayidx = getelementptr inbounds i32, ptr %a, i64 %iv
  %0 = load i32, ptr %arrayidx, align 4
  %add = add nsw i32 %0, %sum.07
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond.not = icmp eq i64 %iv.next, %n
  br i1 %exitcond.not, label %for.end, label %for.body, !llvm.loop !0

for.end:                                          ; preds = %for.body, %entry
  ret i32 %add
}

; OR

; CHECK-REMARK: vectorized loop (vectorization width: vscale x 8, interleaved count: 2)
define i32 @or(ptr nocapture %a, ptr nocapture readonly %b, i64 %n) {
; CHECK-LABEL: @or
; CHECK: vector.body:
; CHECK: %[[LOAD1:.*]] = load <vscale x 8 x i32>
; CHECK: %[[LOAD2:.*]] = load <vscale x 8 x i32>
; CHECK: %[[OR1:.*]] = or <vscale x 8 x i32> %[[LOAD1]]
; CHECK: %[[OR2:.*]] = or <vscale x 8 x i32> %[[LOAD2]]
; CHECK: middle.block:
; CHECK: %[[OR:.*]] = or <vscale x 8 x i32> %[[OR2]], %[[OR1]]
; CHECK-NEXT: call i32 @llvm.vector.reduce.or.nxv8i32(<vscale x 8 x i32> %[[OR]])
entry:
  br label %for.body

for.body:                                         ; preds = %entry, %for.body
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
  %sum.07 = phi i32 [ 2, %entry ], [ %or, %for.body ]
  %arrayidx = getelementptr inbounds i32, ptr %a, i64 %iv
  %0 = load i32, ptr %arrayidx, align 4
  %or = or i32 %0, %sum.07
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond.not = icmp eq i64 %iv.next, %n
  br i1 %exitcond.not, label %for.end, label %for.body, !llvm.loop !0

for.end:                                          ; preds = %for.body, %entry
  ret i32 %or
}

; AND

; CHECK-REMARK: vectorized loop (vectorization width: vscale x 8, interleaved count: 2)
define i32 @and(ptr nocapture %a, ptr nocapture readonly %b, i64 %n) {
; CHECK-LABEL: @and
; CHECK: vector.body:
; CHECK: %[[LOAD1:.*]] = load <vscale x 8 x i32>
; CHECK: %[[LOAD2:.*]] = load <vscale x 8 x i32>
; CHECK: %[[AND1:.*]] = and <vscale x 8 x i32> %[[LOAD1]]
; CHECK: %[[AND2:.*]] = and <vscale x 8 x i32> %[[LOAD2]]
; CHECK: middle.block:
; CHECK: %[[AND:.*]] = and <vscale x 8 x i32> %[[AND2]], %[[AND1]]
; CHECK-NEXT: call i32 @llvm.vector.reduce.and.nxv8i32(<vscale x 8 x i32> %[[AND]])
entry:
  br label %for.body

for.body:                                         ; preds = %entry, %for.body
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
  %sum.07 = phi i32 [ 2, %entry ], [ %and, %for.body ]
  %arrayidx = getelementptr inbounds i32, ptr %a, i64 %iv
  %0 = load i32, ptr %arrayidx, align 4
  %and = and i32 %0, %sum.07
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond.not = icmp eq i64 %iv.next, %n
  br i1 %exitcond.not, label %for.end, label %for.body, !llvm.loop !0

for.end:                                          ; preds = %for.body, %entry
  ret i32 %and
}

; XOR

; CHECK-REMARK: vectorized loop (vectorization width: vscale x 8, interleaved count: 2)
define i32 @xor(ptr nocapture %a, ptr nocapture readonly %b, i64 %n) {
; CHECK-LABEL: @xor
; CHECK: vector.body:
; CHECK: %[[LOAD1:.*]] = load <vscale x 8 x i32>
; CHECK: %[[LOAD2:.*]] = load <vscale x 8 x i32>
; CHECK: %[[XOR1:.*]] = xor <vscale x 8 x i32> %[[LOAD1]]
; CHECK: %[[XOR2:.*]] = xor <vscale x 8 x i32> %[[LOAD2]]
; CHECK: middle.block:
; CHECK: %[[XOR:.*]] = xor <vscale x 8 x i32> %[[XOR2]], %[[XOR1]]
; CHECK-NEXT: call i32 @llvm.vector.reduce.xor.nxv8i32(<vscale x 8 x i32> %[[XOR]])
entry:
  br label %for.body

for.body:                                         ; preds = %entry, %for.body
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
  %sum.07 = phi i32 [ 2, %entry ], [ %xor, %for.body ]
  %arrayidx = getelementptr inbounds i32, ptr %a, i64 %iv
  %0 = load i32, ptr %arrayidx, align 4
  %xor = xor i32 %0, %sum.07
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond.not = icmp eq i64 %iv.next, %n
  br i1 %exitcond.not, label %for.end, label %for.body, !llvm.loop !0

for.end:                                          ; preds = %for.body, %entry
  ret i32 %xor
}

; SMIN

; CHECK-REMARK: vectorized loop (vectorization width: vscale x 8, interleaved count: 2)
define i32 @smin(ptr nocapture %a, ptr nocapture readonly %b, i64 %n) {
; CHECK-LABEL: @smin
; CHECK: vector.body:
; CHECK: %[[LOAD1:.*]] = load <vscale x 8 x i32>
; CHECK: %[[LOAD2:.*]] = load <vscale x 8 x i32>
; CHECK: %[[ICMP1:.*]] = icmp slt <vscale x 8 x i32> %[[LOAD1]]
; CHECK: %[[ICMP2:.*]] = icmp slt <vscale x 8 x i32> %[[LOAD2]]
; CHECK: %[[SEL1:.*]] = select <vscale x 8 x i1> %[[ICMP1]], <vscale x 8 x i32> %[[LOAD1]]
; CHECK: %[[SEL2:.*]] = select <vscale x 8 x i1> %[[ICMP2]], <vscale x 8 x i32> %[[LOAD2]]
; CHECK: middle.block:
; CHECK: %[[RDX:.*]] = call <vscale x 8 x i32> @llvm.smin.nxv8i32(<vscale x 8 x i32> %[[SEL1]], <vscale x 8 x i32> %[[SEL2]])
; CHECK-NEXT: call i32 @llvm.vector.reduce.smin.nxv8i32(<vscale x 8 x i32> %[[RDX]])
entry:
  br label %for.body

for.body:                                         ; preds = %entry, %for.body
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
  %sum.010 = phi i32 [ 2, %entry ], [ %.sroa.speculated, %for.body ]
  %arrayidx = getelementptr inbounds i32, ptr %a, i64 %iv
  %0 = load i32, ptr %arrayidx, align 4
  %cmp.i = icmp slt i32 %0, %sum.010
  %.sroa.speculated = select i1 %cmp.i, i32 %0, i32 %sum.010
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond.not = icmp eq i64 %iv.next, %n
  br i1 %exitcond.not, label %for.end, label %for.body, !llvm.loop !0

for.end:
  ret i32 %.sroa.speculated
}

; UMAX

; CHECK-REMARK: vectorized loop (vectorization width: vscale x 8, interleaved count: 2)
define i32 @umax(ptr nocapture %a, ptr nocapture readonly %b, i64 %n) {
; CHECK-LABEL: @umax
; CHECK: vector.body:
; CHECK: %[[LOAD1:.*]] = load <vscale x 8 x i32>
; CHECK: %[[LOAD2:.*]] = load <vscale x 8 x i32>
; CHECK: %[[ICMP1:.*]] = icmp ugt <vscale x 8 x i32> %[[LOAD1]]
; CHECK: %[[ICMP2:.*]] = icmp ugt <vscale x 8 x i32> %[[LOAD2]]
; CHECK: %[[SEL1:.*]] = select <vscale x 8 x i1> %[[ICMP1]], <vscale x 8 x i32> %[[LOAD1]]
; CHECK: %[[SEL2:.*]] = select <vscale x 8 x i1> %[[ICMP2]], <vscale x 8 x i32> %[[LOAD2]]
; CHECK: middle.block:
; CHECK: %[[RDX:.*]] = call <vscale x 8 x i32> @llvm.umax.nxv8i32(<vscale x 8 x i32> %[[SEL1]], <vscale x 8 x i32> %[[SEL2]])
; CHECK-NEXT: call i32 @llvm.vector.reduce.umax.nxv8i32(<vscale x 8 x i32> %[[RDX]])
entry:
  br label %for.body

for.body:                                         ; preds = %entry, %for.body
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
  %sum.010 = phi i32 [ 2, %entry ], [ %.sroa.speculated, %for.body ]
  %arrayidx = getelementptr inbounds i32, ptr %a, i64 %iv
  %0 = load i32, ptr %arrayidx, align 4
  %cmp.i = icmp ugt i32 %0, %sum.010
  %.sroa.speculated = select i1 %cmp.i, i32 %0, i32 %sum.010
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond.not = icmp eq i64 %iv.next, %n
  br i1 %exitcond.not, label %for.end, label %for.body, !llvm.loop !0

for.end:
  ret i32 %.sroa.speculated
}

; FADD (fast)

; CHECK-REMARK: vectorized loop (vectorization width: vscale x 8, interleaved count: 2)
define float @fadd_fast(ptr noalias nocapture readonly %a, i64 %n) {
; CHECK-LABEL: @fadd_fast
; CHECK: vector.body:
; CHECK: %[[LOAD1:.*]] = load <vscale x 8 x float>
; CHECK: %[[LOAD2:.*]] = load <vscale x 8 x float>
; CHECK: %[[ADD1:.*]] = fadd fast <vscale x 8 x float> %[[LOAD1]]
; CHECK: %[[ADD2:.*]] = fadd fast <vscale x 8 x float> %[[LOAD2]]
; CHECK: middle.block:
; CHECK: %[[ADD:.*]] = fadd fast <vscale x 8 x float> %[[ADD2]], %[[ADD1]]
; CHECK-NEXT: call fast float @llvm.vector.reduce.fadd.nxv8f32(float -0.000000e+00, <vscale x 8 x float> %[[ADD]])
entry:
  br label %for.body

for.body:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
  %sum.07 = phi float [ 0.000000e+00, %entry ], [ %add, %for.body ]
  %arrayidx = getelementptr inbounds float, ptr %a, i64 %iv
  %0 = load float, ptr %arrayidx, align 4
  %add = fadd fast float %0, %sum.07
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond.not = icmp eq i64 %iv.next, %n
  br i1 %exitcond.not, label %for.end, label %for.body, !llvm.loop !0

for.end:
  ret float %add
}

; CHECK-REMARK: Scalable vectorization not supported for the reduction operations found in this loop.
; CHECK-REMARK: vectorized loop (vectorization width: 8, interleaved count: 2)
define bfloat @fadd_fast_bfloat(ptr noalias nocapture readonly %a, i64 %n) {
; CHECK-LABEL: @fadd_fast_bfloat
; CHECK: vector.body:
; CHECK: %[[LOAD1:.*]] = load <8 x bfloat>
; CHECK: %[[LOAD2:.*]] = load <8 x bfloat>
; CHECK: %[[FADD1:.*]] = fadd fast <8 x bfloat> %[[LOAD1]]
; CHECK: %[[FADD2:.*]] = fadd fast <8 x bfloat> %[[LOAD2]]
; CHECK: middle.block:
; CHECK: %[[RDX:.*]] = fadd fast <8 x bfloat> %[[FADD2]], %[[FADD1]]
; CHECK: call fast bfloat @llvm.vector.reduce.fadd.v8bf16(bfloat 0xR8000, <8 x bfloat> %[[RDX]])
entry:
  br label %for.body

for.body:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
  %sum.07 = phi bfloat [ 0.000000e+00, %entry ], [ %add, %for.body ]
  %arrayidx = getelementptr inbounds bfloat, ptr %a, i64 %iv
  %0 = load bfloat, ptr %arrayidx, align 4
  %add = fadd fast bfloat %0, %sum.07
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond.not = icmp eq i64 %iv.next, %n
  br i1 %exitcond.not, label %for.end, label %for.body, !llvm.loop !0

for.end:
  ret bfloat %add
}

; FMIN (fast)

; CHECK-REMARK: vectorized loop (vectorization width: vscale x 8, interleaved count: 2)
define float @fmin_fast(ptr noalias nocapture readonly %a, i64 %n) #0 {
; CHECK-LABEL: @fmin_fast
; CHECK: vector.body:
; CHECK: %[[LOAD1:.*]] = load <vscale x 8 x float>
; CHECK: %[[LOAD2:.*]] = load <vscale x 8 x float>
; CHECK: %[[FCMP1:.*]] = fcmp fast olt <vscale x 8 x float> %[[LOAD1]]
; CHECK: %[[FCMP2:.*]] = fcmp fast olt <vscale x 8 x float> %[[LOAD2]]
; CHECK: %[[SEL1:.*]] = select <vscale x 8 x i1> %[[FCMP1]], <vscale x 8 x float> %[[LOAD1]]
; CHECK: %[[SEL2:.*]] = select <vscale x 8 x i1> %[[FCMP2]], <vscale x 8 x float> %[[LOAD2]]
; CHECK: middle.block:
; CHECK: %[[FCMP:.*]] = fcmp fast olt <vscale x 8 x float> %[[SEL1]], %[[SEL2]]
; CHECK-NEXT: %[[SEL:.*]] = select fast <vscale x 8 x i1> %[[FCMP]], <vscale x 8 x float> %[[SEL1]], <vscale x 8 x float> %[[SEL2]]
; CHECK-NEXT: call fast float @llvm.vector.reduce.fmin.nxv8f32(<vscale x 8 x float> %[[SEL]])
entry:
  br label %for.body

for.body:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
  %sum.07 = phi float [ 0.000000e+00, %entry ], [ %.sroa.speculated, %for.body ]
  %arrayidx = getelementptr inbounds float, ptr %a, i64 %iv
  %0 = load float, ptr %arrayidx, align 4
  %cmp.i = fcmp fast olt float %0, %sum.07
  %.sroa.speculated = select i1 %cmp.i, float %0, float %sum.07
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond.not = icmp eq i64 %iv.next, %n
  br i1 %exitcond.not, label %for.end, label %for.body, !llvm.loop !0

for.end:
  ret float %.sroa.speculated
}

; FMAX (fast)

; CHECK-REMARK: vectorized loop (vectorization width: vscale x 8, interleaved count: 2)
define float @fmax_fast(ptr noalias nocapture readonly %a, i64 %n) #0 {
; CHECK-LABEL: @fmax_fast
; CHECK: vector.body:
; CHECK: %[[LOAD1:.*]] = load <vscale x 8 x float>
; CHECK: %[[LOAD2:.*]] = load <vscale x 8 x float>
; CHECK: %[[FCMP1:.*]] = fcmp fast ogt <vscale x 8 x float> %[[LOAD1]]
; CHECK: %[[FCMP2:.*]] = fcmp fast ogt <vscale x 8 x float> %[[LOAD2]]
; CHECK: %[[SEL1:.*]] = select <vscale x 8 x i1> %[[FCMP1]], <vscale x 8 x float> %[[LOAD1]]
; CHECK: %[[SEL2:.*]] = select <vscale x 8 x i1> %[[FCMP2]], <vscale x 8 x float> %[[LOAD2]]
; CHECK: middle.block:
; CHECK: %[[FCMP:.*]] = fcmp fast ogt <vscale x 8 x float> %[[SEL1]], %[[SEL2]]
; CHECK-NEXT: %[[SEL:.*]] = select fast <vscale x 8 x i1> %[[FCMP]], <vscale x 8 x float> %[[SEL1]], <vscale x 8 x float> %[[SEL2]]
; CHECK-NEXT: call fast float @llvm.vector.reduce.fmax.nxv8f32(<vscale x 8 x float> %[[SEL]])
entry:
  br label %for.body

for.body:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
  %sum.07 = phi float [ 0.000000e+00, %entry ], [ %.sroa.speculated, %for.body ]
  %arrayidx = getelementptr inbounds float, ptr %a, i64 %iv
  %0 = load float, ptr %arrayidx, align 4
  %cmp.i = fcmp fast ogt float %0, %sum.07
  %.sroa.speculated = select i1 %cmp.i, float %0, float %sum.07
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond.not = icmp eq i64 %iv.next, %n
  br i1 %exitcond.not, label %for.end, label %for.body, !llvm.loop !0

for.end:
  ret float %.sroa.speculated
}

; ADD (with reduction stored in invariant address)

; CHECK-REMARK: vectorized loop (vectorization width: vscale x 4, interleaved count: 2)
define void @invariant_store(ptr %dst, ptr readonly %src) {
; CHECK-LABEL: @invariant_store
; CHECK: vector.body:
; CHECK: %[[LOAD1:.*]] = load <vscale x 4 x i32>
; CHECK: %[[LOAD2:.*]] = load <vscale x 4 x i32>
; CHECK: %[[ADD1:.*]] = add <vscale x 4 x i32> %{{.*}}, %[[LOAD1]]
; CHECK: %[[ADD2:.*]] = add <vscale x 4 x i32> %{{.*}}, %[[LOAD2]]
; CHECK: middle.block:
; CHECK: %[[ADD:.*]] = add <vscale x 4 x i32> %[[ADD2]], %[[ADD1]]
; CHECK-NEXT: %[[SUM:.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> %[[ADD]])
; CHECK-NEXT: store i32 %[[SUM]], ptr %gep.dst, align 4
entry:
  %gep.dst = getelementptr inbounds i32, ptr %dst, i64 42
  store i32 0, ptr %gep.dst, align 4
  br label %for.body

for.body:
  %sum = phi i32 [ 0, %entry ], [ %add, %for.body ]
  %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
  %gep.src = getelementptr inbounds i32, ptr %src, i64 %indvars.iv
  %0 = load i32, ptr %gep.src, align 4
  %add = add nsw i32 %sum, %0
  store i32 %add, ptr %gep.dst, align 4
  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
  %exitcond = icmp eq i64 %indvars.iv.next, 1000
  br i1 %exitcond, label %for.cond.cleanup, label %for.body

for.cond.cleanup:
  ret void
}

; Reduction cannot be vectorized

; MUL

; CHECK-REMARK: Scalable vectorization not supported for the reduction operations found in this loop.
; CHECK-REMARK: vectorized loop (vectorization width: 4, interleaved count: 2)
define i32 @mul(ptr nocapture %a, ptr nocapture readonly %b, i64 %n) {
; CHECK-LABEL: @mul
; CHECK: vector.body:
; CHECK: %[[LOAD1:.*]] = load <4 x i32>
; CHECK: %[[LOAD2:.*]] = load <4 x i32>
; CHECK: %[[MUL1:.*]] = mul <4 x i32> %[[LOAD1]]
; CHECK: %[[MUL2:.*]] = mul <4 x i32> %[[LOAD2]]
; CHECK: middle.block:
; CHECK: %[[RDX:.*]] = mul <4 x i32> %[[MUL2]], %[[MUL1]]
; CHECK: call i32 @llvm.vector.reduce.mul.v4i32(<4 x i32> %[[RDX]])
entry:
  br label %for.body

for.body:                                         ; preds = %entry, %for.body
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
  %sum.07 = phi i32 [ 2, %entry ], [ %mul, %for.body ]
  %arrayidx = getelementptr inbounds i32, ptr %a, i64 %iv
  %0 = load i32, ptr %arrayidx, align 4
  %mul = mul nsw i32 %0, %sum.07
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond.not = icmp eq i64 %iv.next, %n
  br i1 %exitcond.not, label %for.end, label %for.body, !llvm.loop !0

for.end:                                          ; preds = %for.body, %entry
  ret i32 %mul
}

; Note: This test was added to ensure we always check the legality of reductions (and emit a warning
; if necessary) before checking for memory dependencies.
; CHECK-REMARK: Scalable vectorization not supported for the reduction operations found in this loop.
; CHECK-REMARK: vectorized loop (vectorization width: 4, interleaved count: 2)
define i32 @memory_dependence(ptr noalias nocapture %a, ptr noalias nocapture readonly %b, i64 %n) {
; CHECK-LABEL: @memory_dependence
; CHECK: vector.body:
; CHECK: %[[LOAD1:.*]] = load <4 x i32>
; CHECK: %[[LOAD2:.*]] = load <4 x i32>
; CHECK: %[[LOAD3:.*]] = load <4 x i32>
; CHECK: %[[LOAD4:.*]] = load <4 x i32>
; CHECK: %[[ADD1:.*]] = add nsw <4 x i32> %[[LOAD3]], %[[LOAD1]]
; CHECK: %[[ADD2:.*]] = add nsw <4 x i32> %[[LOAD4]], %[[LOAD2]]
; CHECK: %[[MUL1:.*]] = mul <4 x i32> %[[LOAD3]]
; CHECK: %[[MUL2:.*]] = mul <4 x i32> %[[LOAD4]]
; CHECK: middle.block:
; CHECK: %[[RDX:.*]] = mul <4 x i32> %[[MUL2]], %[[MUL1]]
; CHECK: call i32 @llvm.vector.reduce.mul.v4i32(<4 x i32> %[[RDX]])
entry:
  br label %for.body

for.body:
  %i = phi i64 [ %inc, %for.body ], [ 0, %entry ]
  %sum = phi i32 [ %mul, %for.body ], [ 2, %entry ]
  %arrayidx = getelementptr inbounds i32, ptr %a, i64 %i
  %0 = load i32, ptr %arrayidx, align 4
  %arrayidx1 = getelementptr inbounds i32, ptr %b, i64 %i
  %1 = load i32, ptr %arrayidx1, align 4
  %add = add nsw i32 %1, %0
  %add2 = add nuw nsw i64 %i, 32
  %arrayidx3 = getelementptr inbounds i32, ptr %a, i64 %add2
  store i32 %add, ptr %arrayidx3, align 4
  %mul = mul nsw i32 %1, %sum
  %inc = add nuw nsw i64 %i, 1
  %exitcond.not = icmp eq i64 %inc, %n
  br i1 %exitcond.not, label %for.end, label %for.body, !llvm.loop !0

for.end:
  ret i32 %mul
}

attributes #0 = { "no-nans-fp-math"="true" "no-signed-zeros-fp-math"="true" }
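
; Loop hints applied to every loop above: enable vectorization, request a scalable
; vectorization factor of 8, and interleave by 2.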
!0 = distinct !{!0, !1, !2, !3, !4}
!1 = !{!"llvm.loop.vectorize.width", i32 8}
!2 = !{!"llvm.loop.vectorize.scalable.enable", i1 true}
!3 = !{!"llvm.loop.interleave.count", i32 2}
!4 = !{!"llvm.loop.vectorize.enable", i1 true}