; RUN: opt < %s -passes=loop-vectorize -scalable-vectorization=on \
; RUN:   -riscv-v-vector-bits-min=128 -riscv-v-vector-bits-max=128 \
; RUN:   -pass-remarks=loop-vectorize -pass-remarks-analysis=loop-vectorize \
; RUN:   -pass-remarks-missed=loop-vectorize -mtriple riscv64-linux-gnu \
; RUN:   -force-target-max-vector-interleave=2 -mattr=+v,+f -S 2>%t \
; RUN:   | FileCheck %s -check-prefix=CHECK
; RUN: cat %t | FileCheck %s -check-prefix=CHECK-REMARK
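
; The RUN lines pin the vector register width to 128 bits
; (-riscv-v-vector-bits-min/max=128) and redirect the optimization remarks
; (emitted on stderr) into %t, which the second FileCheck invocation matches
; against the CHECK-REMARK lines.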

; Reduction can be vectorized
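;
; Each loop below has the same scalar reduction shape. As a rough C
; equivalent of the integer cases (illustration only; the name is made up):
;
;   int reduce_add(int *a, long n) {
;     int sum = 2;
;     for (long i = 0; i < n; ++i)
;       sum += a[i];   // or |=, &=, ^=, min/max, as in the tests below
;     return sum;
;   }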

; CHECK-REMARK: vectorized loop (vectorization width: vscale x 8, interleaved count: 2)
define i32 @add(ptr nocapture %a, ptr nocapture readonly %b, i64 %n) {
; CHECK-LABEL: @add
; CHECK: vector.body:
; CHECK: %[[LOAD1:.*]] = load <vscale x 8 x i32>
; CHECK: %[[LOAD2:.*]] = load <vscale x 8 x i32>
; CHECK: %[[ADD1:.*]] = add <vscale x 8 x i32> %[[LOAD1]]
; CHECK: %[[ADD2:.*]] = add <vscale x 8 x i32> %[[LOAD2]]
; CHECK: middle.block:
; CHECK: %[[ADD:.*]] = add <vscale x 8 x i32> %[[ADD2]], %[[ADD1]]
; CHECK-NEXT: call i32 @llvm.vector.reduce.add.nxv8i32(<vscale x 8 x i32> %[[ADD]])
entry:
  br label %for.body

for.body:                                         ; preds = %entry, %for.body
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
  %sum.07 = phi i32 [ 2, %entry ], [ %add, %for.body ]
  %arrayidx = getelementptr inbounds i32, ptr %a, i64 %iv
  %0 = load i32, ptr %arrayidx, align 4
  %add = add nsw i32 %0, %sum.07
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond.not = icmp eq i64 %iv.next, %n
  br i1 %exitcond.not, label %for.end, label %for.body, !llvm.loop !0

for.end:                                          ; preds = %for.body, %entry
  ret i32 %add
}

; CHECK-REMARK: vectorized loop (vectorization width: vscale x 8, interleaved count: 2)
define i32 @or(ptr nocapture %a, ptr nocapture readonly %b, i64 %n) {
; CHECK-LABEL: @or
; CHECK: vector.body:
; CHECK: %[[LOAD1:.*]] = load <vscale x 8 x i32>
; CHECK: %[[LOAD2:.*]] = load <vscale x 8 x i32>
; CHECK: %[[OR1:.*]] = or <vscale x 8 x i32> %[[LOAD1]]
; CHECK: %[[OR2:.*]] = or <vscale x 8 x i32> %[[LOAD2]]
; CHECK: middle.block:
; CHECK: %[[OR:.*]] = or <vscale x 8 x i32> %[[OR2]], %[[OR1]]
; CHECK-NEXT: call i32 @llvm.vector.reduce.or.nxv8i32(<vscale x 8 x i32> %[[OR]])
entry:
  br label %for.body

for.body:                                         ; preds = %entry, %for.body
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
  %sum.07 = phi i32 [ 2, %entry ], [ %or, %for.body ]
  %arrayidx = getelementptr inbounds i32, ptr %a, i64 %iv
  %0 = load i32, ptr %arrayidx, align 4
  %or = or i32 %0, %sum.07
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond.not = icmp eq i64 %iv.next, %n
  br i1 %exitcond.not, label %for.end, label %for.body, !llvm.loop !0

for.end:                                          ; preds = %for.body, %entry
  ret i32 %or
}

; CHECK-REMARK: vectorized loop (vectorization width: vscale x 8, interleaved count: 2)
define i32 @and(ptr nocapture %a, ptr nocapture readonly %b, i64 %n) {
; CHECK-LABEL: @and
; CHECK: vector.body:
; CHECK: %[[LOAD1:.*]] = load <vscale x 8 x i32>
; CHECK: %[[LOAD2:.*]] = load <vscale x 8 x i32>
; CHECK: %[[AND1:.*]] = and <vscale x 8 x i32> %[[LOAD1]]
; CHECK: %[[AND2:.*]] = and <vscale x 8 x i32> %[[LOAD2]]
; CHECK: middle.block:
; CHECK: %[[AND:.*]] = and <vscale x 8 x i32> %[[AND2]], %[[AND1]]
; CHECK-NEXT: call i32 @llvm.vector.reduce.and.nxv8i32(<vscale x 8 x i32> %[[AND]])
entry:
  br label %for.body

for.body:                                         ; preds = %entry, %for.body
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
  %sum.07 = phi i32 [ 2, %entry ], [ %and, %for.body ]
  %arrayidx = getelementptr inbounds i32, ptr %a, i64 %iv
  %0 = load i32, ptr %arrayidx, align 4
  %and = and i32 %0, %sum.07
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond.not = icmp eq i64 %iv.next, %n
  br i1 %exitcond.not, label %for.end, label %for.body, !llvm.loop !0

for.end:                                          ; preds = %for.body, %entry
  ret i32 %and
}

; CHECK-REMARK: vectorized loop (vectorization width: vscale x 8, interleaved count: 2)
define i32 @xor(ptr nocapture %a, ptr nocapture readonly %b, i64 %n) {
; CHECK-LABEL: @xor
; CHECK: vector.body:
; CHECK: %[[LOAD1:.*]] = load <vscale x 8 x i32>
; CHECK: %[[LOAD2:.*]] = load <vscale x 8 x i32>
; CHECK: %[[XOR1:.*]] = xor <vscale x 8 x i32> %[[LOAD1]]
; CHECK: %[[XOR2:.*]] = xor <vscale x 8 x i32> %[[LOAD2]]
; CHECK: middle.block:
; CHECK: %[[XOR:.*]] = xor <vscale x 8 x i32> %[[XOR2]], %[[XOR1]]
; CHECK-NEXT: call i32 @llvm.vector.reduce.xor.nxv8i32(<vscale x 8 x i32> %[[XOR]])
entry:
  br label %for.body

for.body:                                         ; preds = %entry, %for.body
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
  %sum.07 = phi i32 [ 2, %entry ], [ %xor, %for.body ]
  %arrayidx = getelementptr inbounds i32, ptr %a, i64 %iv
  %0 = load i32, ptr %arrayidx, align 4
  %xor = xor i32 %0, %sum.07
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond.not = icmp eq i64 %iv.next, %n
  br i1 %exitcond.not, label %for.end, label %for.body, !llvm.loop !0

for.end:                                          ; preds = %for.body, %entry
  ret i32 %xor
}
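
; The icmp/select idioms in @smin and @umax below are recognized by the
; vectorizer as integer min/max reductions.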

; CHECK-REMARK: vectorized loop (vectorization width: vscale x 8, interleaved count: 2)
define i32 @smin(ptr nocapture %a, ptr nocapture readonly %b, i64 %n) {
; CHECK-LABEL: @smin
; CHECK: vector.body:
; CHECK: %[[LOAD1:.*]] = load <vscale x 8 x i32>
; CHECK: %[[LOAD2:.*]] = load <vscale x 8 x i32>
; CHECK: %[[ICMP1:.*]] = icmp slt <vscale x 8 x i32> %[[LOAD1]]
; CHECK: %[[ICMP2:.*]] = icmp slt <vscale x 8 x i32> %[[LOAD2]]
; CHECK: %[[SEL1:.*]] = select <vscale x 8 x i1> %[[ICMP1]], <vscale x 8 x i32> %[[LOAD1]]
; CHECK: %[[SEL2:.*]] = select <vscale x 8 x i1> %[[ICMP2]], <vscale x 8 x i32> %[[LOAD2]]
; CHECK: middle.block:
; CHECK: %[[RDX:.*]] = call <vscale x 8 x i32> @llvm.smin.nxv8i32(<vscale x 8 x i32> %[[SEL1]], <vscale x 8 x i32> %[[SEL2]])
; CHECK-NEXT: call i32 @llvm.vector.reduce.smin.nxv8i32(<vscale x 8 x i32> %[[RDX]])
entry:
  br label %for.body

for.body:                                         ; preds = %entry, %for.body
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
  %sum.010 = phi i32 [ 2, %entry ], [ %.sroa.speculated, %for.body ]
  %arrayidx = getelementptr inbounds i32, ptr %a, i64 %iv
  %0 = load i32, ptr %arrayidx, align 4
  %cmp.i = icmp slt i32 %0, %sum.010
  %.sroa.speculated = select i1 %cmp.i, i32 %0, i32 %sum.010
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond.not = icmp eq i64 %iv.next, %n
  br i1 %exitcond.not, label %for.end, label %for.body, !llvm.loop !0

for.end:                                          ; preds = %for.body, %entry
  ret i32 %.sroa.speculated
}

; CHECK-REMARK: vectorized loop (vectorization width: vscale x 8, interleaved count: 2)
define i32 @umax(ptr nocapture %a, ptr nocapture readonly %b, i64 %n) {
; CHECK-LABEL: @umax
; CHECK: vector.body:
; CHECK: %[[LOAD1:.*]] = load <vscale x 8 x i32>
; CHECK: %[[LOAD2:.*]] = load <vscale x 8 x i32>
; CHECK: %[[ICMP1:.*]] = icmp ugt <vscale x 8 x i32> %[[LOAD1]]
; CHECK: %[[ICMP2:.*]] = icmp ugt <vscale x 8 x i32> %[[LOAD2]]
; CHECK: %[[SEL1:.*]] = select <vscale x 8 x i1> %[[ICMP1]], <vscale x 8 x i32> %[[LOAD1]]
; CHECK: %[[SEL2:.*]] = select <vscale x 8 x i1> %[[ICMP2]], <vscale x 8 x i32> %[[LOAD2]]
; CHECK: middle.block:
; CHECK: %[[RDX:.*]] = call <vscale x 8 x i32> @llvm.umax.nxv8i32(<vscale x 8 x i32> %[[SEL1]], <vscale x 8 x i32> %[[SEL2]])
; CHECK-NEXT: call i32 @llvm.vector.reduce.umax.nxv8i32(<vscale x 8 x i32> %[[RDX]])
entry:
  br label %for.body

for.body:                                         ; preds = %entry, %for.body
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
  %sum.010 = phi i32 [ 2, %entry ], [ %.sroa.speculated, %for.body ]
  %arrayidx = getelementptr inbounds i32, ptr %a, i64 %iv
  %0 = load i32, ptr %arrayidx, align 4
  %cmp.i = icmp ugt i32 %0, %sum.010
  %.sroa.speculated = select i1 %cmp.i, i32 %0, i32 %sum.010
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond.not = icmp eq i64 %iv.next, %n
  br i1 %exitcond.not, label %for.end, label %for.body, !llvm.loop !0

for.end:                                          ; preds = %for.body, %entry
  ret i32 %.sroa.speculated
}

; CHECK-REMARK: vectorized loop (vectorization width: vscale x 8, interleaved count: 2)
define float @fadd_fast(ptr noalias nocapture readonly %a, i64 %n) {
; CHECK-LABEL: @fadd_fast
; CHECK: vector.body:
; CHECK: %[[LOAD1:.*]] = load <vscale x 8 x float>
; CHECK: %[[LOAD2:.*]] = load <vscale x 8 x float>
; CHECK: %[[ADD1:.*]] = fadd fast <vscale x 8 x float> %[[LOAD1]]
; CHECK: %[[ADD2:.*]] = fadd fast <vscale x 8 x float> %[[LOAD2]]
; CHECK: middle.block:
; CHECK: %[[ADD:.*]] = fadd fast <vscale x 8 x float> %[[ADD2]], %[[ADD1]]
; CHECK-NEXT: call fast float @llvm.vector.reduce.fadd.nxv8f32(float -0.000000e+00, <vscale x 8 x float> %[[ADD]])
entry:
  br label %for.body

for.body:                                         ; preds = %entry, %for.body
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
  %sum.07 = phi float [ 0.000000e+00, %entry ], [ %add, %for.body ]
  %arrayidx = getelementptr inbounds float, ptr %a, i64 %iv
  %0 = load float, ptr %arrayidx, align 4
  %add = fadd fast float %0, %sum.07
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond.not = icmp eq i64 %iv.next, %n
  br i1 %exitcond.not, label %for.end, label %for.body, !llvm.loop !0

for.end:                                          ; preds = %for.body, %entry
  ret float %add
}

; CHECK-REMARK: Scalable vectorization not supported for the reduction operations found in this loop.
; CHECK-REMARK: vectorized loop (vectorization width: 16, interleaved count: 2)
define bfloat @fadd_fast_bfloat(ptr noalias nocapture readonly %a, i64 %n) {
; CHECK-LABEL: @fadd_fast_bfloat
; CHECK: vector.body:
; CHECK: %[[LOAD1:.*]] = load <16 x bfloat>
; CHECK: %[[LOAD2:.*]] = load <16 x bfloat>
; CHECK: %[[FADD1:.*]] = fadd fast <16 x bfloat> %[[LOAD1]]
; CHECK: %[[FADD2:.*]] = fadd fast <16 x bfloat> %[[LOAD2]]
; CHECK: middle.block:
; CHECK: %[[RDX:.*]] = fadd fast <16 x bfloat> %[[FADD2]], %[[FADD1]]
; CHECK: call fast bfloat @llvm.vector.reduce.fadd.v16bf16(bfloat 0xR8000, <16 x bfloat> %[[RDX]])
entry:
  br label %for.body

for.body:                                         ; preds = %entry, %for.body
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
  %sum.07 = phi bfloat [ 0.000000e+00, %entry ], [ %add, %for.body ]
  %arrayidx = getelementptr inbounds bfloat, ptr %a, i64 %iv
  %0 = load bfloat, ptr %arrayidx, align 4
  %add = fadd fast bfloat %0, %sum.07
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond.not = icmp eq i64 %iv.next, %n
  br i1 %exitcond.not, label %for.end, label %for.body, !llvm.loop !0

for.end:                                          ; preds = %for.body, %entry
  ret bfloat %add
}

; CHECK-REMARK: vectorized loop (vectorization width: vscale x 8, interleaved count: 2)
define float @fmin_fast(ptr noalias nocapture readonly %a, i64 %n) #0 {
; CHECK-LABEL: @fmin_fast
; CHECK: vector.body:
; CHECK: %[[LOAD1:.*]] = load <vscale x 8 x float>
; CHECK: %[[LOAD2:.*]] = load <vscale x 8 x float>
; CHECK: %[[FCMP1:.*]] = fcmp olt <vscale x 8 x float> %[[LOAD1]]
; CHECK: %[[FCMP2:.*]] = fcmp olt <vscale x 8 x float> %[[LOAD2]]
; CHECK: %[[SEL1:.*]] = select <vscale x 8 x i1> %[[FCMP1]], <vscale x 8 x float> %[[LOAD1]]
; CHECK: %[[SEL2:.*]] = select <vscale x 8 x i1> %[[FCMP2]], <vscale x 8 x float> %[[LOAD2]]
; CHECK: middle.block:
; CHECK: %[[FCMP:.*]] = fcmp olt <vscale x 8 x float> %[[SEL1]], %[[SEL2]]
; CHECK-NEXT: %[[SEL:.*]] = select <vscale x 8 x i1> %[[FCMP]], <vscale x 8 x float> %[[SEL1]], <vscale x 8 x float> %[[SEL2]]
; CHECK-NEXT: call float @llvm.vector.reduce.fmin.nxv8f32(<vscale x 8 x float> %[[SEL]])
entry:
  br label %for.body

for.body:                                         ; preds = %entry, %for.body
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
  %sum.07 = phi float [ 0.000000e+00, %entry ], [ %.sroa.speculated, %for.body ]
  %arrayidx = getelementptr inbounds float, ptr %a, i64 %iv
  %0 = load float, ptr %arrayidx, align 4
  %cmp.i = fcmp olt float %0, %sum.07
  %.sroa.speculated = select i1 %cmp.i, float %0, float %sum.07
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond.not = icmp eq i64 %iv.next, %n
  br i1 %exitcond.not, label %for.end, label %for.body, !llvm.loop !0

for.end:                                          ; preds = %for.body, %entry
  ret float %.sroa.speculated
}

; CHECK-REMARK: vectorized loop (vectorization width: vscale x 8, interleaved count: 2)
define float @fmax_fast(ptr noalias nocapture readonly %a, i64 %n) #0 {
; CHECK-LABEL: @fmax_fast
; CHECK: vector.body:
; CHECK: %[[LOAD1:.*]] = load <vscale x 8 x float>
; CHECK: %[[LOAD2:.*]] = load <vscale x 8 x float>
; CHECK: %[[FCMP1:.*]] = fcmp fast ogt <vscale x 8 x float> %[[LOAD1]]
; CHECK: %[[FCMP2:.*]] = fcmp fast ogt <vscale x 8 x float> %[[LOAD2]]
; CHECK: %[[SEL1:.*]] = select <vscale x 8 x i1> %[[FCMP1]], <vscale x 8 x float> %[[LOAD1]]
; CHECK: %[[SEL2:.*]] = select <vscale x 8 x i1> %[[FCMP2]], <vscale x 8 x float> %[[LOAD2]]
; CHECK: middle.block:
; CHECK: %[[FCMP:.*]] = fcmp fast ogt <vscale x 8 x float> %[[SEL1]], %[[SEL2]]
; CHECK-NEXT: %[[SEL:.*]] = select fast <vscale x 8 x i1> %[[FCMP]], <vscale x 8 x float> %[[SEL1]], <vscale x 8 x float> %[[SEL2]]
; CHECK-NEXT: call fast float @llvm.vector.reduce.fmax.nxv8f32(<vscale x 8 x float> %[[SEL]])
entry:
  br label %for.body

for.body:                                         ; preds = %entry, %for.body
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
  %sum.07 = phi float [ 0.000000e+00, %entry ], [ %.sroa.speculated, %for.body ]
  %arrayidx = getelementptr inbounds float, ptr %a, i64 %iv
  %0 = load float, ptr %arrayidx, align 4
  %cmp.i = fcmp fast ogt float %0, %sum.07
  %.sroa.speculated = select i1 %cmp.i, float %0, float %sum.07
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond.not = icmp eq i64 %iv.next, %n
  br i1 %exitcond.not, label %for.end, label %for.body, !llvm.loop !0

for.end:                                          ; preds = %for.body, %entry
  ret float %.sroa.speculated
}

; Reduction cannot be vectorized with scalable vectors; the vectorizer falls
; back to a fixed-width VF, as the remarks below verify.
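;
; A rough C equivalent of @mul below (illustration only; the name is made up):
;
;   int reduce_mul(int *a, long n) {
;     int prod = 2;
;     for (long i = 0; i < n; ++i)
;       prod *= a[i];
;     return prod;
;   }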

; CHECK-REMARK: Scalable vectorization not supported for the reduction operations found in this loop.
; CHECK-REMARK: vectorized loop (vectorization width: 8, interleaved count: 2)
define i32 @mul(ptr nocapture %a, ptr nocapture readonly %b, i64 %n) {
; CHECK-LABEL: @mul
; CHECK: vector.body:
; CHECK: %[[LOAD1:.*]] = load <8 x i32>
; CHECK: %[[LOAD2:.*]] = load <8 x i32>
; CHECK: %[[MUL1:.*]] = mul <8 x i32> %[[LOAD1]]
; CHECK: %[[MUL2:.*]] = mul <8 x i32> %[[LOAD2]]
; CHECK: middle.block:
; CHECK: %[[RDX:.*]] = mul <8 x i32> %[[MUL2]], %[[MUL1]]
; CHECK: call i32 @llvm.vector.reduce.mul.v8i32(<8 x i32> %[[RDX]])
entry:
  br label %for.body

for.body:                                         ; preds = %entry, %for.body
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
  %sum.07 = phi i32 [ 2, %entry ], [ %mul, %for.body ]
  %arrayidx = getelementptr inbounds i32, ptr %a, i64 %iv
  %0 = load i32, ptr %arrayidx, align 4
  %mul = mul nsw i32 %0, %sum.07
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond.not = icmp eq i64 %iv.next, %n
  br i1 %exitcond.not, label %for.end, label %for.body, !llvm.loop !0

for.end:                                          ; preds = %for.body, %entry
  ret i32 %mul
}

; Note: This test was added to ensure we always check the legality of
; reductions (and emit a warning if necessary) before checking for memory
; dependencies. A rough C equivalent of the loop follows.
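;
;   int memory_dependence(int *a, int *b, long n) {  // illustration only
;     int sum = 2;
;     for (long i = 0; i < n; ++i) {
;       a[i + 32] = a[i] + b[i];   // memory dependence at distance 32
;       sum *= b[i];               // mul reduction
;     }
;     return sum;
;   }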

; CHECK-REMARK: Scalable vectorization not supported for the reduction operations found in this loop.
; CHECK-REMARK: vectorized loop (vectorization width: 8, interleaved count: 2)
define i32 @memory_dependence(ptr noalias nocapture %a, ptr noalias nocapture readonly %b, i64 %n) {
; CHECK-LABEL: @memory_dependence
; CHECK: vector.body:
; CHECK: %[[LOAD1:.*]] = load <8 x i32>
; CHECK: %[[LOAD2:.*]] = load <8 x i32>
; CHECK: %[[LOAD3:.*]] = load <8 x i32>
; CHECK: %[[LOAD4:.*]] = load <8 x i32>
; CHECK: %[[ADD1:.*]] = add nsw <8 x i32> %[[LOAD3]], %[[LOAD1]]
; CHECK: %[[ADD2:.*]] = add nsw <8 x i32> %[[LOAD4]], %[[LOAD2]]
; CHECK: %[[MUL1:.*]] = mul <8 x i32> %[[LOAD3]]
; CHECK: %[[MUL2:.*]] = mul <8 x i32> %[[LOAD4]]
; CHECK: middle.block:
; CHECK: %[[RDX:.*]] = mul <8 x i32> %[[MUL2]], %[[MUL1]]
; CHECK: call i32 @llvm.vector.reduce.mul.v8i32(<8 x i32> %[[RDX]])
entry:
  br label %for.body

for.body:                                         ; preds = %entry, %for.body
  %i = phi i64 [ %inc, %for.body ], [ 0, %entry ]
  %sum = phi i32 [ %mul, %for.body ], [ 2, %entry ]
  %arrayidx = getelementptr inbounds i32, ptr %a, i64 %i
  %0 = load i32, ptr %arrayidx, align 4
  %arrayidx1 = getelementptr inbounds i32, ptr %b, i64 %i
  %1 = load i32, ptr %arrayidx1, align 4
  %add = add nsw i32 %1, %0
  %add2 = add nuw nsw i64 %i, 32
  %arrayidx3 = getelementptr inbounds i32, ptr %a, i64 %add2
  store i32 %add, ptr %arrayidx3, align 4
  %mul = mul nsw i32 %1, %sum
  %inc = add nuw nsw i64 %i, 1
  %exitcond.not = icmp eq i64 %inc, %n
  br i1 %exitcond.not, label %for.end, label %for.body, !llvm.loop !0

for.end:                                          ; preds = %for.body, %entry
  ret i32 %mul
}

; CHECK-REMARK: vectorized loop (vectorization width: vscale x 4, interleaved count: 2)
define float @fmuladd(ptr %a, ptr %b, i64 %n) {
; CHECK-LABEL: @fmuladd(
; CHECK: vector.body:
; CHECK: [[WIDE_LOAD:%.*]] = load <vscale x 4 x float>
; CHECK: [[WIDE_LOAD2:%.*]] = load <vscale x 4 x float>
; CHECK: [[WIDE_LOAD3:%.*]] = load <vscale x 4 x float>
; CHECK: [[WIDE_LOAD4:%.*]] = load <vscale x 4 x float>
; CHECK: [[MULADD1:%.*]] = call reassoc <vscale x 4 x float> @llvm.fmuladd.nxv4f32(<vscale x 4 x float> [[WIDE_LOAD]], <vscale x 4 x float> [[WIDE_LOAD3]],
; CHECK: [[MULADD2:%.*]] = call reassoc <vscale x 4 x float> @llvm.fmuladd.nxv4f32(<vscale x 4 x float> [[WIDE_LOAD2]], <vscale x 4 x float> [[WIDE_LOAD4]],
; CHECK: middle.block:
; CHECK: [[BIN_RDX:%.*]] = fadd reassoc <vscale x 4 x float> [[MULADD2]], [[MULADD1]]
; CHECK: call reassoc float @llvm.vector.reduce.fadd.nxv4f32(float -0.000000e+00, <vscale x 4 x float> [[BIN_RDX]])
entry:
  br label %for.body

for.body:                                         ; preds = %entry, %for.body
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
  %sum.07 = phi float [ 0.000000e+00, %entry ], [ %muladd, %for.body ]
  %arrayidx = getelementptr inbounds float, ptr %a, i64 %iv
  %0 = load float, ptr %arrayidx, align 4
  %arrayidx2 = getelementptr inbounds float, ptr %b, i64 %iv
  %1 = load float, ptr %arrayidx2, align 4
  %muladd = tail call reassoc float @llvm.fmuladd.f32(float %0, float %1, float %sum.07)
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond.not = icmp eq i64 %iv.next, %n
  br i1 %exitcond.not, label %for.end, label %for.body, !llvm.loop !1

for.end:                                          ; preds = %for.body, %entry
  ret float %muladd
}

declare float @llvm.fmuladd.f32(float, float, float)

attributes #0 = { "no-nans-fp-math"="true" "no-signed-zeros-fp-math"="true" }
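
; The no-NaNs and no-signed-zeros guarantees in #0 are what allow the plain
; fcmp/select idioms in @fmin_fast and @fmax_fast to be treated as fmin/fmax
; reductions.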

!0 = distinct !{!0, !1, !2, !3, !4}
!1 = !{!"llvm.loop.vectorize.width", i32 8}
!2 = !{!"llvm.loop.vectorize.scalable.enable", i1 true}
!3 = !{!"llvm.loop.interleave.count", i32 2}
!4 = !{!"llvm.loop.vectorize.enable", i1 true}
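
; For reference, the hints in !0 correspond roughly to the clang pragma
; (illustrative mapping, not part of the test):
;
;   #pragma clang loop vectorize(enable) vectorize_width(8, scalable) interleave_count(2)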