; The loop has a short trip count of 8 iterations. It should be vectorized,
; since no runtime checks or scalar tail loop are necessary.
; Two cases are tested: AVX (MaxVF = 8 = TripCount) and AVX512 (MaxVF = 16 > TripCount).
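;
; For reference, a C kernel along these lines (hypothetical; the exact original
; source is an assumption, not part of the test) would lower to the IR below
; when compiled with -ffast-math, which accounts for the 'fast' flags:
;
;   #pragma clang loop vectorize(assume_safety)
;   for (int i = 0; i < 8; i++)
;     A[i] += B[i];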
; RUN: opt < %s -loop-vectorize -mtriple=x86_64-apple-macosx10.8.0 -mcpu=corei7-avx -S | FileCheck %s
; RUN: opt < %s -loop-vectorize -mtriple=x86_64-apple-macosx10.8.0 -mcpu=skylake-avx512 -S | FileCheck %s
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
target triple = "x86_64-apple-macosx10.8.0"
define void @small_tc(float* noalias nocapture %A, float* noalias nocapture readonly %B) {
; CHECK-LABEL: @small_tc(
; CHECK: load <8 x float>, <8 x float>*
; CHECK: fadd fast <8 x float>
entry:
  br label %for.body

for.body:                                         ; preds = %entry, %for.body
  %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
  %arrayidx = getelementptr inbounds float, float* %B, i64 %indvars.iv
  %0 = load float, float* %arrayidx, align 4, !llvm.access.group !5
  %arrayidx2 = getelementptr inbounds float, float* %A, i64 %indvars.iv
  %1 = load float, float* %arrayidx2, align 4, !llvm.access.group !5
  %add = fadd fast float %0, %1
  store float %add, float* %arrayidx2, align 4, !llvm.access.group !5
  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
  %exitcond = icmp eq i64 %indvars.iv.next, 8
  br i1 %exitcond, label %for.end, label %for.body, !llvm.loop !4

for.end:                                          ; preds = %for.body
  ret void
}
!4 = distinct !{!4, !{!"llvm.loop.parallel_accesses", !5}}
!5 = distinct !{}
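
; Note on the metadata: !4 is the loop's metadata node, and its
; llvm.loop.parallel_accesses entry declares that the loads/stores tagged with
; access group !5 carry no loop-carried dependences, so the vectorizer can
; proceed without runtime memory checks for them.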