; RUN: opt -loop-vectorize -vectorizer-maximize-bandwidth -S -mattr=+sse2 --debug-only=loop-vectorize < %s 2>&1 | FileCheck %s --check-prefixes=CHECK,SSE2
; RUN: opt -loop-vectorize -vectorizer-maximize-bandwidth -S -mattr=+avx --debug-only=loop-vectorize < %s 2>&1 | FileCheck %s --check-prefixes=CHECK,AVX1
; RUN: opt -loop-vectorize -vectorizer-maximize-bandwidth -S -mattr=+avx2 --debug-only=loop-vectorize < %s 2>&1 | FileCheck %s --check-prefixes=CHECK,AVX2
; RUN: opt -loop-vectorize -vectorizer-maximize-bandwidth -S -mattr=+avx512bw,+avx512vl --debug-only=loop-vectorize < %s 2>&1 | FileCheck %s --check-prefixes=CHECK,AVX512
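; --debug-only output is only emitted by assertion-enabled builds of opt.
; REQUIRES: asserts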

target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"

@A = global [1024 x float] zeroinitializer, align 128
@B = global [1024 x i8] zeroinitializer, align 128
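
; Each loop iteration loads four consecutive floats from @A (a stride-4
; interleave group), sums them, converts the sum to i8 and stores it into @B.
; The CHECK lines below verify the vectorizer's estimated cost of the first
; load of the group for each candidate VF on SSE2, AVX1, AVX2 and AVX512.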
; CHECK: LV: Checking a loop in "test"

; SSE2: LV: Found an estimated cost of 1 for VF 1 For instruction: %v0 = load float, float* %in0, align 4
; SSE2: LV: Found an estimated cost of 12 for VF 2 For instruction: %v0 = load float, float* %in0, align 4
; SSE2: LV: Found an estimated cost of 28 for VF 4 For instruction: %v0 = load float, float* %in0, align 4
; SSE2: LV: Found an estimated cost of 56 for VF 8 For instruction: %v0 = load float, float* %in0, align 4
; SSE2: LV: Found an estimated cost of 112 for VF 16 For instruction: %v0 = load float, float* %in0, align 4

; AVX1: LV: Found an estimated cost of 1 for VF 1 For instruction: %v0 = load float, float* %in0, align 4
; AVX1: LV: Found an estimated cost of 15 for VF 2 For instruction: %v0 = load float, float* %in0, align 4
; AVX1: LV: Found an estimated cost of 34 for VF 4 For instruction: %v0 = load float, float* %in0, align 4
; AVX1: LV: Found an estimated cost of 76 for VF 8 For instruction: %v0 = load float, float* %in0, align 4
; AVX1: LV: Found an estimated cost of 152 for VF 16 For instruction: %v0 = load float, float* %in0, align 4
; AVX1: LV: Found an estimated cost of 304 for VF 32 For instruction: %v0 = load float, float* %in0, align 4

; AVX2: LV: Found an estimated cost of 1 for VF 1 For instruction: %v0 = load float, float* %in0, align 4
; AVX2: LV: Found an estimated cost of 5 for VF 2 For instruction: %v0 = load float, float* %in0, align 4
; AVX2: LV: Found an estimated cost of 10 for VF 4 For instruction: %v0 = load float, float* %in0, align 4
; AVX2: LV: Found an estimated cost of 20 for VF 8 For instruction: %v0 = load float, float* %in0, align 4
; AVX2: LV: Found an estimated cost of 40 for VF 16 For instruction: %v0 = load float, float* %in0, align 4
; AVX2: LV: Found an estimated cost of 84 for VF 32 For instruction: %v0 = load float, float* %in0, align 4

; AVX512: LV: Found an estimated cost of 1 for VF 1 For instruction: %v0 = load float, float* %in0, align 4
; AVX512: LV: Found an estimated cost of 5 for VF 2 For instruction: %v0 = load float, float* %in0, align 4
; AVX512: LV: Found an estimated cost of 5 for VF 4 For instruction: %v0 = load float, float* %in0, align 4
; AVX512: LV: Found an estimated cost of 8 for VF 8 For instruction: %v0 = load float, float* %in0, align 4
; AVX512: LV: Found an estimated cost of 22 for VF 16 For instruction: %v0 = load float, float* %in0, align 4
; AVX512: LV: Found an estimated cost of 92 for VF 32 For instruction: %v0 = load float, float* %in0, align 4

; CHECK-NOT: LV: Found an estimated cost of {{[0-9]+}} for VF {{[0-9]+}} For instruction: %v0 = load float, float* %in0, align 4
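
define void @test() {
entry:
  br label %for.body

for.body: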
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
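
  ; Offsets of the four members of the interleave group accessed this iteration.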
  %iv.0 = add nuw nsw i64 %iv, 0
  %iv.1 = add nuw nsw i64 %iv, 1
  %iv.2 = add nuw nsw i64 %iv, 2
  %iv.3 = add nuw nsw i64 %iv, 3
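
  ; Addresses of the four consecutive floats in @A.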
  %in0 = getelementptr inbounds [1024 x float], [1024 x float]* @A, i64 0, i64 %iv.0
  %in1 = getelementptr inbounds [1024 x float], [1024 x float]* @A, i64 0, i64 %iv.1
  %in2 = getelementptr inbounds [1024 x float], [1024 x float]* @A, i64 0, i64 %iv.2
  %in3 = getelementptr inbounds [1024 x float], [1024 x float]* @A, i64 0, i64 %iv.3
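
  ; The stride-4 interleaved loads; the cost of %v0 is what the CHECK lines inspect.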
  %v0 = load float, float* %in0
  %v1 = load float, float* %in1
  %v2 = load float, float* %in2
  %v3 = load float, float* %in3
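
  ; Sum the four lanes so every loaded value is used.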
  %reduce.add.0 = fadd float %v0, %v1
  %reduce.add.1 = fadd float %reduce.add.0, %v2
  %reduce.add.2 = fadd float %reduce.add.1, %v3

  %reduce.add.2.narrow = fptoui float %reduce.add.2 to i8
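
  ; Store the narrowed result to @B so the loop has an observable effect.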
  %out = getelementptr inbounds [1024 x i8], [1024 x i8]* @B, i64 0, i64 %iv.0
  store i8 %reduce.add.2.narrow, i8* %out
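
  ; Advance by the interleave factor (4) and iterate over all 1024 elements.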
  %iv.next = add nuw nsw i64 %iv.0, 4
  %cmp = icmp ult i64 %iv.next, 1024
  br i1 %cmp, label %for.body, label %for.cond.cleanup
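
for.cond.cleanup:
  ret void
}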