1 ; RUN: opt -S -passes=loop-vectorize -debug-only=loop-vectorize -mattr=avx512fp16 %s 2>&1 | FileCheck %s
; REQUIRES: asserts
3 target datalayout = "e-m:e-p:32:32-f64:32:64-f80:32-n8:16:32-S128"
4 target triple = "i386-unknown-linux-gnu"
6 @src = common local_unnamed_addr global [120 x half] zeroinitializer, align 4
7 @dst = common local_unnamed_addr global [120 x half] zeroinitializer, align 4
9 ; Function Attrs: norecurse nounwind
; void stride8(half k, i32 width_):
;   for (i = 0; i < width_; i += 8)
;     dst[i+j] += src[i+j] * k   for j = 0..7   (hand-unrolled x8,
;                                               fast-math half ops)
; Eight consecutive half elements of @src/@dst are read-modify-written
; per iteration; the CHECK line pins the cost-model estimate reported
; for the first load at VF 32 (avx512fp16 enabled via -mattr).
; NOTE(review): interior lines of this chunk appear to be missing
; (no 'entry:' label, no terminator in %for.body.lr.ph, no 'ret void'
; or closing brace) — confirm against the full file.
10 define void @stride8(half %k, i32 %width_) {
13 ; CHECK: Found an estimated cost of 148 for VF 32 For instruction: %0 = load half
; Guard: run the loop only when width_ > 0.
15 %cmp72 = icmp sgt i32 %width_, 0
16 br i1 %cmp72, label %for.body.lr.ph, label %for.cond.cleanup
18 for.body.lr.ph: ; preds = %entry
21 for.cond.cleanup.loopexit: ; preds = %for.body
22 br label %for.cond.cleanup
24 for.cond.cleanup: ; preds = %for.cond.cleanup.loopexit, %entry
27 for.body: ; preds = %for.body.lr.ph, %for.body
; Induction variable i: starts at 0, advances by 8 (%add46 below), so
; its low 3 bits are always zero — which is why the lane offsets can
; use 'or disjoint' instead of 'add'.
28 %i.073 = phi i32 [ 0, %for.body.lr.ph ], [ %add46, %for.body ]
; j = 0: dst[i] += src[i] * k
29 %arrayidx = getelementptr inbounds [120 x half], ptr @src, i32 0, i32 %i.073
30 %0 = load half, ptr %arrayidx, align 4
31 %mul = fmul fast half %0, %k
32 %arrayidx2 = getelementptr inbounds [120 x half], ptr @dst, i32 0, i32 %i.073
33 %1 = load half, ptr %arrayidx2, align 4
34 %add3 = fadd fast half %1, %mul
35 store half %add3, ptr %arrayidx2, align 4
; j = 1: dst[i+1] += src[i+1] * k  ('or disjoint' == add here)
36 %add4 = or disjoint i32 %i.073, 1
37 %arrayidx5 = getelementptr inbounds [120 x half], ptr @src, i32 0, i32 %add4
38 %2 = load half, ptr %arrayidx5, align 4
39 %mul6 = fmul fast half %2, %k
40 %arrayidx8 = getelementptr inbounds [120 x half], ptr @dst, i32 0, i32 %add4
41 %3 = load half, ptr %arrayidx8, align 4
42 %add9 = fadd fast half %3, %mul6
43 store half %add9, ptr %arrayidx8, align 4
; j = 2: dst[i+2] += src[i+2] * k
44 %add10 = or disjoint i32 %i.073, 2
45 %arrayidx11 = getelementptr inbounds [120 x half], ptr @src, i32 0, i32 %add10
46 %4 = load half, ptr %arrayidx11, align 4
47 %mul12 = fmul fast half %4, %k
48 %arrayidx14 = getelementptr inbounds [120 x half], ptr @dst, i32 0, i32 %add10
49 %5 = load half, ptr %arrayidx14, align 4
50 %add15 = fadd fast half %5, %mul12
51 store half %add15, ptr %arrayidx14, align 4
; j = 3: dst[i+3] += src[i+3] * k
52 %add16 = or disjoint i32 %i.073, 3
53 %arrayidx17 = getelementptr inbounds [120 x half], ptr @src, i32 0, i32 %add16
54 %6 = load half, ptr %arrayidx17, align 4
55 %mul18 = fmul fast half %6, %k
56 %arrayidx20 = getelementptr inbounds [120 x half], ptr @dst, i32 0, i32 %add16
57 %7 = load half, ptr %arrayidx20, align 4
58 %add21 = fadd fast half %7, %mul18
59 store half %add21, ptr %arrayidx20, align 4
; j = 4: dst[i+4] += src[i+4] * k
60 %add22 = or disjoint i32 %i.073, 4
61 %arrayidx23 = getelementptr inbounds [120 x half], ptr @src, i32 0, i32 %add22
62 %8 = load half, ptr %arrayidx23, align 4
63 %mul24 = fmul fast half %8, %k
64 %arrayidx26 = getelementptr inbounds [120 x half], ptr @dst, i32 0, i32 %add22
65 %9 = load half, ptr %arrayidx26, align 4
66 %add27 = fadd fast half %9, %mul24
67 store half %add27, ptr %arrayidx26, align 4
; j = 5: dst[i+5] += src[i+5] * k
68 %add28 = or disjoint i32 %i.073, 5
69 %arrayidx29 = getelementptr inbounds [120 x half], ptr @src, i32 0, i32 %add28
70 %10 = load half, ptr %arrayidx29, align 4
71 %mul30 = fmul fast half %10, %k
72 %arrayidx32 = getelementptr inbounds [120 x half], ptr @dst, i32 0, i32 %add28
73 %11 = load half, ptr %arrayidx32, align 4
74 %add33 = fadd fast half %11, %mul30
75 store half %add33, ptr %arrayidx32, align 4
; j = 6: dst[i+6] += src[i+6] * k
76 %add34 = or disjoint i32 %i.073, 6
77 %arrayidx35 = getelementptr inbounds [120 x half], ptr @src, i32 0, i32 %add34
78 %12 = load half, ptr %arrayidx35, align 4
79 %mul36 = fmul fast half %12, %k
80 %arrayidx38 = getelementptr inbounds [120 x half], ptr @dst, i32 0, i32 %add34
81 %13 = load half, ptr %arrayidx38, align 4
82 %add39 = fadd fast half %13, %mul36
83 store half %add39, ptr %arrayidx38, align 4
; j = 7: dst[i+7] += src[i+7] * k
84 %add40 = or disjoint i32 %i.073, 7
85 %arrayidx41 = getelementptr inbounds [120 x half], ptr @src, i32 0, i32 %add40
86 %14 = load half, ptr %arrayidx41, align 4
87 %mul42 = fmul fast half %14, %k
88 %arrayidx44 = getelementptr inbounds [120 x half], ptr @dst, i32 0, i32 %add40
89 %15 = load half, ptr %arrayidx44, align 4
90 %add45 = fadd fast half %15, %mul42
91 store half %add45, ptr %arrayidx44, align 4
; i += 8; loop while i < width_ (signed compare).
92 %add46 = add nuw nsw i32 %i.073, 8
93 %cmp = icmp slt i32 %add46, %width_
94 br i1 %cmp, label %for.body, label %for.cond.cleanup.loopexit
97 ; Function Attrs: norecurse nounwind
; void stride3(half k, i32 width_):
;   for (i = 0; i < width_; i += 3)
;     dst[i+j] += src[i+j] * k   for j = 0..2   (hand-unrolled x3,
;                                               fast-math half ops)
; Same read-modify-write pattern as the function above but unrolled
; by 3; the CHECK line pins the cost-model estimate for the first
; load at VF 32. Because i steps by 3 (not a power of two), the lane
; offsets use plain 'add nuw nsw' rather than 'or disjoint'.
; NOTE(review): this definition runs past the end of the visible
; chunk (no 'ret void' / closing brace here) — confirm against the
; full file.
98 define void @stride3(half %k, i32 %width_) {
101 ; CHECK: Found an estimated cost of 18 for VF 32 For instruction: %0 = load half
; Guard: run the loop only when width_ > 0.
103 %cmp27 = icmp sgt i32 %width_, 0
104 br i1 %cmp27, label %for.body.lr.ph, label %for.cond.cleanup
106 for.body.lr.ph: ; preds = %entry
109 for.cond.cleanup: ; preds = %for.body, %entry
112 for.body: ; preds = %for.body.lr.ph, %for.body
; Induction variable i: starts at 0, advances by 3 (%add16 below).
113 %i.028 = phi i32 [ 0, %for.body.lr.ph ], [ %add16, %for.body ]
; j = 0: dst[i] += src[i] * k
114 %arrayidx = getelementptr inbounds [120 x half], ptr @src, i32 0, i32 %i.028
115 %0 = load half, ptr %arrayidx, align 4
116 %mul = fmul fast half %0, %k
117 %arrayidx2 = getelementptr inbounds [120 x half], ptr @dst, i32 0, i32 %i.028
118 %1 = load half, ptr %arrayidx2, align 4
119 %add3 = fadd fast half %1, %mul
120 store half %add3, ptr %arrayidx2, align 4
; j = 1: dst[i+1] += src[i+1] * k
121 %add4 = add nuw nsw i32 %i.028, 1
122 %arrayidx5 = getelementptr inbounds [120 x half], ptr @src, i32 0, i32 %add4
123 %2 = load half, ptr %arrayidx5, align 4
124 %mul6 = fmul fast half %2, %k
125 %arrayidx8 = getelementptr inbounds [120 x half], ptr @dst, i32 0, i32 %add4
126 %3 = load half, ptr %arrayidx8, align 4
127 %add9 = fadd fast half %3, %mul6
128 store half %add9, ptr %arrayidx8, align 4
; j = 2: dst[i+2] += src[i+2] * k
129 %add10 = add nuw nsw i32 %i.028, 2
130 %arrayidx11 = getelementptr inbounds [120 x half], ptr @src, i32 0, i32 %add10
131 %4 = load half, ptr %arrayidx11, align 4
132 %mul12 = fmul fast half %4, %k
133 %arrayidx14 = getelementptr inbounds [120 x half], ptr @dst, i32 0, i32 %add10
134 %5 = load half, ptr %arrayidx14, align 4
135 %add15 = fadd fast half %5, %mul12
136 store half %add15, ptr %arrayidx14, align 4
; i += 3; loop while i < width_ (signed compare).
137 %add16 = add nuw nsw i32 %i.028, 3
138 %cmp = icmp slt i32 %add16, %width_
139 br i1 %cmp, label %for.body, label %for.cond.cleanup