; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -passes=loop-vectorize -mtriple=x86_64-apple-macosx10.8.0 -mcpu=corei7-avx -S | FileCheck %s

target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
target triple = "x86_64-apple-macosx10.8.0"

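; conversion_cost1 stores a truncated (i64 -> i8) induction variable through %A.
; Based on the CHECK lines below, the vectorizer is expected to choose a main
; vectorization factor of 32 (<32 x i8>) with a 16-wide vector epilogue on this
; AVX target.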
define i32 @conversion_cost1(i32 %n, ptr nocapture %A, ptr nocapture %B) nounwind uwtable ssp {
; CHECK-LABEL: @conversion_cost1(
; CHECK-NEXT: [[TMP1:%.*]] = icmp sgt i32 [[N:%.*]], 3
; CHECK-NEXT: br i1 [[TMP1]], label [[ITER_CHECK:%.*]], label [[DOT_CRIT_EDGE:%.*]]
; CHECK: iter.check:
; CHECK-NEXT: [[TMP2:%.*]] = add i32 [[N]], -3
; CHECK-NEXT: [[TMP3:%.*]] = zext i32 [[TMP2]] to i64
; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP3]], 16
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[VEC_EPILOG_SCALAR_PH:%.*]], label [[VECTOR_MAIN_LOOP_ITER_CHECK:%.*]]
; CHECK: vector.main.loop.iter.check:
; CHECK-NEXT: [[MIN_ITERS_CHECK1:%.*]] = icmp ult i64 [[TMP3]], 32
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK1]], label [[VEC_EPILOG_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP3]], 32
; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP3]], [[N_MOD_VF]]
; CHECK-NEXT: [[IND_END:%.*]] = add i64 3, [[N_VEC]]
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_IND:%.*]] = phi <32 x i8> [ <i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31, i8 32, i8 33, i8 34>, [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[OFFSET_IDX:%.*]] = add i64 3, [[INDEX]]
; CHECK-NEXT: [[TMP4:%.*]] = add i64 [[OFFSET_IDX]], 0
; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds i8, ptr [[A:%.*]], i64 [[TMP4]]
; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i8, ptr [[TMP5]], i32 0
; CHECK-NEXT: store <32 x i8> [[VEC_IND]], ptr [[TMP6]], align 1
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 32
; CHECK-NEXT: [[VEC_IND_NEXT]] = add <32 x i8> [[VEC_IND]], <i8 32, i8 32, i8 32, i8 32, i8 32, i8 32, i8 32, i8 32, i8 32, i8 32, i8 32, i8 32, i8 32, i8 32, i8 32, i8 32, i8 32, i8 32, i8 32, i8 32, i8 32, i8 32, i8 32, i8 32, i8 32, i8 32, i8 32, i8 32, i8 32, i8 32, i8 32, i8 32>
; CHECK-NEXT: [[TMP7:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP3]], [[N_VEC]]
; CHECK-NEXT: br i1 [[CMP_N]], label [[DOT_CRIT_EDGE_LOOPEXIT:%.*]], label [[VEC_EPILOG_ITER_CHECK:%.*]]
; CHECK: vec.epilog.iter.check:
; CHECK-NEXT: [[IND_END5:%.*]] = add i64 3, [[N_VEC]]
; CHECK-NEXT: [[N_VEC_REMAINING:%.*]] = sub i64 [[TMP3]], [[N_VEC]]
; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_VEC_REMAINING]], 16
; CHECK-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label [[VEC_EPILOG_SCALAR_PH]], label [[VEC_EPILOG_PH]]
; CHECK: vec.epilog.ph:
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[IND_END]], [[VEC_EPILOG_ITER_CHECK]] ], [ 3, [[VECTOR_MAIN_LOOP_ITER_CHECK]] ]
; CHECK-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[VECTOR_MAIN_LOOP_ITER_CHECK]] ]
; CHECK-NEXT: [[N_MOD_VF2:%.*]] = urem i64 [[TMP3]], 16
; CHECK-NEXT: [[N_VEC3:%.*]] = sub i64 [[TMP3]], [[N_MOD_VF2]]
; CHECK-NEXT: [[IND_END4:%.*]] = add i64 3, [[N_VEC3]]
; CHECK-NEXT: [[TMP8:%.*]] = trunc i64 [[BC_RESUME_VAL]] to i8
; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <16 x i8> poison, i8 [[TMP8]], i64 0
; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <16 x i8> [[DOTSPLATINSERT]], <16 x i8> poison, <16 x i32> zeroinitializer
; CHECK-NEXT: [[INDUCTION:%.*]] = add <16 x i8> [[DOTSPLAT]], <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>
; CHECK-NEXT: br label [[VEC_EPILOG_VECTOR_BODY:%.*]]
; CHECK: vec.epilog.vector.body:
; CHECK-NEXT: [[INDEX8:%.*]] = phi i64 [ [[VEC_EPILOG_RESUME_VAL]], [[VEC_EPILOG_PH]] ], [ [[INDEX_NEXT12:%.*]], [[VEC_EPILOG_VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_IND9:%.*]] = phi <16 x i8> [ [[INDUCTION]], [[VEC_EPILOG_PH]] ], [ [[VEC_IND_NEXT10:%.*]], [[VEC_EPILOG_VECTOR_BODY]] ]
; CHECK-NEXT: [[OFFSET_IDX11:%.*]] = add i64 3, [[INDEX8]]
; CHECK-NEXT: [[TMP9:%.*]] = add i64 [[OFFSET_IDX11]], 0
; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[TMP9]]
; CHECK-NEXT: [[TMP11:%.*]] = getelementptr inbounds i8, ptr [[TMP10]], i32 0
; CHECK-NEXT: store <16 x i8> [[VEC_IND9]], ptr [[TMP11]], align 1
; CHECK-NEXT: [[INDEX_NEXT12]] = add nuw i64 [[INDEX8]], 16
; CHECK-NEXT: [[VEC_IND_NEXT10]] = add <16 x i8> [[VEC_IND9]], <i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16>
; CHECK-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT12]], [[N_VEC3]]
; CHECK-NEXT: br i1 [[TMP12]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
; CHECK: vec.epilog.middle.block:
; CHECK-NEXT: [[CMP_N7:%.*]] = icmp eq i64 [[TMP3]], [[N_VEC3]]
; CHECK-NEXT: br i1 [[CMP_N7]], label [[DOT_CRIT_EDGE_LOOPEXIT]], label [[VEC_EPILOG_SCALAR_PH]]
; CHECK: vec.epilog.scalar.ph:
; CHECK-NEXT: [[BC_RESUME_VAL6:%.*]] = phi i64 [ [[IND_END4]], [[VEC_EPILOG_MIDDLE_BLOCK]] ], [ [[IND_END5]], [[VEC_EPILOG_ITER_CHECK]] ], [ 3, [[ITER_CHECK]] ]
; CHECK-NEXT: br label [[DOTLR_PH:%.*]]
; CHECK: .lr.ph:
; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], [[DOTLR_PH]] ], [ [[BC_RESUME_VAL6]], [[VEC_EPILOG_SCALAR_PH]] ]
; CHECK-NEXT: [[TMP13:%.*]] = trunc i64 [[INDVARS_IV]] to i8
; CHECK-NEXT: [[TMP14:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[INDVARS_IV]]
; CHECK-NEXT: store i8 [[TMP13]], ptr [[TMP14]], align 1
; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 1
; CHECK-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32
; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], [[N]]
; CHECK-NEXT: br i1 [[EXITCOND]], label [[DOT_CRIT_EDGE_LOOPEXIT]], label [[DOTLR_PH]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK: ._crit_edge.loopexit:
; CHECK-NEXT: br label [[DOT_CRIT_EDGE]]
; CHECK: ._crit_edge:
; CHECK-NEXT: ret i32 undef
;
  %1 = icmp sgt i32 %n, 3
  br i1 %1, label %.lr.ph, label %._crit_edge

.lr.ph:                                           ; preds = %0, %.lr.ph
  %indvars.iv = phi i64 [ %indvars.iv.next, %.lr.ph ], [ 3, %0 ]
  %2 = trunc i64 %indvars.iv to i8
  %3 = getelementptr inbounds i8, ptr %A, i64 %indvars.iv
  store i8 %2, ptr %3, align 1
  %indvars.iv.next = add i64 %indvars.iv, 1
  %lftr.wideiv = trunc i64 %indvars.iv.next to i32
  %exitcond = icmp eq i32 %lftr.wideiv, %n
  br i1 %exitcond, label %._crit_edge, label %.lr.ph

._crit_edge:                                      ; preds = %.lr.ph, %0
  ret i32 undef
}

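; conversion_cost2 converts the induction variable plus 3 to float (sitofp
; i64 -> float) and stores the result through %B. Judging by the CHECK lines
; below, the expected plan uses <2 x float> vectors interleaved by a factor
; of 4, i.e. 8 elements per vector iteration.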
define i32 @conversion_cost2(i32 %n, ptr nocapture %A, ptr nocapture %B) nounwind uwtable ssp {
; CHECK-LABEL: @conversion_cost2(
; CHECK-NEXT: [[TMP1:%.*]] = icmp sgt i32 [[N:%.*]], 9
; CHECK-NEXT: br i1 [[TMP1]], label [[DOTLR_PH_PREHEADER:%.*]], label [[DOT_CRIT_EDGE:%.*]]
; CHECK: .lr.ph.preheader:
; CHECK-NEXT: [[TMP2:%.*]] = add i32 [[N]], -9
; CHECK-NEXT: [[TMP3:%.*]] = zext i32 [[TMP2]] to i64
; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP3]], 8
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP3]], 8
; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP3]], [[N_MOD_VF]]
; CHECK-NEXT: [[IND_END:%.*]] = add i64 9, [[N_VEC]]
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_IND:%.*]] = phi <2 x i64> [ <i64 9, i64 10>, [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[STEP_ADD:%.*]] = add <2 x i64> [[VEC_IND]], <i64 2, i64 2>
; CHECK-NEXT: [[STEP_ADD1:%.*]] = add <2 x i64> [[STEP_ADD]], <i64 2, i64 2>
; CHECK-NEXT: [[STEP_ADD2:%.*]] = add <2 x i64> [[STEP_ADD1]], <i64 2, i64 2>
; CHECK-NEXT: [[OFFSET_IDX:%.*]] = add i64 9, [[INDEX]]
; CHECK-NEXT: [[TMP4:%.*]] = add i64 [[OFFSET_IDX]], 0
; CHECK-NEXT: [[TMP5:%.*]] = add i64 [[OFFSET_IDX]], 2
; CHECK-NEXT: [[TMP6:%.*]] = add i64 [[OFFSET_IDX]], 4
; CHECK-NEXT: [[TMP7:%.*]] = add i64 [[OFFSET_IDX]], 6
; CHECK-NEXT: [[TMP8:%.*]] = add nsw <2 x i64> [[VEC_IND]], <i64 3, i64 3>
; CHECK-NEXT: [[TMP9:%.*]] = add nsw <2 x i64> [[STEP_ADD]], <i64 3, i64 3>
; CHECK-NEXT: [[TMP10:%.*]] = add nsw <2 x i64> [[STEP_ADD1]], <i64 3, i64 3>
; CHECK-NEXT: [[TMP11:%.*]] = add nsw <2 x i64> [[STEP_ADD2]], <i64 3, i64 3>
; CHECK-NEXT: [[TMP12:%.*]] = sitofp <2 x i64> [[TMP8]] to <2 x float>
; CHECK-NEXT: [[TMP13:%.*]] = sitofp <2 x i64> [[TMP9]] to <2 x float>
; CHECK-NEXT: [[TMP14:%.*]] = sitofp <2 x i64> [[TMP10]] to <2 x float>
; CHECK-NEXT: [[TMP15:%.*]] = sitofp <2 x i64> [[TMP11]] to <2 x float>
; CHECK-NEXT: [[TMP16:%.*]] = getelementptr inbounds float, ptr [[B:%.*]], i64 [[TMP4]]
; CHECK-NEXT: [[TMP17:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[TMP5]]
; CHECK-NEXT: [[TMP18:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[TMP6]]
; CHECK-NEXT: [[TMP19:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[TMP7]]
; CHECK-NEXT: [[TMP20:%.*]] = getelementptr inbounds float, ptr [[TMP16]], i32 0
; CHECK-NEXT: [[TMP21:%.*]] = getelementptr inbounds float, ptr [[TMP16]], i32 2
; CHECK-NEXT: [[TMP22:%.*]] = getelementptr inbounds float, ptr [[TMP16]], i32 4
; CHECK-NEXT: [[TMP23:%.*]] = getelementptr inbounds float, ptr [[TMP16]], i32 6
; CHECK-NEXT: store <2 x float> [[TMP12]], ptr [[TMP20]], align 4
; CHECK-NEXT: store <2 x float> [[TMP13]], ptr [[TMP21]], align 4
; CHECK-NEXT: store <2 x float> [[TMP14]], ptr [[TMP22]], align 4
; CHECK-NEXT: store <2 x float> [[TMP15]], ptr [[TMP23]], align 4
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8
; CHECK-NEXT: [[VEC_IND_NEXT]] = add <2 x i64> [[STEP_ADD2]], <i64 2, i64 2>
; CHECK-NEXT: [[TMP24:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP24]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP3]], [[N_VEC]]
; CHECK-NEXT: br i1 [[CMP_N]], label [[DOT_CRIT_EDGE_LOOPEXIT:%.*]], label [[SCALAR_PH]]
; CHECK: scalar.ph:
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[IND_END]], [[MIDDLE_BLOCK]] ], [ 9, [[DOTLR_PH_PREHEADER]] ]
; CHECK-NEXT: br label [[DOTLR_PH:%.*]]
; CHECK: .lr.ph:
; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], [[DOTLR_PH]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
; CHECK-NEXT: [[ADD:%.*]] = add nsw i64 [[INDVARS_IV]], 3
; CHECK-NEXT: [[TOFP:%.*]] = sitofp i64 [[ADD]] to float
; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[INDVARS_IV]]
; CHECK-NEXT: store float [[TOFP]], ptr [[GEP]], align 4
; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 1
; CHECK-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32
; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], [[N]]
; CHECK-NEXT: br i1 [[EXITCOND]], label [[DOT_CRIT_EDGE_LOOPEXIT]], label [[DOTLR_PH]], !llvm.loop [[LOOP6:![0-9]+]]
; CHECK: ._crit_edge.loopexit:
; CHECK-NEXT: br label [[DOT_CRIT_EDGE]]
; CHECK: ._crit_edge:
; CHECK-NEXT: ret i32 undef
;
  %1 = icmp sgt i32 %n, 9
  br i1 %1, label %.lr.ph, label %._crit_edge

.lr.ph:                                           ; preds = %0, %.lr.ph
  %indvars.iv = phi i64 [ %indvars.iv.next, %.lr.ph ], [ 9, %0 ]
  %add = add nsw i64 %indvars.iv, 3
  %tofp = sitofp i64 %add to float
  %gep = getelementptr inbounds float, ptr %B, i64 %indvars.iv
  store float %tofp, ptr %gep, align 4
  %indvars.iv.next = add i64 %indvars.iv, 1
  %lftr.wideiv = trunc i64 %indvars.iv.next to i32
  %exitcond = icmp eq i32 %lftr.wideiv, %n
  br i1 %exitcond, label %._crit_edge, label %.lr.ph

._crit_edge:                                      ; preds = %.lr.ph, %0
  ret i32 undef
}