; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
; RUN: opt -p loop-vectorize -force-vector-width=4 -enable-interleaved-mem-accesses=true -S %s | FileCheck %s

target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128-Fn32"
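
; The GEP for the first group member (%gep0) does not dominate the interleave
; group's insert point; the group's address is computed from %gep1 and offset by -1.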
define void @gep_for_first_member_does_not_dominate_insert_point(ptr %str, ptr noalias %dst) {
; CHECK-LABEL: define void @gep_for_first_member_does_not_dominate_insert_point(
; CHECK-SAME: ptr [[STR:%.*]], ptr noalias [[DST:%.*]]) {
; CHECK-NEXT: [[ENTRY:.*]]:
; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP0:%.*]] = add i64 [[INDEX]], 0
; CHECK-NEXT: [[OFFSET_IDX:%.*]] = mul i64 [[INDEX]], 2
; CHECK-NEXT: [[TMP1:%.*]] = add i64 [[OFFSET_IDX]], 0
; CHECK-NEXT: [[TMP2:%.*]] = or disjoint i64 [[TMP1]], 1
; CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[STR]], i64 [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = getelementptr i8, ptr [[TMP3]], i32 -1
; CHECK-NEXT: [[WIDE_VEC:%.*]] = load <8 x i8>, ptr [[TMP4]], align 1
; CHECK-NEXT: [[STRIDED_VEC:%.*]] = shufflevector <8 x i8> [[WIDE_VEC]], <8 x i8> poison, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
; CHECK-NEXT: [[STRIDED_VEC2:%.*]] = shufflevector <8 x i8> [[WIDE_VEC]], <8 x i8> poison, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
; CHECK-NEXT: [[TMP5:%.*]] = add <4 x i8> [[STRIDED_VEC2]], [[STRIDED_VEC]]
; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i8, ptr [[DST]], i64 [[TMP0]]
; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds i8, ptr [[TMP6]], i32 0
; CHECK-NEXT: store <4 x i8> [[TMP5]], ptr [[TMP7]], align 1
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], 100
; CHECK-NEXT: br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: br i1 true, label %[[EXIT:.*]], label %[[SCALAR_PH]]
; CHECK: [[SCALAR_PH]]:
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 100, %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
; CHECK-NEXT: [[BC_RESUME_VAL2:%.*]] = phi i64 [ 200, %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
; CHECK-NEXT: br label %[[LOOP:.*]]
; CHECK: [[LOOP]]:
; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
; CHECK-NEXT: [[IV2:%.*]] = phi i64 [ [[BC_RESUME_VAL2]], %[[SCALAR_PH]] ], [ [[IV2_NEXT:%.*]], %[[LOOP]] ]
; CHECK-NEXT: [[OR_1:%.*]] = or disjoint i64 [[IV2]], 1
; CHECK-NEXT: [[GEP1:%.*]] = getelementptr i8, ptr [[STR]], i64 [[OR_1]]
; CHECK-NEXT: [[TMP9:%.*]] = load i8, ptr [[GEP1]], align 1
; CHECK-NEXT: [[GEP0:%.*]] = getelementptr i8, ptr [[STR]], i64 [[IV2]]
; CHECK-NEXT: [[TMP10:%.*]] = load i8, ptr [[GEP0]], align 1
; CHECK-NEXT: [[ADD:%.*]] = add i8 [[TMP9]], [[TMP10]]
; CHECK-NEXT: [[GEP_DST:%.*]] = getelementptr inbounds i8, ptr [[DST]], i64 [[IV]]
; CHECK-NEXT: store i8 [[ADD]], ptr [[GEP_DST]], align 1
; CHECK-NEXT: [[IV2_NEXT]] = add i64 [[IV2]], 2
; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1
; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], 100
; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP3:![0-9]+]]
; CHECK: [[EXIT]]:
; CHECK-NEXT: ret void
;
entry:
  br label %loop

loop:                                             ; preds = %loop, %entry
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
  %iv2 = phi i64 [ 0, %entry ], [ %iv2.next, %loop ]
  %or.1 = or disjoint i64 %iv2, 1
  %gep1 = getelementptr i8, ptr %str, i64 %or.1
  %1 = load i8, ptr %gep1, align 1
  %gep0 = getelementptr i8, ptr %str, i64 %iv2
  %2 = load i8, ptr %gep0, align 1
  %add = add i8 %1, %2
  %gep.dst = getelementptr inbounds i8, ptr %dst, i64 %iv
  store i8 %add, ptr %gep.dst, align 1
  %iv2.next = add i64 %iv2, 2
  %iv.next = add i64 %iv, 1
  %ec = icmp eq i64 %iv.next, 100
  br i1 %ec, label %exit, label %loop

exit:
  ret void
}
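
; The interleave group's insert position is at the end of a VPBasicBlock; its
; members are loaded in %loop.header and %loop.latch.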
define void @test_ig_insert_pos_at_end_of_vpbb(ptr noalias %dst, ptr noalias %src, i16 %x, i64 %N) {
; CHECK-LABEL: define void @test_ig_insert_pos_at_end_of_vpbb(
; CHECK-SAME: ptr noalias [[DST:%.*]], ptr noalias [[SRC:%.*]], i16 [[X:%.*]], i64 [[N:%.*]]) {
; CHECK-NEXT: [[ENTRY:.*]]:
; CHECK-NEXT: [[TMP0:%.*]] = add i64 [[N]], 1
; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ule i64 [[TMP0]], 4
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP0]], 4
; CHECK-NEXT: [[TMP1:%.*]] = icmp eq i64 [[N_MOD_VF]], 0
; CHECK-NEXT: [[TMP2:%.*]] = select i1 [[TMP1]], i64 4, i64 [[N_MOD_VF]]
; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP0]], [[TMP2]]
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP3:%.*]] = add i64 [[INDEX]], 0
; CHECK-NEXT: [[TMP4:%.*]] = getelementptr nusw { i16, i16, i16, i16 }, ptr [[SRC]], i64 [[TMP3]], i32 2
; CHECK-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[TMP4]], i32 -4
; CHECK-NEXT: [[WIDE_VEC:%.*]] = load <16 x i16>, ptr [[TMP5]], align 2
; CHECK-NEXT: [[STRIDED_VEC:%.*]] = shufflevector <16 x i16> [[WIDE_VEC]], <16 x i16> poison, <4 x i32> <i32 0, i32 4, i32 8, i32 12>
; CHECK-NEXT: [[STRIDED_VEC1:%.*]] = shufflevector <16 x i16> [[WIDE_VEC]], <16 x i16> poison, <4 x i32> <i32 2, i32 6, i32 10, i32 14>
; CHECK-NEXT: [[TMP6:%.*]] = extractelement <4 x i16> [[STRIDED_VEC]], i32 3
; CHECK-NEXT: store i16 [[TMP6]], ptr [[DST]], align 2
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT: [[TMP7:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP7]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: br label %[[SCALAR_PH]]
; CHECK: [[SCALAR_PH]]:
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
; CHECK-NEXT: br label %[[LOOP_HEADER:.*]]
; CHECK: [[LOOP_HEADER]]:
; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ]
; CHECK-NEXT: [[GEP_2:%.*]] = getelementptr nusw { i16, i16, i16, i16 }, ptr [[SRC]], i64 [[IV]], i32 2
; CHECK-NEXT: [[L_1:%.*]] = load i16, ptr [[GEP_2]], align 2
; CHECK-NEXT: switch i16 [[L_1]], label %[[THEN:.*]] [
; CHECK-NEXT: i16 0, label %[[LOOP_LATCH]]
; CHECK-NEXT: i16 1, label %[[LOOP_LATCH]]
; CHECK-NEXT: ]
; CHECK: [[THEN]]:
; CHECK-NEXT: br label %[[LOOP_LATCH]]
; CHECK: [[LOOP_LATCH]]:
; CHECK-NEXT: [[P:%.*]] = phi i16 [ [[X]], %[[THEN]] ], [ 0, %[[LOOP_HEADER]] ], [ 0, %[[LOOP_HEADER]] ]
; CHECK-NEXT: [[GEP_0:%.*]] = getelementptr { i16, i16, i16, i16 }, ptr [[SRC]], i64 [[IV]]
; CHECK-NEXT: [[L_2:%.*]] = load i16, ptr [[GEP_0]], align 2
; CHECK-NEXT: store i16 [[L_2]], ptr [[DST]], align 2
; CHECK-NEXT: [[IV_NEXT]] = add nsw i64 [[IV]], 1
; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV]], [[N]]
; CHECK-NEXT: br i1 [[EC]], label %[[EXIT:.*]], label %[[LOOP_HEADER]], !llvm.loop [[LOOP5:![0-9]+]]
; CHECK: [[EXIT]]:
; CHECK-NEXT: ret void
;
entry:
  br label %loop.header

loop.header:                                      ; preds = %loop.latch, %entry
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop.latch ]
  %gep.2 = getelementptr nusw { i16, i16, i16, i16 }, ptr %src, i64 %iv, i32 2
  %l.1 = load i16, ptr %gep.2, align 2
  switch i16 %l.1, label %then [
    i16 0, label %loop.latch
    i16 1, label %loop.latch
  ]

then:
  br label %loop.latch

loop.latch:
  %p = phi i16 [ %x, %then ], [ 0, %loop.header ], [ 0, %loop.header ]
  %gep.0 = getelementptr { i16, i16, i16, i16 }, ptr %src, i64 %iv
  %l.2 = load i16, ptr %gep.0, align 2
  store i16 %l.2, ptr %dst, align 2
  %iv.next = add nsw i64 %iv, 1
  %ec = icmp eq i64 %iv, %N
  br i1 %ec, label %exit, label %loop.header

exit:
  ret void
}
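
; Interleave group whose members load pointer values that feed an or-reduction.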
; FIXME: Currently the start address of the interleave group is computed
define i64 @interleave_group_load_pointer_type(ptr %start, ptr %end) {
; CHECK-LABEL: define i64 @interleave_group_load_pointer_type(
; CHECK-SAME: ptr [[START:%.*]], ptr [[END:%.*]]) {
; CHECK-NEXT: [[ENTRY:.*]]:
; CHECK-NEXT: [[START2:%.*]] = ptrtoint ptr [[START]] to i64
; CHECK-NEXT: [[END1:%.*]] = ptrtoint ptr [[END]] to i64
; CHECK-NEXT: [[TMP0:%.*]] = sub i64 [[END1]], [[START2]]
; CHECK-NEXT: [[TMP1:%.*]] = udiv i64 [[TMP0]], 24
; CHECK-NEXT: [[TMP2:%.*]] = add nuw nsw i64 [[TMP1]], 1
; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ule i64 [[TMP2]], 4
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP2]], 4
; CHECK-NEXT: [[TMP3:%.*]] = icmp eq i64 [[N_MOD_VF]], 0
; CHECK-NEXT: [[TMP4:%.*]] = select i1 [[TMP3]], i64 4, i64 [[N_MOD_VF]]
; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP2]], [[TMP4]]
; CHECK-NEXT: [[TMP5:%.*]] = mul i64 [[N_VEC]], 24
; CHECK-NEXT: [[IND_END:%.*]] = getelementptr i8, ptr [[START]], i64 [[TMP5]]
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x i64> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP12:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[OFFSET_IDX:%.*]] = mul i64 [[INDEX]], 24
; CHECK-NEXT: [[TMP6:%.*]] = add i64 [[OFFSET_IDX]], 0
; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[START]], i64 [[TMP6]]
; CHECK-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[NEXT_GEP]], i64 16
; CHECK-NEXT: [[TMP8:%.*]] = getelementptr i8, ptr [[TMP7]], i32 -8
; CHECK-NEXT: [[WIDE_VEC:%.*]] = load <12 x ptr>, ptr [[TMP8]], align 8
; CHECK-NEXT: [[STRIDED_VEC:%.*]] = shufflevector <12 x ptr> [[WIDE_VEC]], <12 x ptr> poison, <4 x i32> <i32 0, i32 3, i32 6, i32 9>
; CHECK-NEXT: [[STRIDED_VEC3:%.*]] = shufflevector <12 x ptr> [[WIDE_VEC]], <12 x ptr> poison, <4 x i32> <i32 1, i32 4, i32 7, i32 10>
; CHECK-NEXT: [[TMP9:%.*]] = ptrtoint <4 x ptr> [[STRIDED_VEC3]] to <4 x i64>
; CHECK-NEXT: [[TMP10:%.*]] = ptrtoint <4 x ptr> [[STRIDED_VEC]] to <4 x i64>
; CHECK-NEXT: [[TMP11:%.*]] = or <4 x i64> [[TMP9]], [[TMP10]]
; CHECK-NEXT: [[TMP12]] = or <4 x i64> [[TMP11]], [[VEC_PHI]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT: [[TMP13:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP13]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: [[TMP14:%.*]] = call i64 @llvm.vector.reduce.or.v4i64(<4 x i64> [[TMP12]])
; CHECK-NEXT: br label %[[SCALAR_PH]]
; CHECK: [[SCALAR_PH]]:
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi ptr [ [[IND_END]], %[[MIDDLE_BLOCK]] ], [ [[START]], %[[ENTRY]] ]
; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i64 [ [[TMP14]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
; CHECK-NEXT: br label %[[LOOP:.*]]
; CHECK: [[LOOP]]:
; CHECK-NEXT: [[PTR_IV:%.*]] = phi ptr [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[PTR_IV_NEXT:%.*]], %[[LOOP]] ]
; CHECK-NEXT: [[RED:%.*]] = phi i64 [ [[BC_MERGE_RDX]], %[[SCALAR_PH]] ], [ [[RED_NEXT:%.*]], %[[LOOP]] ]
; CHECK-NEXT: [[GEP_16:%.*]] = getelementptr i8, ptr [[PTR_IV]], i64 16
; CHECK-NEXT: [[L_16:%.*]] = load ptr, ptr [[GEP_16]], align 8
; CHECK-NEXT: [[P_16:%.*]] = ptrtoint ptr [[L_16]] to i64
; CHECK-NEXT: [[GEP_8:%.*]] = getelementptr i8, ptr [[PTR_IV]], i64 8
; CHECK-NEXT: [[L_8:%.*]] = load ptr, ptr [[GEP_8]], align 8
; CHECK-NEXT: [[P_8:%.*]] = ptrtoint ptr [[L_8]] to i64
; CHECK-NEXT: [[OR_1:%.*]] = or i64 [[P_16]], [[P_8]]
; CHECK-NEXT: [[RED_NEXT]] = or i64 [[OR_1]], [[RED]]
; CHECK-NEXT: [[PTR_IV_NEXT]] = getelementptr nusw i8, ptr [[PTR_IV]], i64 24
; CHECK-NEXT: [[EC:%.*]] = icmp eq ptr [[PTR_IV]], [[END]]
; CHECK-NEXT: br i1 [[EC]], label %[[EXIT:.*]], label %[[LOOP]], !llvm.loop [[LOOP7:![0-9]+]]
; CHECK: [[EXIT]]:
; CHECK-NEXT: [[RED_NEXT_LCSSA:%.*]] = phi i64 [ [[RED_NEXT]], %[[LOOP]] ]
; CHECK-NEXT: ret i64 [[RED_NEXT_LCSSA]]
;
entry:
  br label %loop

loop:
  %ptr.iv = phi ptr [ %start, %entry ], [ %ptr.iv.next, %loop ]
  %red = phi i64 [ 0, %entry ], [ %red.next, %loop ]
  %gep.16 = getelementptr i8, ptr %ptr.iv, i64 16
  %l.16 = load ptr, ptr %gep.16, align 8
  %p.16 = ptrtoint ptr %l.16 to i64
  %gep.8 = getelementptr i8, ptr %ptr.iv, i64 8
  %l.8 = load ptr, ptr %gep.8, align 8
  %p.8 = ptrtoint ptr %l.8 to i64
  %or.1 = or i64 %p.16, %p.8
  %red.next = or i64 %or.1, %red
  %ptr.iv.next = getelementptr nusw i8, ptr %ptr.iv, i64 24
  %ec = icmp eq ptr %ptr.iv, %end
  br i1 %ec, label %exit, label %loop

exit:
  ret i64 %red.next
}
;.
; CHECK: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]]}
; CHECK: [[META1]] = !{!"llvm.loop.isvectorized", i32 1}
; CHECK: [[META2]] = !{!"llvm.loop.unroll.runtime.disable"}
; CHECK: [[LOOP3]] = distinct !{[[LOOP3]], [[META2]], [[META1]]}
; CHECK: [[LOOP4]] = distinct !{[[LOOP4]], [[META1]], [[META2]]}
; CHECK: [[LOOP5]] = distinct !{[[LOOP5]], [[META2]], [[META1]]}
; CHECK: [[LOOP6]] = distinct !{[[LOOP6]], [[META1]], [[META2]]}
; CHECK: [[LOOP7]] = distinct !{[[LOOP7]], [[META2]], [[META1]]}