1 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 2
2 ; RUN: opt -passes=loop-vectorize -force-vector-width=4 -force-vector-interleave=1 -S %s | FileCheck %s
4 target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128-ni:1-p2:32:8:8:32-ni:2"
5 target triple = "x86_64-apple-macos"
7 ; Both %l3 and the earlier store to %gep.iv.2 access the same location. %l1
8 ; cannot be added safely to the same interleave group as %l2 and %l3, because
9 ; that would mean %l2 and %l3 would need to be hoisted across the store.
10 define void @pr63602_1(ptr %arr) {
11 ; CHECK-LABEL: define void @pr63602_1
12 ; CHECK-SAME: (ptr [[ARR:%.*]]) {
14 ; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
16 ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
18 ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
19 ; CHECK-NEXT: [[TMP0:%.*]] = mul i64 [[INDEX]], 3
20 ; CHECK-NEXT: [[OFFSET_IDX:%.*]] = add i64 1, [[TMP0]]
21 ; CHECK-NEXT: [[TMP1:%.*]] = add i64 [[OFFSET_IDX]], 0
22 ; CHECK-NEXT: [[TMP2:%.*]] = mul i64 [[INDEX]], 3
23 ; CHECK-NEXT: [[OFFSET_IDX2:%.*]] = add i64 4, [[TMP2]]
24 ; CHECK-NEXT: [[TMP3:%.*]] = add i64 [[OFFSET_IDX2]], 0
25 ; CHECK-NEXT: [[TMP4:%.*]] = add i64 [[OFFSET_IDX2]], 3
26 ; CHECK-NEXT: [[TMP5:%.*]] = add i64 [[OFFSET_IDX2]], 6
27 ; CHECK-NEXT: [[TMP6:%.*]] = add i64 [[OFFSET_IDX2]], 9
28 ; CHECK-NEXT: [[TMP7:%.*]] = add nuw nsw i64 [[TMP1]], 4
29 ; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds i32, ptr [[ARR]], i64 [[TMP7]]
30 ; CHECK-NEXT: [[WIDE_VEC:%.*]] = load <12 x i32>, ptr [[TMP8]], align 4
31 ; CHECK-NEXT: [[STRIDED_VEC:%.*]] = shufflevector <12 x i32> [[WIDE_VEC]], <12 x i32> poison, <4 x i32> <i32 0, i32 3, i32 6, i32 9>
32 ; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds i32, ptr [[ARR]], i64 [[TMP3]]
33 ; CHECK-NEXT: [[TMP11:%.*]] = getelementptr inbounds i32, ptr [[ARR]], i64 [[TMP4]]
34 ; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds i32, ptr [[ARR]], i64 [[TMP5]]
35 ; CHECK-NEXT: [[TMP13:%.*]] = getelementptr inbounds i32, ptr [[ARR]], i64 [[TMP6]]
36 ; CHECK-NEXT: [[TMP14:%.*]] = extractelement <4 x i32> [[STRIDED_VEC]], i32 0
37 ; CHECK-NEXT: store i32 [[TMP14]], ptr [[TMP10]], align 4
38 ; CHECK-NEXT: [[TMP15:%.*]] = extractelement <4 x i32> [[STRIDED_VEC]], i32 1
39 ; CHECK-NEXT: store i32 [[TMP15]], ptr [[TMP11]], align 4
40 ; CHECK-NEXT: [[TMP16:%.*]] = extractelement <4 x i32> [[STRIDED_VEC]], i32 2
41 ; CHECK-NEXT: store i32 [[TMP16]], ptr [[TMP12]], align 4
42 ; CHECK-NEXT: [[TMP17:%.*]] = extractelement <4 x i32> [[STRIDED_VEC]], i32 3
43 ; CHECK-NEXT: store i32 [[TMP17]], ptr [[TMP13]], align 4
44 ; CHECK-NEXT: [[TMP18:%.*]] = add nuw nsw i64 [[TMP1]], 2
45 ; CHECK-NEXT: [[TMP19:%.*]] = getelementptr inbounds i32, ptr [[ARR]], i64 [[TMP18]]
46 ; CHECK-NEXT: [[WIDE_VEC3:%.*]] = load <12 x i32>, ptr [[TMP19]], align 4
47 ; CHECK-NEXT: [[STRIDED_VEC4:%.*]] = shufflevector <12 x i32> [[WIDE_VEC3]], <12 x i32> poison, <4 x i32> <i32 0, i32 3, i32 6, i32 9>
48 ; CHECK-NEXT: [[STRIDED_VEC5:%.*]] = shufflevector <12 x i32> [[WIDE_VEC3]], <12 x i32> poison, <4 x i32> <i32 1, i32 4, i32 7, i32 10>
49 ; CHECK-NEXT: [[TMP21:%.*]] = add <4 x i32> [[STRIDED_VEC5]], [[STRIDED_VEC4]]
50 ; CHECK-NEXT: [[TMP22:%.*]] = extractelement <4 x i32> [[TMP21]], i32 0
51 ; CHECK-NEXT: store i32 [[TMP22]], ptr [[TMP10]], align 4
52 ; CHECK-NEXT: [[TMP23:%.*]] = extractelement <4 x i32> [[TMP21]], i32 1
53 ; CHECK-NEXT: store i32 [[TMP23]], ptr [[TMP11]], align 4
54 ; CHECK-NEXT: [[TMP24:%.*]] = extractelement <4 x i32> [[TMP21]], i32 2
55 ; CHECK-NEXT: store i32 [[TMP24]], ptr [[TMP12]], align 4
56 ; CHECK-NEXT: [[TMP25:%.*]] = extractelement <4 x i32> [[TMP21]], i32 3
57 ; CHECK-NEXT: store i32 [[TMP25]], ptr [[TMP13]], align 4
58 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
59 ; CHECK-NEXT: [[TMP26:%.*]] = icmp eq i64 [[INDEX_NEXT]], 16
60 ; CHECK-NEXT: br i1 [[TMP26]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
61 ; CHECK: middle.block:
62 ; CHECK-NEXT: br label [[SCALAR_PH]]
64 ; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 49, [[MIDDLE_BLOCK]] ], [ 1, [[ENTRY:%.*]] ]
65 ; CHECK-NEXT: [[BC_RESUME_VAL1:%.*]] = phi i64 [ 52, [[MIDDLE_BLOCK]] ], [ 4, [[ENTRY]] ]
66 ; CHECK-NEXT: br label [[LOOP:%.*]]
68 ; CHECK-NEXT: [[IV_1:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_1_NEXT:%.*]], [[LOOP]] ]
69 ; CHECK-NEXT: [[IV_2:%.*]] = phi i64 [ [[BC_RESUME_VAL1]], [[SCALAR_PH]] ], [ [[IV_2_NEXT:%.*]], [[LOOP]] ]
70 ; CHECK-NEXT: [[IV_1_NEXT]] = add nuw nsw i64 [[IV_1]], 3
71 ; CHECK-NEXT: [[IV_1_PLUS_4:%.*]] = add nuw nsw i64 [[IV_1]], 4
72 ; CHECK-NEXT: [[GEP_IV_1_PLUS_4:%.*]] = getelementptr inbounds i32, ptr [[ARR]], i64 [[IV_1_PLUS_4]]
73 ; CHECK-NEXT: [[L1:%.*]] = load i32, ptr [[GEP_IV_1_PLUS_4]], align 4
74 ; CHECK-NEXT: [[GEP_IV_2:%.*]] = getelementptr inbounds i32, ptr [[ARR]], i64 [[IV_2]]
75 ; CHECK-NEXT: store i32 [[L1]], ptr [[GEP_IV_2]], align 4
76 ; CHECK-NEXT: [[IV_1_PLUS_2:%.*]] = add nuw nsw i64 [[IV_1]], 2
77 ; CHECK-NEXT: [[GEP_IV_1_PLUS_2:%.*]] = getelementptr inbounds i32, ptr [[ARR]], i64 [[IV_1_PLUS_2]]
78 ; CHECK-NEXT: [[L2:%.*]] = load i32, ptr [[GEP_IV_1_PLUS_2]], align 4
79 ; CHECK-NEXT: [[L3:%.*]] = load i32, ptr [[GEP_IV_2]], align 4
80 ; CHECK-NEXT: [[ADD:%.*]] = add i32 [[L3]], [[L2]]
81 ; CHECK-NEXT: store i32 [[ADD]], ptr [[GEP_IV_2]], align 4
82 ; CHECK-NEXT: [[IV_2_NEXT]] = add nuw nsw i64 [[IV_2]], 3
83 ; CHECK-NEXT: [[ICMP:%.*]] = icmp ugt i64 [[IV_2]], 50
84 ; CHECK-NEXT: br i1 [[ICMP]], label [[EXIT:%.*]], label [[LOOP]], !llvm.loop [[LOOP3:![0-9]+]]
86 ; CHECK-NEXT: ret void
92 %iv.1 = phi i64 [ 1, %entry ], [ %iv.1.next, %loop ]
93 %iv.2 = phi i64 [ 4, %entry ], [ %iv.2.next, %loop ]
94 %iv.1.next = add nuw nsw i64 %iv.1, 3
95 %iv.1.plus.4 = add nuw nsw i64 %iv.1, 4
96 %gep.iv.1.plus.4 = getelementptr inbounds i32, ptr %arr, i64 %iv.1.plus.4
97 %l1 = load i32, ptr %gep.iv.1.plus.4
98 %gep.iv.2 = getelementptr inbounds i32, ptr %arr, i64 %iv.2
99 store i32 %l1, ptr %gep.iv.2
100 %iv.1.plus.2 = add nuw nsw i64 %iv.1, 2
101 %gep.iv.1.plus.2= getelementptr inbounds i32, ptr %arr, i64 %iv.1.plus.2
102 %l2 = load i32, ptr %gep.iv.1.plus.2
103 %l3 = load i32, ptr %gep.iv.2
104 %add = add i32 %l3 , %l2
105 store i32 %add, ptr %gep.iv.2
106 %iv.2.next = add nuw nsw i64 %iv.2, 3
107 %icmp = icmp ugt i64 %iv.2, 50
108 br i1 %icmp, label %exit, label %loop
114 ; %l3 and the preceding store access the same memory location. So, we cannot
115 ; have the loads %l1, %l2 and %l3 in the same interleave group since it would
116 ; mean hoisting the load %l2 and %l3 across the store.
118 ; Unlike the above case, since we go through the last load in program order and
119 ; compare against the obstructing stores (%l2 versus the store) there is no
120 ; dependency. However, the other load in %l2's interleave group (%l3) does
121 ; conflict with the store.
122 define void @pr63602_2(ptr %arr) {
123 ; CHECK-LABEL: define void @pr63602_2
124 ; CHECK-SAME: (ptr [[ARR:%.*]]) {
126 ; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
128 ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
129 ; CHECK: vector.body:
130 ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
131 ; CHECK-NEXT: [[TMP0:%.*]] = mul i64 [[INDEX]], 3
132 ; CHECK-NEXT: [[OFFSET_IDX:%.*]] = add i64 1, [[TMP0]]
133 ; CHECK-NEXT: [[TMP1:%.*]] = add i64 [[OFFSET_IDX]], 0
134 ; CHECK-NEXT: [[TMP2:%.*]] = add i64 [[OFFSET_IDX]], 3
135 ; CHECK-NEXT: [[TMP3:%.*]] = add i64 [[OFFSET_IDX]], 6
136 ; CHECK-NEXT: [[TMP4:%.*]] = add i64 [[OFFSET_IDX]], 9
137 ; CHECK-NEXT: [[TMP5:%.*]] = mul i64 [[INDEX]], 3
138 ; CHECK-NEXT: [[OFFSET_IDX2:%.*]] = add i64 4, [[TMP5]]
139 ; CHECK-NEXT: [[TMP6:%.*]] = add i64 [[OFFSET_IDX2]], 0
140 ; CHECK-NEXT: [[TMP7:%.*]] = add i64 [[OFFSET_IDX2]], 3
141 ; CHECK-NEXT: [[TMP8:%.*]] = add i64 [[OFFSET_IDX2]], 6
142 ; CHECK-NEXT: [[TMP9:%.*]] = add i64 [[OFFSET_IDX2]], 9
143 ; CHECK-NEXT: [[TMP10:%.*]] = add nuw nsw i64 [[TMP1]], 4
144 ; CHECK-NEXT: [[TMP11:%.*]] = getelementptr inbounds i32, ptr [[ARR]], i64 [[TMP10]]
145 ; CHECK-NEXT: [[WIDE_VEC:%.*]] = load <12 x i32>, ptr [[TMP11]], align 4
146 ; CHECK-NEXT: [[STRIDED_VEC:%.*]] = shufflevector <12 x i32> [[WIDE_VEC]], <12 x i32> poison, <4 x i32> <i32 0, i32 3, i32 6, i32 9>
147 ; CHECK-NEXT: [[TMP13:%.*]] = getelementptr inbounds i32, ptr [[ARR]], i64 [[TMP6]]
148 ; CHECK-NEXT: [[TMP14:%.*]] = getelementptr inbounds i32, ptr [[ARR]], i64 [[TMP7]]
149 ; CHECK-NEXT: [[TMP15:%.*]] = getelementptr inbounds i32, ptr [[ARR]], i64 [[TMP8]]
150 ; CHECK-NEXT: [[TMP16:%.*]] = getelementptr inbounds i32, ptr [[ARR]], i64 [[TMP9]]
151 ; CHECK-NEXT: [[TMP17:%.*]] = extractelement <4 x i32> [[STRIDED_VEC]], i32 0
152 ; CHECK-NEXT: store i32 [[TMP17]], ptr [[TMP13]], align 4
153 ; CHECK-NEXT: [[TMP18:%.*]] = extractelement <4 x i32> [[STRIDED_VEC]], i32 1
154 ; CHECK-NEXT: store i32 [[TMP18]], ptr [[TMP14]], align 4
155 ; CHECK-NEXT: [[TMP19:%.*]] = extractelement <4 x i32> [[STRIDED_VEC]], i32 2
156 ; CHECK-NEXT: store i32 [[TMP19]], ptr [[TMP15]], align 4
157 ; CHECK-NEXT: [[TMP20:%.*]] = extractelement <4 x i32> [[STRIDED_VEC]], i32 3
158 ; CHECK-NEXT: store i32 [[TMP20]], ptr [[TMP16]], align 4
159 ; CHECK-NEXT: [[TMP21:%.*]] = add nuw nsw i64 [[TMP1]], 2
160 ; CHECK-NEXT: [[TMP22:%.*]] = add nuw nsw i64 [[TMP2]], 2
161 ; CHECK-NEXT: [[TMP23:%.*]] = add nuw nsw i64 [[TMP3]], 2
162 ; CHECK-NEXT: [[TMP24:%.*]] = add nuw nsw i64 [[TMP4]], 2
163 ; CHECK-NEXT: [[TMP25:%.*]] = getelementptr inbounds i32, ptr [[ARR]], i64 [[TMP21]]
164 ; CHECK-NEXT: [[TMP26:%.*]] = getelementptr inbounds i32, ptr [[ARR]], i64 [[TMP22]]
165 ; CHECK-NEXT: [[TMP27:%.*]] = getelementptr inbounds i32, ptr [[ARR]], i64 [[TMP23]]
166 ; CHECK-NEXT: [[TMP28:%.*]] = getelementptr inbounds i32, ptr [[ARR]], i64 [[TMP24]]
167 ; CHECK-NEXT: [[TMP29:%.*]] = load i32, ptr [[TMP13]], align 4
168 ; CHECK-NEXT: [[TMP30:%.*]] = load i32, ptr [[TMP14]], align 4
169 ; CHECK-NEXT: [[TMP31:%.*]] = load i32, ptr [[TMP15]], align 4
170 ; CHECK-NEXT: [[TMP32:%.*]] = load i32, ptr [[TMP16]], align 4
171 ; CHECK-NEXT: [[TMP33:%.*]] = insertelement <4 x i32> poison, i32 [[TMP29]], i32 0
172 ; CHECK-NEXT: [[TMP34:%.*]] = insertelement <4 x i32> [[TMP33]], i32 [[TMP30]], i32 1
173 ; CHECK-NEXT: [[TMP35:%.*]] = insertelement <4 x i32> [[TMP34]], i32 [[TMP31]], i32 2
174 ; CHECK-NEXT: [[TMP36:%.*]] = insertelement <4 x i32> [[TMP35]], i32 [[TMP32]], i32 3
175 ; CHECK-NEXT: [[TMP37:%.*]] = load i32, ptr [[TMP25]], align 4
176 ; CHECK-NEXT: [[TMP38:%.*]] = load i32, ptr [[TMP26]], align 4
177 ; CHECK-NEXT: [[TMP39:%.*]] = load i32, ptr [[TMP27]], align 4
178 ; CHECK-NEXT: [[TMP40:%.*]] = load i32, ptr [[TMP28]], align 4
179 ; CHECK-NEXT: [[TMP41:%.*]] = insertelement <4 x i32> poison, i32 [[TMP37]], i32 0
180 ; CHECK-NEXT: [[TMP42:%.*]] = insertelement <4 x i32> [[TMP41]], i32 [[TMP38]], i32 1
181 ; CHECK-NEXT: [[TMP43:%.*]] = insertelement <4 x i32> [[TMP42]], i32 [[TMP39]], i32 2
182 ; CHECK-NEXT: [[TMP44:%.*]] = insertelement <4 x i32> [[TMP43]], i32 [[TMP40]], i32 3
183 ; CHECK-NEXT: [[TMP45:%.*]] = add <4 x i32> [[TMP36]], [[TMP44]]
184 ; CHECK-NEXT: [[TMP46:%.*]] = extractelement <4 x i32> [[TMP45]], i32 0
185 ; CHECK-NEXT: store i32 [[TMP46]], ptr [[TMP13]], align 4
186 ; CHECK-NEXT: [[TMP47:%.*]] = extractelement <4 x i32> [[TMP45]], i32 1
187 ; CHECK-NEXT: store i32 [[TMP47]], ptr [[TMP14]], align 4
188 ; CHECK-NEXT: [[TMP48:%.*]] = extractelement <4 x i32> [[TMP45]], i32 2
189 ; CHECK-NEXT: store i32 [[TMP48]], ptr [[TMP15]], align 4
190 ; CHECK-NEXT: [[TMP49:%.*]] = extractelement <4 x i32> [[TMP45]], i32 3
191 ; CHECK-NEXT: store i32 [[TMP49]], ptr [[TMP16]], align 4
192 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
193 ; CHECK-NEXT: [[TMP50:%.*]] = icmp eq i64 [[INDEX_NEXT]], 16
194 ; CHECK-NEXT: br i1 [[TMP50]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
195 ; CHECK: middle.block:
196 ; CHECK-NEXT: br label [[SCALAR_PH]]
198 ; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 49, [[MIDDLE_BLOCK]] ], [ 1, [[ENTRY:%.*]] ]
199 ; CHECK-NEXT: [[BC_RESUME_VAL1:%.*]] = phi i64 [ 52, [[MIDDLE_BLOCK]] ], [ 4, [[ENTRY]] ]
200 ; CHECK-NEXT: br label [[LOOP:%.*]]
202 ; CHECK-NEXT: [[IV_1:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_1_NEXT:%.*]], [[LOOP]] ]
203 ; CHECK-NEXT: [[IV_2:%.*]] = phi i64 [ [[BC_RESUME_VAL1]], [[SCALAR_PH]] ], [ [[IV_2_NEXT:%.*]], [[LOOP]] ]
204 ; CHECK-NEXT: [[IV_1_NEXT]] = add nuw nsw i64 [[IV_1]], 3
205 ; CHECK-NEXT: [[IV_1_PLUS_4:%.*]] = add nuw nsw i64 [[IV_1]], 4
206 ; CHECK-NEXT: [[GEP_IV_1_PLUS_4:%.*]] = getelementptr inbounds i32, ptr [[ARR]], i64 [[IV_1_PLUS_4]]
207 ; CHECK-NEXT: [[L1:%.*]] = load i32, ptr [[GEP_IV_1_PLUS_4]], align 4
208 ; CHECK-NEXT: [[GEP_IV_2:%.*]] = getelementptr inbounds i32, ptr [[ARR]], i64 [[IV_2]]
209 ; CHECK-NEXT: store i32 [[L1]], ptr [[GEP_IV_2]], align 4
210 ; CHECK-NEXT: [[IV_1_PLUS_2:%.*]] = add nuw nsw i64 [[IV_1]], 2
211 ; CHECK-NEXT: [[GEP_IV_1_PLUS_2:%.*]] = getelementptr inbounds i32, ptr [[ARR]], i64 [[IV_1_PLUS_2]]
212 ; CHECK-NEXT: [[L3:%.*]] = load i32, ptr [[GEP_IV_2]], align 4
213 ; CHECK-NEXT: [[L2:%.*]] = load i32, ptr [[GEP_IV_1_PLUS_2]], align 4
214 ; CHECK-NEXT: [[ADD:%.*]] = add i32 [[L3]], [[L2]]
215 ; CHECK-NEXT: store i32 [[ADD]], ptr [[GEP_IV_2]], align 4
216 ; CHECK-NEXT: [[IV_2_NEXT]] = add nuw nsw i64 [[IV_2]], 3
217 ; CHECK-NEXT: [[ICMP:%.*]] = icmp ugt i64 [[IV_2]], 50
218 ; CHECK-NEXT: br i1 [[ICMP]], label [[EXIT:%.*]], label [[LOOP]], !llvm.loop [[LOOP5:![0-9]+]]
220 ; CHECK-NEXT: ret void
226 %iv.1 = phi i64 [ 1, %entry ], [ %iv.1.next, %loop ]
227 %iv.2 = phi i64 [ 4, %entry ], [ %iv.2.next, %loop ]
228 %iv.1.next = add nuw nsw i64 %iv.1, 3
229 %iv.1.plus.4 = add nuw nsw i64 %iv.1, 4
230 %gep.iv.1.plus.4 = getelementptr inbounds i32, ptr %arr, i64 %iv.1.plus.4
231 %l1 = load i32, ptr %gep.iv.1.plus.4
232 %gep.iv.2 = getelementptr inbounds i32, ptr %arr, i64 %iv.2
233 store i32 %l1, ptr %gep.iv.2
234 %iv.1.plus.2 = add nuw nsw i64 %iv.1, 2
235 %gep.iv.1.plus.2= getelementptr inbounds i32, ptr %arr, i64 %iv.1.plus.2
236 %l3 = load i32, ptr %gep.iv.2
237 %l2 = load i32, ptr %gep.iv.1.plus.2
238 %add = add i32 %l3 , %l2
239 store i32 %add, ptr %gep.iv.2
240 %iv.2.next = add nuw nsw i64 %iv.2, 3
241 %icmp = icmp ugt i64 %iv.2, 50
242 br i1 %icmp, label %exit, label %loop