; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -loop-vectorize -S -mattr=avx512f -instcombine < %s | FileCheck %s

target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"

; First test checks that a loop with a reduction and a uniform store gets
; vectorized.
; CHECK-LABEL: inv_val_store_to_inv_address_with_reduction
; CHECK-LABEL: vector.memcheck:
; CHECK: found.conflict

; CHECK-LABEL: vector.body:
; CHECK: %vec.phi = phi <16 x i32> [ zeroinitializer, %vector.ph ], [ [[ADD:%[a-zA-Z0-9.]+]], %vector.body ]
; CHECK: %wide.load = load <16 x i32>
; CHECK: [[ADD]] = add <16 x i32> %vec.phi, %wide.load
; CHECK: store i32 %ntrunc, i32* %a
; CHECK-NOT: store i32 %ntrunc, i32* %a
; CHECK: %index.next = add i64 %index, 64
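; Note: <16 x i32> vectors with the induction stepping by 64 means VF = 16
; interleaved by 4; the CHECK-NOT above ensures the store to the invariant
; address %a is emitted only once in the vector body.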

; CHECK-LABEL: middle.block:
; CHECK: %rdx.shuf = shufflevector <16 x i32>
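; The final reduction value is computed in middle.block by a
; shufflevector-based tree reduction of the <16 x i32> partial sums.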
define i32 @inv_val_store_to_inv_address_with_reduction(i32* %a, i64 %n, i32* %b) {
entry:
  %ntrunc = trunc i64 %n to i32
  br label %for.body

for.body:                                         ; preds = %for.body, %entry
  %i = phi i64 [ %i.next, %for.body ], [ 0, %entry ]
  %tmp0 = phi i32 [ %tmp3, %for.body ], [ 0, %entry ]
  %tmp1 = getelementptr inbounds i32, i32* %b, i64 %i
  %tmp2 = load i32, i32* %tmp1, align 8
  %tmp3 = add i32 %tmp0, %tmp2
  store i32 %ntrunc, i32* %a
  %i.next = add nuw nsw i64 %i, 1
  %cond = icmp slt i64 %i.next, %n
  br i1 %cond, label %for.body, label %for.end

for.end:                                          ; preds = %for.body
  %tmp4 = phi i32 [ %tmp3, %for.body ]
  ret i32 %tmp4
}

; Second test checks that a loop with a conditional store to an invariant
; address gets vectorized:
; if (b[i] == k) a = ntrunc
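; The conditional store to the uniform address %a is expected to become a
; masked scatter to a splat of %a (see the llvm.masked.scatter call in the
; checks below).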
define void @inv_val_store_to_inv_address_conditional(i32* %a, i64 %n, i32* %b, i32 %k) {
; CHECK-LABEL: @inv_val_store_to_inv_address_conditional(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[NTRUNC:%.*]] = trunc i64 [[N:%.*]] to i32
; CHECK-NEXT: [[TMP0:%.*]] = icmp sgt i64 [[N]], 1
; CHECK-NEXT: [[SMAX:%.*]] = select i1 [[TMP0]], i64 [[N]], i64 1
; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[SMAX]], 16
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_MEMCHECK:%.*]]
; CHECK: vector.memcheck:
; CHECK-NEXT: [[A4:%.*]] = bitcast i32* [[A:%.*]] to i8*
; CHECK-NEXT: [[B1:%.*]] = bitcast i32* [[B:%.*]] to i8*
; CHECK-NEXT: [[TMP1:%.*]] = icmp sgt i64 [[N]], 1
; CHECK-NEXT: [[SMAX2:%.*]] = select i1 [[TMP1]], i64 [[N]], i64 1
; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i32, i32* [[B]], i64 [[SMAX2]]
; CHECK-NEXT: [[UGLYGEP:%.*]] = getelementptr i8, i8* [[A4]], i64 1
; CHECK-NEXT: [[BOUND0:%.*]] = icmp ugt i8* [[UGLYGEP]], [[B1]]
; CHECK-NEXT: [[BOUND1:%.*]] = icmp ugt i32* [[SCEVGEP]], [[A]]
; CHECK-NEXT: [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]]
; CHECK-NEXT: br i1 [[FOUND_CONFLICT]], label [[SCALAR_PH]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
; CHECK-NEXT: [[N_VEC:%.*]] = and i64 [[SMAX]], 9223372036854775792
; CHECK-NEXT: [[BROADCAST_SPLATINSERT5:%.*]] = insertelement <16 x i32> undef, i32 [[K:%.*]], i32 0
; CHECK-NEXT: [[BROADCAST_SPLAT6:%.*]] = shufflevector <16 x i32> [[BROADCAST_SPLATINSERT5]], <16 x i32> undef, <16 x i32> zeroinitializer
; CHECK-NEXT: [[BROADCAST_SPLATINSERT7:%.*]] = insertelement <16 x i32> undef, i32 [[NTRUNC]], i32 0
; CHECK-NEXT: [[BROADCAST_SPLAT8:%.*]] = shufflevector <16 x i32> [[BROADCAST_SPLATINSERT7]], <16 x i32> undef, <16 x i32> zeroinitializer
; CHECK-NEXT: [[BROADCAST_SPLATINSERT9:%.*]] = insertelement <16 x i32*> undef, i32* [[A]], i32 0
; CHECK-NEXT: [[BROADCAST_SPLAT10:%.*]] = shufflevector <16 x i32*> [[BROADCAST_SPLATINSERT9]], <16 x i32*> undef, <16 x i32> zeroinitializer
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds i32, i32* [[B]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP3:%.*]] = bitcast i32* [[TMP2]] to <16 x i32>*
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i32>, <16 x i32>* [[TMP3]], align 8, !alias.scope !8, !noalias !11
; CHECK-NEXT: [[TMP4:%.*]] = icmp eq <16 x i32> [[WIDE_LOAD]], [[BROADCAST_SPLAT6]]
; CHECK-NEXT: [[TMP5:%.*]] = bitcast i32* [[TMP2]] to <16 x i32>*
; CHECK-NEXT: store <16 x i32> [[BROADCAST_SPLAT8]], <16 x i32>* [[TMP5]], align 4, !alias.scope !8, !noalias !11
; CHECK-NEXT: call void @llvm.masked.scatter.v16i32.v16p0i32(<16 x i32> [[BROADCAST_SPLAT8]], <16 x i32*> [[BROADCAST_SPLAT10]], i32 4, <16 x i1> [[TMP4]]), !alias.scope !11
; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 16
; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop !13
; CHECK: middle.block:
; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[SMAX]], [[N_VEC]]
; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]]
; CHECK: scalar.ph:
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ], [ 0, [[VECTOR_MEMCHECK]] ]
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[I:%.*]] = phi i64 [ [[I_NEXT:%.*]], [[LATCH:%.*]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i32, i32* [[B]], i64 [[I]]
; CHECK-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 8
; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[TMP2]], [[K]]
; CHECK-NEXT: store i32 [[NTRUNC]], i32* [[TMP1]], align 4
; CHECK-NEXT: br i1 [[CMP]], label [[COND_STORE:%.*]], label [[LATCH]]
; CHECK: cond_store:
; CHECK-NEXT: store i32 [[NTRUNC]], i32* [[A]], align 4
; CHECK-NEXT: br label [[LATCH]]
; CHECK: latch:
; CHECK-NEXT: [[I_NEXT]] = add nuw nsw i64 [[I]], 1
; CHECK-NEXT: [[COND:%.*]] = icmp slt i64 [[I_NEXT]], [[N]]
; CHECK-NEXT: br i1 [[COND]], label [[FOR_BODY]], label [[FOR_END]], !llvm.loop !14
; CHECK: for.end:
; CHECK-NEXT: ret void
;
entry:
  %ntrunc = trunc i64 %n to i32
  br label %for.body

for.body:                                         ; preds = %latch, %entry
  %i = phi i64 [ %i.next, %latch ], [ 0, %entry ]
  %tmp1 = getelementptr inbounds i32, i32* %b, i64 %i
  %tmp2 = load i32, i32* %tmp1, align 8
  %cmp = icmp eq i32 %tmp2, %k
  store i32 %ntrunc, i32* %tmp1
  br i1 %cmp, label %cond_store, label %latch

cond_store:                                       ; preds = %for.body
  store i32 %ntrunc, i32* %a
  br label %latch

latch:                                            ; preds = %cond_store, %for.body
  %i.next = add nuw nsw i64 %i, 1
  %cond = icmp slt i64 %i.next, %n
  br i1 %cond, label %for.body, label %for.end

for.end:                                          ; preds = %latch
  ret void
}

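; Third test: the value stored to the invariant address is loop-variant
; (c[i]), so the vector body is expected to use a masked load of c feeding a
; masked scatter to a splat of %a:
; if (b[i] == k) a = c[i]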
define void @variant_val_store_to_inv_address_conditional(i32* %a, i64 %n, i32* %b, i32* %c, i32 %k) {
; CHECK-LABEL: @variant_val_store_to_inv_address_conditional(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[NTRUNC:%.*]] = trunc i64 [[N:%.*]] to i32
; CHECK-NEXT: [[TMP0:%.*]] = icmp sgt i64 [[N]], 1
; CHECK-NEXT: [[SMAX:%.*]] = select i1 [[TMP0]], i64 [[N]], i64 1
; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[SMAX]], 16
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_MEMCHECK:%.*]]
; CHECK: vector.memcheck:
; CHECK-NEXT: [[C5:%.*]] = bitcast i32* [[C:%.*]] to i8*
; CHECK-NEXT: [[B1:%.*]] = bitcast i32* [[B:%.*]] to i8*
; CHECK-NEXT: [[A4:%.*]] = bitcast i32* [[A:%.*]] to i8*
; CHECK-NEXT: [[TMP1:%.*]] = icmp sgt i64 [[N]], 1
; CHECK-NEXT: [[SMAX2:%.*]] = select i1 [[TMP1]], i64 [[N]], i64 1
; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i32, i32* [[B]], i64 [[SMAX2]]
; CHECK-NEXT: [[UGLYGEP:%.*]] = getelementptr i8, i8* [[A4]], i64 1
; CHECK-NEXT: [[SCEVGEP6:%.*]] = getelementptr i32, i32* [[C]], i64 [[SMAX2]]
; CHECK-NEXT: [[BOUND0:%.*]] = icmp ugt i8* [[UGLYGEP]], [[B1]]
; CHECK-NEXT: [[BOUND1:%.*]] = icmp ugt i32* [[SCEVGEP]], [[A]]
; CHECK-NEXT: [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]]
; CHECK-NEXT: [[BOUND08:%.*]] = icmp ugt i32* [[SCEVGEP6]], [[B]]
; CHECK-NEXT: [[BOUND19:%.*]] = icmp ugt i32* [[SCEVGEP]], [[C]]
; CHECK-NEXT: [[FOUND_CONFLICT10:%.*]] = and i1 [[BOUND08]], [[BOUND19]]
; CHECK-NEXT: [[CONFLICT_RDX:%.*]] = or i1 [[FOUND_CONFLICT]], [[FOUND_CONFLICT10]]
; CHECK-NEXT: [[BOUND012:%.*]] = icmp ugt i32* [[SCEVGEP6]], [[A]]
; CHECK-NEXT: [[BOUND113:%.*]] = icmp ugt i8* [[UGLYGEP]], [[C5]]
; CHECK-NEXT: [[FOUND_CONFLICT14:%.*]] = and i1 [[BOUND012]], [[BOUND113]]
; CHECK-NEXT: [[CONFLICT_RDX15:%.*]] = or i1 [[CONFLICT_RDX]], [[FOUND_CONFLICT14]]
; CHECK-NEXT: br i1 [[CONFLICT_RDX15]], label [[SCALAR_PH]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
; CHECK-NEXT: [[N_VEC:%.*]] = and i64 [[SMAX]], 9223372036854775792
; CHECK-NEXT: [[BROADCAST_SPLATINSERT16:%.*]] = insertelement <16 x i32> undef, i32 [[K:%.*]], i32 0
; CHECK-NEXT: [[BROADCAST_SPLAT17:%.*]] = shufflevector <16 x i32> [[BROADCAST_SPLATINSERT16]], <16 x i32> undef, <16 x i32> zeroinitializer
; CHECK-NEXT: [[BROADCAST_SPLATINSERT18:%.*]] = insertelement <16 x i32> undef, i32 [[NTRUNC]], i32 0
; CHECK-NEXT: [[BROADCAST_SPLAT19:%.*]] = shufflevector <16 x i32> [[BROADCAST_SPLATINSERT18]], <16 x i32> undef, <16 x i32> zeroinitializer
; CHECK-NEXT: [[BROADCAST_SPLATINSERT20:%.*]] = insertelement <16 x i32*> undef, i32* [[A]], i32 0
; CHECK-NEXT: [[BROADCAST_SPLAT21:%.*]] = shufflevector <16 x i32*> [[BROADCAST_SPLATINSERT20]], <16 x i32*> undef, <16 x i32> zeroinitializer
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds i32, i32* [[B]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP3:%.*]] = bitcast i32* [[TMP2]] to <16 x i32>*
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i32>, <16 x i32>* [[TMP3]], align 8, !alias.scope !15, !noalias !18
; CHECK-NEXT: [[TMP4:%.*]] = icmp eq <16 x i32> [[WIDE_LOAD]], [[BROADCAST_SPLAT17]]
; CHECK-NEXT: [[TMP5:%.*]] = bitcast i32* [[TMP2]] to <16 x i32>*
; CHECK-NEXT: store <16 x i32> [[BROADCAST_SPLAT19]], <16 x i32>* [[TMP5]], align 4, !alias.scope !15, !noalias !18
; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, i32* [[C]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP7:%.*]] = bitcast i32* [[TMP6]] to <16 x i32>*
; CHECK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <16 x i32> @llvm.masked.load.v16i32.p0v16i32(<16 x i32>* [[TMP7]], i32 8, <16 x i1> [[TMP4]], <16 x i32> undef), !alias.scope !21
; CHECK-NEXT: call void @llvm.masked.scatter.v16i32.v16p0i32(<16 x i32> [[WIDE_MASKED_LOAD]], <16 x i32*> [[BROADCAST_SPLAT21]], i32 4, <16 x i1> [[TMP4]]), !alias.scope !22, !noalias !21
; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 16
; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop !23
; CHECK: middle.block:
; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[SMAX]], [[N_VEC]]
; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]]
; CHECK: scalar.ph:
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ], [ 0, [[VECTOR_MEMCHECK]] ]
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[I:%.*]] = phi i64 [ [[I_NEXT:%.*]], [[LATCH:%.*]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i32, i32* [[B]], i64 [[I]]
; CHECK-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 8
; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[TMP2]], [[K]]
; CHECK-NEXT: store i32 [[NTRUNC]], i32* [[TMP1]], align 4
; CHECK-NEXT: br i1 [[CMP]], label [[COND_STORE:%.*]], label [[LATCH]]
; CHECK: cond_store:
; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds i32, i32* [[C]], i64 [[I]]
; CHECK-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 8
; CHECK-NEXT: store i32 [[TMP4]], i32* [[A]], align 4
; CHECK-NEXT: br label [[LATCH]]
; CHECK: latch:
; CHECK-NEXT: [[I_NEXT]] = add nuw nsw i64 [[I]], 1
; CHECK-NEXT: [[COND:%.*]] = icmp slt i64 [[I_NEXT]], [[N]]
; CHECK-NEXT: br i1 [[COND]], label [[FOR_BODY]], label [[FOR_END]], !llvm.loop !24
; CHECK: for.end:
; CHECK-NEXT: ret void
;
entry:
  %ntrunc = trunc i64 %n to i32
  br label %for.body

for.body:                                         ; preds = %latch, %entry
  %i = phi i64 [ %i.next, %latch ], [ 0, %entry ]
  %tmp1 = getelementptr inbounds i32, i32* %b, i64 %i
  %tmp2 = load i32, i32* %tmp1, align 8
  %cmp = icmp eq i32 %tmp2, %k
  store i32 %ntrunc, i32* %tmp1
  br i1 %cmp, label %cond_store, label %latch

cond_store:                                       ; preds = %for.body
  %tmp3 = getelementptr inbounds i32, i32* %c, i64 %i
  %tmp4 = load i32, i32* %tmp3, align 8
  store i32 %tmp4, i32* %a
  br label %latch

latch:                                            ; preds = %cond_store, %for.body
  %i.next = add nuw nsw i64 %i, 1
  %cond = icmp slt i64 %i.next, %n
  br i1 %cond, label %for.body, label %for.end

for.end:                                          ; preds = %latch
  ret void
}