; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -passes=loop-vectorize,instcombine,simplifycfg -simplifycfg-require-and-preserve-domtree=1 -tail-predication=enabled < %s -S -o - | FileCheck %s

target datalayout = "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64"
target triple = "thumbv8.1m.main-arm-none-eabi"

; Should not be vectorized
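; The functions in this file are reductions of the shape sketched below in C
; (the C is illustrative only and not part of the generated checks; element
; and accumulator types vary per function). For this first case the CHECK
; lines match only the scalar loop, i.e. an i64 reduction of i64 elements is
; expected to stay scalar:
;
;   #include <stdint.h>
;   int64_t add_i64_i64(const int64_t *x, int n) {
;     int64_t r = 0;
;     for (int i = 0; i < n; i++)
;       r += x[i]; /* plain i64 accumulation, no widening */
;     return r;
;   }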
define i64 @add_i64_i64(ptr nocapture readonly %x, i32 %n) #0 {
; CHECK-LABEL: @add_i64_i64(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[CMP6:%.*]] = icmp sgt i32 [[N:%.*]], 0
; CHECK-NEXT: br i1 [[CMP6]], label [[FOR_BODY:%.*]], label [[FOR_COND_CLEANUP:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[I_08:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT: [[R_07:%.*]] = phi i64 [ [[ADD:%.*]], [[FOR_BODY]] ], [ 0, [[ENTRY]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw i64, ptr [[X:%.*]], i32 [[I_08]]
; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr [[ARRAYIDX]], align 8
; CHECK-NEXT: [[ADD]] = add nsw i64 [[TMP0]], [[R_07]]
; CHECK-NEXT: [[INC]] = add nuw nsw i32 [[I_08]], 1
; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[INC]], [[N]]
; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY]]
; CHECK: for.cond.cleanup:
; CHECK-NEXT: [[R_0_LCSSA:%.*]] = phi i64 [ 0, [[ENTRY]] ], [ [[ADD]], [[FOR_BODY]] ]
; CHECK-NEXT: ret i64 [[R_0_LCSSA]]
;
entry:
  %cmp6 = icmp sgt i32 %n, 0
  br i1 %cmp6, label %for.body, label %for.cond.cleanup

for.body: ; preds = %entry, %for.body
  %i.08 = phi i32 [ %inc, %for.body ], [ 0, %entry ]
  %r.07 = phi i64 [ %add, %for.body ], [ 0, %entry ]
  %arrayidx = getelementptr inbounds i64, ptr %x, i32 %i.08
  %0 = load i64, ptr %arrayidx, align 8
  %add = add nsw i64 %0, %r.07
  %inc = add nuw nsw i32 %i.08, 1
  %exitcond = icmp eq i32 %inc, %n
  br i1 %exitcond, label %for.cond.cleanup, label %for.body

for.cond.cleanup: ; preds = %for.body, %entry
  %r.0.lcssa = phi i64 [ 0, %entry ], [ %add, %for.body ]
  ret i64 %r.0.lcssa
}

; FIXME: TailPredicate
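; A sketch of the source shape for add_i32_i64 (C is illustrative only): each
; i32 element is sign-extended into an i64 accumulator. The CHECK lines below
; expect a 4x vector body with a scalar epilogue rather than a tail-predicated
; loop, which is what the FIXME above refers to:
;
;   int64_t add_i32_i64(const int32_t *x, int n) {
;     int64_t r = 0;
;     for (int i = 0; i < n; i++)
;       r += x[i]; /* implicit sign extension i32 -> i64 */
;     return r;
;   }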
define i64 @add_i32_i64(ptr nocapture readonly %x, i32 %n) #0 {
; CHECK-LABEL: @add_i32_i64(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[CMP6:%.*]] = icmp sgt i32 [[N:%.*]], 0
; CHECK-NEXT: br i1 [[CMP6]], label [[FOR_BODY_PREHEADER:%.*]], label [[FOR_COND_CLEANUP:%.*]]
; CHECK: for.body.preheader:
; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i32 [[N]], 4
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
; CHECK-NEXT: [[N_VEC:%.*]] = and i32 [[N]], 2147483644
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[TMP3:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds i32, ptr [[X:%.*]], i32 [[INDEX]]
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP0]], align 4
; CHECK-NEXT: [[TMP1:%.*]] = sext <4 x i32> [[WIDE_LOAD]] to <4 x i64>
; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vector.reduce.add.v4i64(<4 x i64> [[TMP1]])
; CHECK-NEXT: [[TMP3]] = add i64 [[TMP2]], [[VEC_PHI]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4
; CHECK-NEXT: [[TMP4:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP4]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[N]], [[N_VEC]]
; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_COND_CLEANUP]], label [[SCALAR_PH]]
; CHECK: scalar.ph:
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[FOR_BODY_PREHEADER]] ]
; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i64 [ [[TMP3]], [[MIDDLE_BLOCK]] ], [ 0, [[FOR_BODY_PREHEADER]] ]
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[I_08:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
; CHECK-NEXT: [[R_07:%.*]] = phi i64 [ [[ADD:%.*]], [[FOR_BODY]] ], [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw i32, ptr [[X]], i32 [[I_08]]
; CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[CONV:%.*]] = sext i32 [[TMP5]] to i64
; CHECK-NEXT: [[ADD]] = add nsw i64 [[R_07]], [[CONV]]
; CHECK-NEXT: [[INC]] = add nuw nsw i32 [[I_08]], 1
; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[INC]], [[N]]
; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
; CHECK: for.cond.cleanup:
; CHECK-NEXT: [[R_0_LCSSA:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[ADD]], [[FOR_BODY]] ], [ [[TMP3]], [[MIDDLE_BLOCK]] ]
; CHECK-NEXT: ret i64 [[R_0_LCSSA]]
;
entry:
  %cmp6 = icmp sgt i32 %n, 0
  br i1 %cmp6, label %for.body, label %for.cond.cleanup

for.body: ; preds = %entry, %for.body
  %i.08 = phi i32 [ %inc, %for.body ], [ 0, %entry ]
  %r.07 = phi i64 [ %add, %for.body ], [ 0, %entry ]
  %arrayidx = getelementptr inbounds i32, ptr %x, i32 %i.08
  %0 = load i32, ptr %arrayidx, align 4
  %conv = sext i32 %0 to i64
  %add = add nsw i64 %r.07, %conv
  %inc = add nuw nsw i32 %i.08, 1
  %exitcond = icmp eq i32 %inc, %n
  br i1 %exitcond, label %for.cond.cleanup, label %for.body

for.cond.cleanup: ; preds = %for.body, %entry
  %r.0.lcssa = phi i64 [ 0, %entry ], [ %add, %for.body ]
  ret i64 %r.0.lcssa
}

; FIXME: TailPredicate
define i64 @add_i16_i64(ptr nocapture readonly %x, i32 %n) #0 {
; CHECK-LABEL: @add_i16_i64(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[CMP6:%.*]] = icmp sgt i32 [[N:%.*]], 0
; CHECK-NEXT: br i1 [[CMP6]], label [[FOR_BODY_PREHEADER:%.*]], label [[FOR_COND_CLEANUP:%.*]]
; CHECK: for.body.preheader:
; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i32 [[N]], 4
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
; CHECK-NEXT: [[N_VEC:%.*]] = and i32 [[N]], 2147483644
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[TMP3:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds i16, ptr [[X:%.*]], i32 [[INDEX]]
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i16>, ptr [[TMP0]], align 2
; CHECK-NEXT: [[TMP1:%.*]] = sext <4 x i16> [[WIDE_LOAD]] to <4 x i64>
; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vector.reduce.add.v4i64(<4 x i64> [[TMP1]])
; CHECK-NEXT: [[TMP3]] = add i64 [[TMP2]], [[VEC_PHI]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4
; CHECK-NEXT: [[TMP4:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP4]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[N]], [[N_VEC]]
; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_COND_CLEANUP]], label [[SCALAR_PH]]
; CHECK: scalar.ph:
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[FOR_BODY_PREHEADER]] ]
; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i64 [ [[TMP3]], [[MIDDLE_BLOCK]] ], [ 0, [[FOR_BODY_PREHEADER]] ]
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[I_08:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
; CHECK-NEXT: [[R_07:%.*]] = phi i64 [ [[ADD:%.*]], [[FOR_BODY]] ], [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw i16, ptr [[X]], i32 [[I_08]]
; CHECK-NEXT: [[TMP5:%.*]] = load i16, ptr [[ARRAYIDX]], align 2
; CHECK-NEXT: [[CONV:%.*]] = sext i16 [[TMP5]] to i64
; CHECK-NEXT: [[ADD]] = add nsw i64 [[R_07]], [[CONV]]
; CHECK-NEXT: [[INC]] = add nuw nsw i32 [[I_08]], 1
; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[INC]], [[N]]
; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; CHECK: for.cond.cleanup:
; CHECK-NEXT: [[R_0_LCSSA:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[ADD]], [[FOR_BODY]] ], [ [[TMP3]], [[MIDDLE_BLOCK]] ]
; CHECK-NEXT: ret i64 [[R_0_LCSSA]]
;
entry:
  %cmp6 = icmp sgt i32 %n, 0
  br i1 %cmp6, label %for.body, label %for.cond.cleanup

for.body: ; preds = %entry, %for.body
  %i.08 = phi i32 [ %inc, %for.body ], [ 0, %entry ]
  %r.07 = phi i64 [ %add, %for.body ], [ 0, %entry ]
  %arrayidx = getelementptr inbounds i16, ptr %x, i32 %i.08
  %0 = load i16, ptr %arrayidx, align 2
  %conv = sext i16 %0 to i64
  %add = add nsw i64 %r.07, %conv
  %inc = add nuw nsw i32 %i.08, 1
  %exitcond = icmp eq i32 %inc, %n
  br i1 %exitcond, label %for.cond.cleanup, label %for.body

for.cond.cleanup: ; preds = %for.body, %entry
  %r.0.lcssa = phi i64 [ 0, %entry ], [ %add, %for.body ]
  ret i64 %r.0.lcssa
}

; FIXME: TailPredicate
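; Same widening-reduction shape as above but from an unsigned narrow element,
; which is why the IR below uses zext rather than sext (C sketch, illustrative
; only):
;
;   int64_t add_i8_i64(const uint8_t *x, int n) {
;     int64_t r = 0;
;     for (int i = 0; i < n; i++)
;       r += x[i]; /* u8 zero-extended to i64 */
;     return r;
;   }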
define i64 @add_i8_i64(ptr nocapture readonly %x, i32 %n) #0 {
; CHECK-LABEL: @add_i8_i64(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[CMP6:%.*]] = icmp sgt i32 [[N:%.*]], 0
; CHECK-NEXT: br i1 [[CMP6]], label [[FOR_BODY_PREHEADER:%.*]], label [[FOR_COND_CLEANUP:%.*]]
; CHECK: for.body.preheader:
; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i32 [[N]], 4
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
; CHECK-NEXT: [[N_VEC:%.*]] = and i32 [[N]], 2147483644
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[TMP3:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds i8, ptr [[X:%.*]], i32 [[INDEX]]
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i8>, ptr [[TMP0]], align 1
; CHECK-NEXT: [[TMP1:%.*]] = zext <4 x i8> [[WIDE_LOAD]] to <4 x i64>
; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vector.reduce.add.v4i64(<4 x i64> [[TMP1]])
; CHECK-NEXT: [[TMP3]] = add i64 [[TMP2]], [[VEC_PHI]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4
; CHECK-NEXT: [[TMP4:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP4]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[N]], [[N_VEC]]
; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_COND_CLEANUP]], label [[SCALAR_PH]]
; CHECK: scalar.ph:
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[FOR_BODY_PREHEADER]] ]
; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i64 [ [[TMP3]], [[MIDDLE_BLOCK]] ], [ 0, [[FOR_BODY_PREHEADER]] ]
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[I_08:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
; CHECK-NEXT: [[R_07:%.*]] = phi i64 [ [[ADD:%.*]], [[FOR_BODY]] ], [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw i8, ptr [[X]], i32 [[I_08]]
; CHECK-NEXT: [[TMP5:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
; CHECK-NEXT: [[CONV:%.*]] = zext i8 [[TMP5]] to i64
; CHECK-NEXT: [[ADD]] = add nuw nsw i64 [[R_07]], [[CONV]]
; CHECK-NEXT: [[INC]] = add nuw nsw i32 [[I_08]], 1
; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[INC]], [[N]]
; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
; CHECK: for.cond.cleanup:
; CHECK-NEXT: [[R_0_LCSSA:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[ADD]], [[FOR_BODY]] ], [ [[TMP3]], [[MIDDLE_BLOCK]] ]
; CHECK-NEXT: ret i64 [[R_0_LCSSA]]
;
entry:
  %cmp6 = icmp sgt i32 %n, 0
  br i1 %cmp6, label %for.body, label %for.cond.cleanup

for.body: ; preds = %entry, %for.body
  %i.08 = phi i32 [ %inc, %for.body ], [ 0, %entry ]
  %r.07 = phi i64 [ %add, %for.body ], [ 0, %entry ]
  %arrayidx = getelementptr inbounds i8, ptr %x, i32 %i.08
  %0 = load i8, ptr %arrayidx, align 1
  %conv = zext i8 %0 to i64
  %add = add nuw nsw i64 %r.07, %conv
  %inc = add nuw nsw i32 %i.08, 1
  %exitcond = icmp eq i32 %inc, %n
  br i1 %exitcond, label %for.cond.cleanup, label %for.body

for.cond.cleanup: ; preds = %for.body, %entry
  %r.0.lcssa = phi i64 [ 0, %entry ], [ %add, %for.body ]
  ret i64 %r.0.lcssa
}

; 4x to use VADDV.u32
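; From here on the accumulator is at most 32 bits wide, and the CHECK lines
; expect a fully tail-folded loop: an active-lane mask guards the masked loads
; and the reduction, and no scalar epilogue remains. C sketch (illustrative
; only; the narrower variants that follow differ only in element type):
;
;   int32_t add_i32_i32(const int32_t *x, int n) {
;     int32_t r = 0;
;     for (int i = 0; i < n; i++)
;       r += x[i];
;     return r;
;   }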
define i32 @add_i32_i32(ptr nocapture readonly %x, i32 %n) #0 {
; CHECK-LABEL: @add_i32_i32(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[CMP6:%.*]] = icmp sgt i32 [[N:%.*]], 0
; CHECK-NEXT: br i1 [[CMP6]], label [[VECTOR_PH:%.*]], label [[FOR_COND_CLEANUP:%.*]]
; CHECK: vector.ph:
; CHECK-NEXT: [[N_RND_UP:%.*]] = add nuw i32 [[N]], 3
; CHECK-NEXT: [[N_VEC:%.*]] = and i32 [[N_RND_UP]], -4
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[TMP2:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[ACTIVE_LANE_MASK:%.*]] = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 [[INDEX]], i32 [[N]])
; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds i32, ptr [[X:%.*]], i32 [[INDEX]]
; CHECK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr [[TMP0]], i32 4, <4 x i1> [[ACTIVE_LANE_MASK]], <4 x i32> zeroinitializer)
; CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[WIDE_MASKED_LOAD]])
; CHECK-NEXT: [[TMP2]] = add i32 [[TMP1]], [[VEC_PHI]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4
; CHECK-NEXT: [[TMP3:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP3]], label [[FOR_COND_CLEANUP]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
; CHECK: for.cond.cleanup:
; CHECK-NEXT: [[R_0_LCSSA:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[TMP2]], [[VECTOR_BODY]] ]
; CHECK-NEXT: ret i32 [[R_0_LCSSA]]
;
entry:
  %cmp6 = icmp sgt i32 %n, 0
  br i1 %cmp6, label %for.body, label %for.cond.cleanup

for.body: ; preds = %entry, %for.body
  %i.08 = phi i32 [ %inc, %for.body ], [ 0, %entry ]
  %r.07 = phi i32 [ %add, %for.body ], [ 0, %entry ]
  %arrayidx = getelementptr inbounds i32, ptr %x, i32 %i.08
  %0 = load i32, ptr %arrayidx, align 4
  %add = add nsw i32 %0, %r.07
  %inc = add nuw nsw i32 %i.08, 1
  %exitcond = icmp eq i32 %inc, %n
  br i1 %exitcond, label %for.cond.cleanup, label %for.body

for.cond.cleanup: ; preds = %for.body, %entry
  %r.0.lcssa = phi i32 [ 0, %entry ], [ %add, %for.body ]
  ret i32 %r.0.lcssa
}

; 8x to use VADDV.u16
define i32 @add_i16_i32(ptr nocapture readonly %x, i32 %n) #0 {
; CHECK-LABEL: @add_i16_i32(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[CMP6:%.*]] = icmp sgt i32 [[N:%.*]], 0
; CHECK-NEXT: br i1 [[CMP6]], label [[VECTOR_PH:%.*]], label [[FOR_COND_CLEANUP:%.*]]
; CHECK: vector.ph:
; CHECK-NEXT: [[N_RND_UP:%.*]] = add nuw i32 [[N]], 7
; CHECK-NEXT: [[N_VEC:%.*]] = and i32 [[N_RND_UP]], -8
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[TMP4:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[ACTIVE_LANE_MASK:%.*]] = call <8 x i1> @llvm.get.active.lane.mask.v8i1.i32(i32 [[INDEX]], i32 [[N]])
; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds i16, ptr [[X:%.*]], i32 [[INDEX]]
; CHECK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <8 x i16> @llvm.masked.load.v8i16.p0(ptr [[TMP0]], i32 2, <8 x i1> [[ACTIVE_LANE_MASK]], <8 x i16> poison)
; CHECK-NEXT: [[TMP1:%.*]] = sext <8 x i16> [[WIDE_MASKED_LOAD]] to <8 x i32>
; CHECK-NEXT: [[TMP2:%.*]] = select <8 x i1> [[ACTIVE_LANE_MASK]], <8 x i32> [[TMP1]], <8 x i32> zeroinitializer
; CHECK-NEXT: [[TMP3:%.*]] = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> [[TMP2]])
; CHECK-NEXT: [[TMP4]] = add i32 [[TMP3]], [[VEC_PHI]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 8
; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP5]], label [[FOR_COND_CLEANUP]], label [[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
; CHECK: for.cond.cleanup:
; CHECK-NEXT: [[R_0_LCSSA:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[TMP4]], [[VECTOR_BODY]] ]
; CHECK-NEXT: ret i32 [[R_0_LCSSA]]
;
entry:
  %cmp6 = icmp sgt i32 %n, 0
  br i1 %cmp6, label %for.body, label %for.cond.cleanup

for.body: ; preds = %entry, %for.body
  %i.08 = phi i32 [ %inc, %for.body ], [ 0, %entry ]
  %r.07 = phi i32 [ %add, %for.body ], [ 0, %entry ]
  %arrayidx = getelementptr inbounds i16, ptr %x, i32 %i.08
  %0 = load i16, ptr %arrayidx, align 2
  %conv = sext i16 %0 to i32
  %add = add nsw i32 %r.07, %conv
  %inc = add nuw nsw i32 %i.08, 1
  %exitcond = icmp eq i32 %inc, %n
  br i1 %exitcond, label %for.cond.cleanup, label %for.body

for.cond.cleanup: ; preds = %for.body, %entry
  %r.0.lcssa = phi i32 [ 0, %entry ], [ %add, %for.body ]
  ret i32 %r.0.lcssa
}

; 16x to use VADDV.u8
define i32 @add_i8_i32(ptr nocapture readonly %x, i32 %n) #0 {
; CHECK-LABEL: @add_i8_i32(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[CMP6:%.*]] = icmp sgt i32 [[N:%.*]], 0
; CHECK-NEXT: br i1 [[CMP6]], label [[VECTOR_PH:%.*]], label [[FOR_COND_CLEANUP:%.*]]
; CHECK: vector.ph:
; CHECK-NEXT: [[N_RND_UP:%.*]] = add nuw i32 [[N]], 15
; CHECK-NEXT: [[N_VEC:%.*]] = and i32 [[N_RND_UP]], -16
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[TMP4:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[ACTIVE_LANE_MASK:%.*]] = call <16 x i1> @llvm.get.active.lane.mask.v16i1.i32(i32 [[INDEX]], i32 [[N]])
; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds i8, ptr [[X:%.*]], i32 [[INDEX]]
; CHECK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <16 x i8> @llvm.masked.load.v16i8.p0(ptr [[TMP0]], i32 1, <16 x i1> [[ACTIVE_LANE_MASK]], <16 x i8> poison)
; CHECK-NEXT: [[TMP1:%.*]] = zext <16 x i8> [[WIDE_MASKED_LOAD]] to <16 x i32>
; CHECK-NEXT: [[TMP2:%.*]] = select <16 x i1> [[ACTIVE_LANE_MASK]], <16 x i32> [[TMP1]], <16 x i32> zeroinitializer
; CHECK-NEXT: [[TMP3:%.*]] = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> [[TMP2]])
; CHECK-NEXT: [[TMP4]] = add i32 [[TMP3]], [[VEC_PHI]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 16
; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP5]], label [[FOR_COND_CLEANUP]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
; CHECK: for.cond.cleanup:
; CHECK-NEXT: [[R_0_LCSSA:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[TMP4]], [[VECTOR_BODY]] ]
; CHECK-NEXT: ret i32 [[R_0_LCSSA]]
;
entry:
  %cmp6 = icmp sgt i32 %n, 0
  br i1 %cmp6, label %for.body, label %for.cond.cleanup

for.body: ; preds = %entry, %for.body
  %i.08 = phi i32 [ %inc, %for.body ], [ 0, %entry ]
  %r.07 = phi i32 [ %add, %for.body ], [ 0, %entry ]
  %arrayidx = getelementptr inbounds i8, ptr %x, i32 %i.08
  %0 = load i8, ptr %arrayidx, align 1
  %conv = zext i8 %0 to i32
  %add = add nuw nsw i32 %r.07, %conv
  %inc = add nuw nsw i32 %i.08, 1
  %exitcond = icmp eq i32 %inc, %n
  br i1 %exitcond, label %for.cond.cleanup, label %for.body

for.cond.cleanup: ; preds = %for.body, %entry
  %r.0.lcssa = phi i32 [ 0, %entry ], [ %add, %for.body ]
  ret i32 %r.0.lcssa
}

; 8x to use VADDV.u16
define signext i16 @add_i16_i16(ptr nocapture readonly %x, i32 %n) #0 {
; CHECK-LABEL: @add_i16_i16(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[CMP8:%.*]] = icmp sgt i32 [[N:%.*]], 0
; CHECK-NEXT: br i1 [[CMP8]], label [[VECTOR_PH:%.*]], label [[FOR_COND_CLEANUP:%.*]]
; CHECK: vector.ph:
; CHECK-NEXT: [[N_RND_UP:%.*]] = add nuw i32 [[N]], 7
; CHECK-NEXT: [[N_VEC:%.*]] = and i32 [[N_RND_UP]], -8
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi i16 [ 0, [[VECTOR_PH]] ], [ [[TMP2:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[ACTIVE_LANE_MASK:%.*]] = call <8 x i1> @llvm.get.active.lane.mask.v8i1.i32(i32 [[INDEX]], i32 [[N]])
; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds i16, ptr [[X:%.*]], i32 [[INDEX]]
; CHECK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <8 x i16> @llvm.masked.load.v8i16.p0(ptr [[TMP0]], i32 2, <8 x i1> [[ACTIVE_LANE_MASK]], <8 x i16> zeroinitializer)
; CHECK-NEXT: [[TMP1:%.*]] = call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> [[WIDE_MASKED_LOAD]])
; CHECK-NEXT: [[TMP2]] = add i16 [[TMP1]], [[VEC_PHI]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 8
; CHECK-NEXT: [[TMP3:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP3]], label [[FOR_COND_CLEANUP]], label [[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]]
; CHECK: for.cond.cleanup:
; CHECK-NEXT: [[R_0_LCSSA:%.*]] = phi i16 [ 0, [[ENTRY:%.*]] ], [ [[TMP2]], [[VECTOR_BODY]] ]
; CHECK-NEXT: ret i16 [[R_0_LCSSA]]
;
entry:
  %cmp8 = icmp sgt i32 %n, 0
  br i1 %cmp8, label %for.body, label %for.cond.cleanup

for.body: ; preds = %entry, %for.body
  %i.010 = phi i32 [ %inc, %for.body ], [ 0, %entry ]
  %r.09 = phi i16 [ %add, %for.body ], [ 0, %entry ]
  %arrayidx = getelementptr inbounds i16, ptr %x, i32 %i.010
  %0 = load i16, ptr %arrayidx, align 2
  %add = add i16 %0, %r.09
  %inc = add nuw nsw i32 %i.010, 1
  %exitcond = icmp eq i32 %inc, %n
  br i1 %exitcond, label %for.cond.cleanup, label %for.body

for.cond.cleanup: ; preds = %for.body, %entry
  %r.0.lcssa = phi i16 [ 0, %entry ], [ %add, %for.body ]
  ret i16 %r.0.lcssa
}

; 16x to use VADDV.u8
define signext i16 @add_i8_i16(ptr nocapture readonly %x, i32 %n) #0 {
; CHECK-LABEL: @add_i8_i16(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[CMP8:%.*]] = icmp sgt i32 [[N:%.*]], 0
; CHECK-NEXT: br i1 [[CMP8]], label [[VECTOR_PH:%.*]], label [[FOR_COND_CLEANUP:%.*]]
; CHECK: vector.ph:
; CHECK-NEXT: [[N_RND_UP:%.*]] = add nuw i32 [[N]], 15
; CHECK-NEXT: [[N_VEC:%.*]] = and i32 [[N_RND_UP]], -16
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi i16 [ 0, [[VECTOR_PH]] ], [ [[TMP4:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[ACTIVE_LANE_MASK:%.*]] = call <16 x i1> @llvm.get.active.lane.mask.v16i1.i32(i32 [[INDEX]], i32 [[N]])
; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds i8, ptr [[X:%.*]], i32 [[INDEX]]
; CHECK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <16 x i8> @llvm.masked.load.v16i8.p0(ptr [[TMP0]], i32 1, <16 x i1> [[ACTIVE_LANE_MASK]], <16 x i8> poison)
; CHECK-NEXT: [[TMP1:%.*]] = zext <16 x i8> [[WIDE_MASKED_LOAD]] to <16 x i16>
; CHECK-NEXT: [[TMP2:%.*]] = select <16 x i1> [[ACTIVE_LANE_MASK]], <16 x i16> [[TMP1]], <16 x i16> zeroinitializer
; CHECK-NEXT: [[TMP3:%.*]] = call i16 @llvm.vector.reduce.add.v16i16(<16 x i16> [[TMP2]])
; CHECK-NEXT: [[TMP4]] = add i16 [[TMP3]], [[VEC_PHI]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 16
; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP5]], label [[FOR_COND_CLEANUP]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
; CHECK: for.cond.cleanup:
; CHECK-NEXT: [[R_0_LCSSA:%.*]] = phi i16 [ 0, [[ENTRY:%.*]] ], [ [[TMP4]], [[VECTOR_BODY]] ]
; CHECK-NEXT: ret i16 [[R_0_LCSSA]]
;
entry:
  %cmp8 = icmp sgt i32 %n, 0
  br i1 %cmp8, label %for.body, label %for.cond.cleanup

for.body: ; preds = %entry, %for.body
  %i.010 = phi i32 [ %inc, %for.body ], [ 0, %entry ]
  %r.09 = phi i16 [ %add, %for.body ], [ 0, %entry ]
  %arrayidx = getelementptr inbounds i8, ptr %x, i32 %i.010
  %0 = load i8, ptr %arrayidx, align 1
  %conv = zext i8 %0 to i16
  %add = add i16 %r.09, %conv
  %inc = add nuw nsw i32 %i.010, 1
  %exitcond = icmp eq i32 %inc, %n
  br i1 %exitcond, label %for.cond.cleanup, label %for.body

for.cond.cleanup: ; preds = %for.body, %entry
  %r.0.lcssa = phi i16 [ 0, %entry ], [ %add, %for.body ]
  ret i16 %r.0.lcssa
}

; 16x to use VADDV.u8
define zeroext i8 @add_i8_i8(ptr nocapture readonly %x, i32 %n) #0 {
; CHECK-LABEL: @add_i8_i8(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[CMP7:%.*]] = icmp sgt i32 [[N:%.*]], 0
; CHECK-NEXT: br i1 [[CMP7]], label [[VECTOR_PH:%.*]], label [[FOR_COND_CLEANUP:%.*]]
; CHECK: vector.ph:
; CHECK-NEXT: [[N_RND_UP:%.*]] = add nuw i32 [[N]], 15
; CHECK-NEXT: [[N_VEC:%.*]] = and i32 [[N_RND_UP]], -16
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi i8 [ 0, [[VECTOR_PH]] ], [ [[TMP2:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[ACTIVE_LANE_MASK:%.*]] = call <16 x i1> @llvm.get.active.lane.mask.v16i1.i32(i32 [[INDEX]], i32 [[N]])
; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds i8, ptr [[X:%.*]], i32 [[INDEX]]
; CHECK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <16 x i8> @llvm.masked.load.v16i8.p0(ptr [[TMP0]], i32 1, <16 x i1> [[ACTIVE_LANE_MASK]], <16 x i8> zeroinitializer)
; CHECK-NEXT: [[TMP1:%.*]] = call i8 @llvm.vector.reduce.add.v16i8(<16 x i8> [[WIDE_MASKED_LOAD]])
; CHECK-NEXT: [[TMP2]] = add i8 [[TMP1]], [[VEC_PHI]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 16
; CHECK-NEXT: [[TMP3:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP3]], label [[FOR_COND_CLEANUP]], label [[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]]
; CHECK: for.cond.cleanup:
; CHECK-NEXT: [[R_0_LCSSA:%.*]] = phi i8 [ 0, [[ENTRY:%.*]] ], [ [[TMP2]], [[VECTOR_BODY]] ]
; CHECK-NEXT: ret i8 [[R_0_LCSSA]]
;
entry:
  %cmp7 = icmp sgt i32 %n, 0
  br i1 %cmp7, label %for.body, label %for.cond.cleanup

for.body: ; preds = %entry, %for.body
  %i.09 = phi i32 [ %inc, %for.body ], [ 0, %entry ]
  %r.08 = phi i8 [ %add, %for.body ], [ 0, %entry ]
  %arrayidx = getelementptr inbounds i8, ptr %x, i32 %i.09
  %0 = load i8, ptr %arrayidx, align 1
  %add = add i8 %0, %r.08
  %inc = add nuw nsw i32 %i.09, 1
  %exitcond = icmp eq i32 %inc, %n
  br i1 %exitcond, label %for.cond.cleanup, label %for.body

for.cond.cleanup: ; preds = %for.body, %entry
  %r.0.lcssa = phi i8 [ 0, %entry ], [ %add, %for.body ]
  ret i8 %r.0.lcssa
}

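; The remaining functions test multiply-accumulate reductions of the shape
; sketched below in C (illustrative only; element and accumulator types vary
; per function). As with add_i64_i64, the CHECK lines for this first i64
; variant match only the scalar loop:
;
;   int64_t mla_i64_i64(const int64_t *x, const int64_t *y, int n) {
;     int64_t r = 0;
;     for (int i = 0; i < n; i++)
;       r += x[i] * y[i];
;     return r;
;   }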
define i64 @mla_i64_i64(ptr nocapture readonly %x, ptr nocapture readonly %y, i32 %n) #0 {
; CHECK-LABEL: @mla_i64_i64(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[CMP8:%.*]] = icmp sgt i32 [[N:%.*]], 0
; CHECK-NEXT: br i1 [[CMP8]], label [[FOR_BODY:%.*]], label [[FOR_COND_CLEANUP:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[I_010:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT: [[R_09:%.*]] = phi i64 [ [[ADD:%.*]], [[FOR_BODY]] ], [ 0, [[ENTRY]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw i64, ptr [[X:%.*]], i32 [[I_010]]
; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr [[ARRAYIDX]], align 8
; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds nuw i64, ptr [[Y:%.*]], i32 [[I_010]]
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr [[ARRAYIDX1]], align 8
; CHECK-NEXT: [[MUL:%.*]] = mul nsw i64 [[TMP1]], [[TMP0]]
; CHECK-NEXT: [[ADD]] = add nsw i64 [[MUL]], [[R_09]]
; CHECK-NEXT: [[INC]] = add nuw nsw i32 [[I_010]], 1
; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[INC]], [[N]]
; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY]]
; CHECK: for.cond.cleanup:
; CHECK-NEXT: [[R_0_LCSSA:%.*]] = phi i64 [ 0, [[ENTRY]] ], [ [[ADD]], [[FOR_BODY]] ]
; CHECK-NEXT: ret i64 [[R_0_LCSSA]]
;
entry:
  %cmp8 = icmp sgt i32 %n, 0
  br i1 %cmp8, label %for.body, label %for.cond.cleanup

for.body: ; preds = %entry, %for.body
  %i.010 = phi i32 [ %inc, %for.body ], [ 0, %entry ]
  %r.09 = phi i64 [ %add, %for.body ], [ 0, %entry ]
  %arrayidx = getelementptr inbounds i64, ptr %x, i32 %i.010
  %0 = load i64, ptr %arrayidx, align 8
  %arrayidx1 = getelementptr inbounds i64, ptr %y, i32 %i.010
  %1 = load i64, ptr %arrayidx1, align 8
  %mul = mul nsw i64 %1, %0
  %add = add nsw i64 %mul, %r.09
  %inc = add nuw nsw i32 %i.010, 1
  %exitcond = icmp eq i32 %inc, %n
  br i1 %exitcond, label %for.cond.cleanup, label %for.body

for.cond.cleanup: ; preds = %for.body, %entry
  %r.0.lcssa = phi i64 [ 0, %entry ], [ %add, %for.body ]
  ret i64 %r.0.lcssa
}

; 4x to use VMLAL.u32
; FIXME: TailPredicate
define i64 @mla_i32_i64(ptr nocapture readonly %x, ptr nocapture readonly %y, i32 %n) #0 {
; CHECK-LABEL: @mla_i32_i64(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[CMP8:%.*]] = icmp sgt i32 [[N:%.*]], 0
; CHECK-NEXT: br i1 [[CMP8]], label [[FOR_BODY_PREHEADER:%.*]], label [[FOR_COND_CLEANUP:%.*]]
; CHECK: for.body.preheader:
; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i32 [[N]], 4
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
; CHECK-NEXT: [[N_VEC:%.*]] = and i32 [[N]], 2147483644
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[TMP5:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds i32, ptr [[X:%.*]], i32 [[INDEX]]
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP0]], align 4
; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i32, ptr [[Y:%.*]], i32 [[INDEX]]
; CHECK-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x i32>, ptr [[TMP1]], align 4
; CHECK-NEXT: [[TMP2:%.*]] = mul nsw <4 x i32> [[WIDE_LOAD1]], [[WIDE_LOAD]]
; CHECK-NEXT: [[TMP3:%.*]] = sext <4 x i32> [[TMP2]] to <4 x i64>
; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vector.reduce.add.v4i64(<4 x i64> [[TMP3]])
; CHECK-NEXT: [[TMP5]] = add i64 [[TMP4]], [[VEC_PHI]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4
; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[N]], [[N_VEC]]
; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_COND_CLEANUP]], label [[SCALAR_PH]]
; CHECK: scalar.ph:
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[FOR_BODY_PREHEADER]] ]
; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i64 [ [[TMP5]], [[MIDDLE_BLOCK]] ], [ 0, [[FOR_BODY_PREHEADER]] ]
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[I_010:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
; CHECK-NEXT: [[R_09:%.*]] = phi i64 [ [[ADD:%.*]], [[FOR_BODY]] ], [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw i32, ptr [[X]], i32 [[I_010]]
; CHECK-NEXT: [[TMP7:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds nuw i32, ptr [[Y]], i32 [[I_010]]
; CHECK-NEXT: [[TMP8:%.*]] = load i32, ptr [[ARRAYIDX1]], align 4
; CHECK-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP8]], [[TMP7]]
; CHECK-NEXT: [[CONV:%.*]] = sext i32 [[MUL]] to i64
; CHECK-NEXT: [[ADD]] = add nsw i64 [[R_09]], [[CONV]]
; CHECK-NEXT: [[INC]] = add nuw nsw i32 [[I_010]], 1
; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[INC]], [[N]]
; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]]
; CHECK: for.cond.cleanup:
; CHECK-NEXT: [[R_0_LCSSA:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[ADD]], [[FOR_BODY]] ], [ [[TMP5]], [[MIDDLE_BLOCK]] ]
; CHECK-NEXT: ret i64 [[R_0_LCSSA]]
;
entry:
  %cmp8 = icmp sgt i32 %n, 0
  br i1 %cmp8, label %for.body, label %for.cond.cleanup

for.body: ; preds = %entry, %for.body
  %i.010 = phi i32 [ %inc, %for.body ], [ 0, %entry ]
  %r.09 = phi i64 [ %add, %for.body ], [ 0, %entry ]
  %arrayidx = getelementptr inbounds i32, ptr %x, i32 %i.010
  %0 = load i32, ptr %arrayidx, align 4
  %arrayidx1 = getelementptr inbounds i32, ptr %y, i32 %i.010
  %1 = load i32, ptr %arrayidx1, align 4
  %mul = mul nsw i32 %1, %0
  %conv = sext i32 %mul to i64
  %add = add nsw i64 %r.09, %conv
  %inc = add nuw nsw i32 %i.010, 1
  %exitcond = icmp eq i32 %inc, %n
  br i1 %exitcond, label %for.cond.cleanup, label %for.body

for.cond.cleanup: ; preds = %for.body, %entry
  %r.0.lcssa = phi i64 [ 0, %entry ], [ %add, %for.body ]
  ret i64 %r.0.lcssa
}

; 8x to use VMLAL.u16
; FIXME: TailPredicate
define i64 @mla_i16_i64(ptr nocapture readonly %x, ptr nocapture readonly %y, i32 %n) #0 {
; CHECK-LABEL: @mla_i16_i64(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[CMP10:%.*]] = icmp sgt i32 [[N:%.*]], 0
; CHECK-NEXT: br i1 [[CMP10]], label [[FOR_BODY_PREHEADER:%.*]], label [[FOR_COND_CLEANUP:%.*]]
; CHECK: for.body.preheader:
; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i32 [[N]], 8
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
; CHECK-NEXT: [[N_VEC:%.*]] = and i32 [[N]], 2147483640
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[TMP7:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds i16, ptr [[X:%.*]], i32 [[INDEX]]
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <8 x i16>, ptr [[TMP0]], align 2
; CHECK-NEXT: [[TMP1:%.*]] = sext <8 x i16> [[WIDE_LOAD]] to <8 x i32>
; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds i16, ptr [[Y:%.*]], i32 [[INDEX]]
; CHECK-NEXT: [[WIDE_LOAD1:%.*]] = load <8 x i16>, ptr [[TMP2]], align 2
; CHECK-NEXT: [[TMP3:%.*]] = sext <8 x i16> [[WIDE_LOAD1]] to <8 x i32>
; CHECK-NEXT: [[TMP4:%.*]] = mul nsw <8 x i32> [[TMP3]], [[TMP1]]
; CHECK-NEXT: [[TMP5:%.*]] = sext <8 x i32> [[TMP4]] to <8 x i64>
; CHECK-NEXT: [[TMP6:%.*]] = call i64 @llvm.vector.reduce.add.v8i64(<8 x i64> [[TMP5]])
; CHECK-NEXT: [[TMP7]] = add i64 [[TMP6]], [[VEC_PHI]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 8
; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[N]], [[N_VEC]]
; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_COND_CLEANUP]], label [[SCALAR_PH]]
; CHECK: scalar.ph:
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[FOR_BODY_PREHEADER]] ]
; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i64 [ [[TMP7]], [[MIDDLE_BLOCK]] ], [ 0, [[FOR_BODY_PREHEADER]] ]
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[I_012:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
; CHECK-NEXT: [[R_011:%.*]] = phi i64 [ [[ADD:%.*]], [[FOR_BODY]] ], [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw i16, ptr [[X]], i32 [[I_012]]
; CHECK-NEXT: [[TMP9:%.*]] = load i16, ptr [[ARRAYIDX]], align 2
; CHECK-NEXT: [[CONV:%.*]] = sext i16 [[TMP9]] to i32
; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds nuw i16, ptr [[Y]], i32 [[I_012]]
; CHECK-NEXT: [[TMP10:%.*]] = load i16, ptr [[ARRAYIDX1]], align 2
; CHECK-NEXT: [[CONV2:%.*]] = sext i16 [[TMP10]] to i32
; CHECK-NEXT: [[MUL:%.*]] = mul nsw i32 [[CONV2]], [[CONV]]
; CHECK-NEXT: [[CONV3:%.*]] = sext i32 [[MUL]] to i64
; CHECK-NEXT: [[ADD]] = add nsw i64 [[R_011]], [[CONV3]]
; CHECK-NEXT: [[INC]] = add nuw nsw i32 [[I_012]], 1
; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[INC]], [[N]]
; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY]], !llvm.loop [[LOOP17:![0-9]+]]
; CHECK: for.cond.cleanup:
; CHECK-NEXT: [[R_0_LCSSA:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[ADD]], [[FOR_BODY]] ], [ [[TMP7]], [[MIDDLE_BLOCK]] ]
; CHECK-NEXT: ret i64 [[R_0_LCSSA]]
;
entry:
  %cmp10 = icmp sgt i32 %n, 0
  br i1 %cmp10, label %for.body, label %for.cond.cleanup

for.body: ; preds = %entry, %for.body
  %i.012 = phi i32 [ %inc, %for.body ], [ 0, %entry ]
  %r.011 = phi i64 [ %add, %for.body ], [ 0, %entry ]
  %arrayidx = getelementptr inbounds i16, ptr %x, i32 %i.012
  %0 = load i16, ptr %arrayidx, align 2
  %conv = sext i16 %0 to i32
  %arrayidx1 = getelementptr inbounds i16, ptr %y, i32 %i.012
  %1 = load i16, ptr %arrayidx1, align 2
  %conv2 = sext i16 %1 to i32
  %mul = mul nsw i32 %conv2, %conv
  %conv3 = sext i32 %mul to i64
  %add = add nsw i64 %r.011, %conv3
  %inc = add nuw nsw i32 %i.012, 1
  %exitcond = icmp eq i32 %inc, %n
  br i1 %exitcond, label %for.cond.cleanup, label %for.body

for.cond.cleanup: ; preds = %for.body, %entry
  %r.0.lcssa = phi i64 [ 0, %entry ], [ %add, %for.body ]
  ret i64 %r.0.lcssa
}

; 8x to use VMLAL.u16
; FIXME: TailPredicate
define i64 @mla_i8_i64(ptr nocapture readonly %x, ptr nocapture readonly %y, i32 %n) #0 {
; CHECK-LABEL: @mla_i8_i64(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[CMP10:%.*]] = icmp sgt i32 [[N:%.*]], 0
; CHECK-NEXT: br i1 [[CMP10]], label [[FOR_BODY_PREHEADER:%.*]], label [[FOR_COND_CLEANUP:%.*]]
; CHECK: for.body.preheader:
; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i32 [[N]], 8
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
; CHECK-NEXT: [[N_VEC:%.*]] = and i32 [[N]], 2147483640
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[TMP7:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds i8, ptr [[X:%.*]], i32 [[INDEX]]
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <8 x i8>, ptr [[TMP0]], align 1
; CHECK-NEXT: [[TMP1:%.*]] = zext <8 x i8> [[WIDE_LOAD]] to <8 x i32>
; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds i8, ptr [[Y:%.*]], i32 [[INDEX]]
; CHECK-NEXT: [[WIDE_LOAD1:%.*]] = load <8 x i8>, ptr [[TMP2]], align 1
; CHECK-NEXT: [[TMP3:%.*]] = zext <8 x i8> [[WIDE_LOAD1]] to <8 x i32>
; CHECK-NEXT: [[TMP4:%.*]] = mul nuw nsw <8 x i32> [[TMP3]], [[TMP1]]
; CHECK-NEXT: [[TMP5:%.*]] = zext nneg <8 x i32> [[TMP4]] to <8 x i64>
; CHECK-NEXT: [[TMP6:%.*]] = call i64 @llvm.vector.reduce.add.v8i64(<8 x i64> [[TMP5]])
; CHECK-NEXT: [[TMP7]] = add i64 [[TMP6]], [[VEC_PHI]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 8
; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[N]], [[N_VEC]]
; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_COND_CLEANUP]], label [[SCALAR_PH]]
; CHECK: scalar.ph:
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[FOR_BODY_PREHEADER]] ]
; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i64 [ [[TMP7]], [[MIDDLE_BLOCK]] ], [ 0, [[FOR_BODY_PREHEADER]] ]
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[I_012:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
; CHECK-NEXT: [[R_011:%.*]] = phi i64 [ [[ADD:%.*]], [[FOR_BODY]] ], [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw i8, ptr [[X]], i32 [[I_012]]
; CHECK-NEXT: [[TMP9:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
; CHECK-NEXT: [[CONV:%.*]] = zext i8 [[TMP9]] to i32
; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds nuw i8, ptr [[Y]], i32 [[I_012]]
; CHECK-NEXT: [[TMP10:%.*]] = load i8, ptr [[ARRAYIDX1]], align 1
; CHECK-NEXT: [[CONV2:%.*]] = zext i8 [[TMP10]] to i32
; CHECK-NEXT: [[MUL:%.*]] = mul nuw nsw i32 [[CONV2]], [[CONV]]
; CHECK-NEXT: [[CONV3:%.*]] = zext nneg i32 [[MUL]] to i64
; CHECK-NEXT: [[ADD]] = add nuw nsw i64 [[R_011]], [[CONV3]]
; CHECK-NEXT: [[INC]] = add nuw nsw i32 [[I_012]], 1
; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[INC]], [[N]]
; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY]], !llvm.loop [[LOOP19:![0-9]+]]
; CHECK: for.cond.cleanup:
; CHECK-NEXT: [[R_0_LCSSA:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[ADD]], [[FOR_BODY]] ], [ [[TMP7]], [[MIDDLE_BLOCK]] ]
; CHECK-NEXT: ret i64 [[R_0_LCSSA]]
;
entry:
  %cmp10 = icmp sgt i32 %n, 0
  br i1 %cmp10, label %for.body, label %for.cond.cleanup

for.body: ; preds = %entry, %for.body
  %i.012 = phi i32 [ %inc, %for.body ], [ 0, %entry ]
  %r.011 = phi i64 [ %add, %for.body ], [ 0, %entry ]
  %arrayidx = getelementptr inbounds i8, ptr %x, i32 %i.012
  %0 = load i8, ptr %arrayidx, align 1
  %conv = zext i8 %0 to i32
  %arrayidx1 = getelementptr inbounds i8, ptr %y, i32 %i.012
  %1 = load i8, ptr %arrayidx1, align 1
  %conv2 = zext i8 %1 to i32
  %mul = mul nuw nsw i32 %conv2, %conv
  %conv3 = zext i32 %mul to i64
  %add = add nuw nsw i64 %r.011, %conv3
  %inc = add nuw nsw i32 %i.012, 1
  %exitcond = icmp eq i32 %inc, %n
  br i1 %exitcond, label %for.cond.cleanup, label %for.body

for.cond.cleanup: ; preds = %for.body, %entry
  %r.0.lcssa = phi i64 [ 0, %entry ], [ %add, %for.body ]
  ret i64 %r.0.lcssa
}

define i32 @mla_i32_i32(ptr nocapture readonly %x, ptr nocapture readonly %y, i32 %n) #0 {
; CHECK-LABEL: @mla_i32_i32(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[CMP8:%.*]] = icmp sgt i32 [[N:%.*]], 0
; CHECK-NEXT: br i1 [[CMP8]], label [[VECTOR_PH:%.*]], label [[FOR_COND_CLEANUP:%.*]]
; CHECK: vector.ph:
; CHECK-NEXT: [[N_RND_UP:%.*]] = add nuw i32 [[N]], 3
; CHECK-NEXT: [[N_VEC:%.*]] = and i32 [[N_RND_UP]], -4
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[TMP5:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[ACTIVE_LANE_MASK:%.*]] = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 [[INDEX]], i32 [[N]])
; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds i32, ptr [[X:%.*]], i32 [[INDEX]]
; CHECK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr [[TMP0]], i32 4, <4 x i1> [[ACTIVE_LANE_MASK]], <4 x i32> poison)
; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i32, ptr [[Y:%.*]], i32 [[INDEX]]
; CHECK-NEXT: [[WIDE_MASKED_LOAD1:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr [[TMP1]], i32 4, <4 x i1> [[ACTIVE_LANE_MASK]], <4 x i32> poison)
; CHECK-NEXT: [[TMP2:%.*]] = mul nsw <4 x i32> [[WIDE_MASKED_LOAD1]], [[WIDE_MASKED_LOAD]]
; CHECK-NEXT: [[TMP3:%.*]] = select <4 x i1> [[ACTIVE_LANE_MASK]], <4 x i32> [[TMP2]], <4 x i32> zeroinitializer
; CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP3]])
; CHECK-NEXT: [[TMP5]] = add i32 [[TMP4]], [[VEC_PHI]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4
; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP6]], label [[FOR_COND_CLEANUP]], label [[VECTOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]]
; CHECK: for.cond.cleanup:
; CHECK-NEXT: [[R_0_LCSSA:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[TMP5]], [[VECTOR_BODY]] ]
; CHECK-NEXT: ret i32 [[R_0_LCSSA]]
;
entry:
  %cmp8 = icmp sgt i32 %n, 0
  br i1 %cmp8, label %for.body, label %for.cond.cleanup

for.body: ; preds = %entry, %for.body
  %i.010 = phi i32 [ %inc, %for.body ], [ 0, %entry ]
  %r.09 = phi i32 [ %add, %for.body ], [ 0, %entry ]
  %arrayidx = getelementptr inbounds i32, ptr %x, i32 %i.010
  %0 = load i32, ptr %arrayidx, align 4
  %arrayidx1 = getelementptr inbounds i32, ptr %y, i32 %i.010
  %1 = load i32, ptr %arrayidx1, align 4
  %mul = mul nsw i32 %1, %0
  %add = add nsw i32 %mul, %r.09
  %inc = add nuw nsw i32 %i.010, 1
  %exitcond = icmp eq i32 %inc, %n
  br i1 %exitcond, label %for.cond.cleanup, label %for.body

for.cond.cleanup: ; preds = %for.body, %entry
  %r.0.lcssa = phi i32 [ 0, %entry ], [ %add, %for.body ]
  ret i32 %r.0.lcssa
}

define i32 @mla_i16_i32(ptr nocapture readonly %x, ptr nocapture readonly %y, i32 %n) #0 {
; CHECK-LABEL: @mla_i16_i32(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[CMP9:%.*]] = icmp sgt i32 [[N:%.*]], 0
; CHECK-NEXT: br i1 [[CMP9]], label [[VECTOR_PH:%.*]], label [[FOR_COND_CLEANUP:%.*]]
; CHECK: vector.ph:
; CHECK-NEXT: [[N_RND_UP:%.*]] = add nuw i32 [[N]], 7
; CHECK-NEXT: [[N_VEC:%.*]] = and i32 [[N_RND_UP]], -8
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[TMP7:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[ACTIVE_LANE_MASK:%.*]] = call <8 x i1> @llvm.get.active.lane.mask.v8i1.i32(i32 [[INDEX]], i32 [[N]])
; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds i16, ptr [[X:%.*]], i32 [[INDEX]]
; CHECK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <8 x i16> @llvm.masked.load.v8i16.p0(ptr [[TMP0]], i32 2, <8 x i1> [[ACTIVE_LANE_MASK]], <8 x i16> poison)
; CHECK-NEXT: [[TMP1:%.*]] = sext <8 x i16> [[WIDE_MASKED_LOAD]] to <8 x i32>
; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds i16, ptr [[Y:%.*]], i32 [[INDEX]]
; CHECK-NEXT: [[WIDE_MASKED_LOAD1:%.*]] = call <8 x i16> @llvm.masked.load.v8i16.p0(ptr [[TMP2]], i32 2, <8 x i1> [[ACTIVE_LANE_MASK]], <8 x i16> poison)
; CHECK-NEXT: [[TMP3:%.*]] = sext <8 x i16> [[WIDE_MASKED_LOAD1]] to <8 x i32>
; CHECK-NEXT: [[TMP4:%.*]] = mul nsw <8 x i32> [[TMP3]], [[TMP1]]
; CHECK-NEXT: [[TMP5:%.*]] = select <8 x i1> [[ACTIVE_LANE_MASK]], <8 x i32> [[TMP4]], <8 x i32> zeroinitializer
; CHECK-NEXT: [[TMP6:%.*]] = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> [[TMP5]])
; CHECK-NEXT: [[TMP7]] = add i32 [[TMP6]], [[VEC_PHI]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 8
; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP8]], label [[FOR_COND_CLEANUP]], label [[VECTOR_BODY]], !llvm.loop [[LOOP21:![0-9]+]]
; CHECK: for.cond.cleanup:
; CHECK-NEXT: [[R_0_LCSSA:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[TMP7]], [[VECTOR_BODY]] ]
; CHECK-NEXT: ret i32 [[R_0_LCSSA]]
;
entry:
  %cmp9 = icmp sgt i32 %n, 0
  br i1 %cmp9, label %for.body, label %for.cond.cleanup

for.body: ; preds = %entry, %for.body
  %i.011 = phi i32 [ %inc, %for.body ], [ 0, %entry ]
  %r.010 = phi i32 [ %add, %for.body ], [ 0, %entry ]
  %arrayidx = getelementptr inbounds i16, ptr %x, i32 %i.011
  %0 = load i16, ptr %arrayidx, align 2
  %conv = sext i16 %0 to i32
  %arrayidx1 = getelementptr inbounds i16, ptr %y, i32 %i.011
  %1 = load i16, ptr %arrayidx1, align 2
  %conv2 = sext i16 %1 to i32
  %mul = mul nsw i32 %conv2, %conv
  %add = add nsw i32 %mul, %r.010
  %inc = add nuw nsw i32 %i.011, 1
  %exitcond = icmp eq i32 %inc, %n
  br i1 %exitcond, label %for.cond.cleanup, label %for.body

for.cond.cleanup: ; preds = %for.body, %entry
  %r.0.lcssa = phi i32 [ 0, %entry ], [ %add, %for.body ]
  ret i32 %r.0.lcssa
}

define i32 @mla_i8_i32(ptr nocapture readonly %x, ptr nocapture readonly %y, i32 %n) #0 {
; CHECK-LABEL: @mla_i8_i32(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[CMP9:%.*]] = icmp sgt i32 [[N:%.*]], 0
; CHECK-NEXT: br i1 [[CMP9]], label [[VECTOR_PH:%.*]], label [[FOR_COND_CLEANUP:%.*]]
; CHECK: vector.ph:
; CHECK-NEXT: [[N_RND_UP:%.*]] = add nuw i32 [[N]], 15
; CHECK-NEXT: [[N_VEC:%.*]] = and i32 [[N_RND_UP]], -16
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[TMP7:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[ACTIVE_LANE_MASK:%.*]] = call <16 x i1> @llvm.get.active.lane.mask.v16i1.i32(i32 [[INDEX]], i32 [[N]])
; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds i8, ptr [[X:%.*]], i32 [[INDEX]]
; CHECK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <16 x i8> @llvm.masked.load.v16i8.p0(ptr [[TMP0]], i32 1, <16 x i1> [[ACTIVE_LANE_MASK]], <16 x i8> poison)
; CHECK-NEXT: [[TMP1:%.*]] = zext <16 x i8> [[WIDE_MASKED_LOAD]] to <16 x i32>
; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds i8, ptr [[Y:%.*]], i32 [[INDEX]]
; CHECK-NEXT: [[WIDE_MASKED_LOAD1:%.*]] = call <16 x i8> @llvm.masked.load.v16i8.p0(ptr [[TMP2]], i32 1, <16 x i1> [[ACTIVE_LANE_MASK]], <16 x i8> poison)
; CHECK-NEXT: [[TMP3:%.*]] = zext <16 x i8> [[WIDE_MASKED_LOAD1]] to <16 x i32>
; CHECK-NEXT: [[TMP4:%.*]] = mul nuw nsw <16 x i32> [[TMP3]], [[TMP1]]
; CHECK-NEXT: [[TMP5:%.*]] = select <16 x i1> [[ACTIVE_LANE_MASK]], <16 x i32> [[TMP4]], <16 x i32> zeroinitializer
; CHECK-NEXT: [[TMP6:%.*]] = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> [[TMP5]])
; CHECK-NEXT: [[TMP7]] = add i32 [[TMP6]], [[VEC_PHI]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 16
; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP8]], label [[FOR_COND_CLEANUP]], label [[VECTOR_BODY]], !llvm.loop [[LOOP22:![0-9]+]]
; CHECK: for.cond.cleanup:
; CHECK-NEXT: [[R_0_LCSSA:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[TMP7]], [[VECTOR_BODY]] ]
; CHECK-NEXT: ret i32 [[R_0_LCSSA]]
;
entry:
  %cmp9 = icmp sgt i32 %n, 0
  br i1 %cmp9, label %for.body, label %for.cond.cleanup

for.body: ; preds = %entry, %for.body
  %i.011 = phi i32 [ %inc, %for.body ], [ 0, %entry ]
  %r.010 = phi i32 [ %add, %for.body ], [ 0, %entry ]
  %arrayidx = getelementptr inbounds i8, ptr %x, i32 %i.011
  %0 = load i8, ptr %arrayidx, align 1
  %conv = zext i8 %0 to i32
  %arrayidx1 = getelementptr inbounds i8, ptr %y, i32 %i.011
  %1 = load i8, ptr %arrayidx1, align 1
  %conv2 = zext i8 %1 to i32
  %mul = mul nuw nsw i32 %conv2, %conv
  %add = add nuw nsw i32 %mul, %r.010
  %inc = add nuw nsw i32 %i.011, 1
  %exitcond = icmp eq i32 %inc, %n
  br i1 %exitcond, label %for.cond.cleanup, label %for.body

for.cond.cleanup: ; preds = %for.body, %entry
  %r.0.lcssa = phi i32 [ 0, %entry ], [ %add, %for.body ]
  ret i32 %r.0.lcssa
}

define signext i16 @mla_i16_i16(ptr nocapture readonly %x, ptr nocapture readonly %y, i32 %n) #0 {
; CHECK-LABEL: @mla_i16_i16(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[CMP11:%.*]] = icmp sgt i32 [[N:%.*]], 0
; CHECK-NEXT: br i1 [[CMP11]], label [[VECTOR_PH:%.*]], label [[FOR_COND_CLEANUP:%.*]]
; CHECK: vector.ph:
; CHECK-NEXT: [[N_RND_UP:%.*]] = add nuw i32 [[N]], 7
; CHECK-NEXT: [[N_VEC:%.*]] = and i32 [[N_RND_UP]], -8
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi i16 [ 0, [[VECTOR_PH]] ], [ [[TMP5:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[ACTIVE_LANE_MASK:%.*]] = call <8 x i1> @llvm.get.active.lane.mask.v8i1.i32(i32 [[INDEX]], i32 [[N]])
; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds i16, ptr [[X:%.*]], i32 [[INDEX]]
; CHECK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <8 x i16> @llvm.masked.load.v8i16.p0(ptr [[TMP0]], i32 2, <8 x i1> [[ACTIVE_LANE_MASK]], <8 x i16> poison)
; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i16, ptr [[Y:%.*]], i32 [[INDEX]]
; CHECK-NEXT: [[WIDE_MASKED_LOAD1:%.*]] = call <8 x i16> @llvm.masked.load.v8i16.p0(ptr [[TMP1]], i32 2, <8 x i1> [[ACTIVE_LANE_MASK]], <8 x i16> poison)
; CHECK-NEXT: [[TMP2:%.*]] = mul <8 x i16> [[WIDE_MASKED_LOAD1]], [[WIDE_MASKED_LOAD]]
; CHECK-NEXT: [[TMP3:%.*]] = select <8 x i1> [[ACTIVE_LANE_MASK]], <8 x i16> [[TMP2]], <8 x i16> zeroinitializer
; CHECK-NEXT: [[TMP4:%.*]] = call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> [[TMP3]])
; CHECK-NEXT: [[TMP5]] = add i16 [[TMP4]], [[VEC_PHI]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 8
; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP6]], label [[FOR_COND_CLEANUP]], label [[VECTOR_BODY]], !llvm.loop [[LOOP23:![0-9]+]]
; CHECK: for.cond.cleanup:
; CHECK-NEXT: [[R_0_LCSSA:%.*]] = phi i16 [ 0, [[ENTRY:%.*]] ], [ [[TMP5]], [[VECTOR_BODY]] ]
; CHECK-NEXT: ret i16 [[R_0_LCSSA]]
;
entry:
  %cmp11 = icmp sgt i32 %n, 0
  br i1 %cmp11, label %for.body, label %for.cond.cleanup

for.body: ; preds = %entry, %for.body
  %i.013 = phi i32 [ %inc, %for.body ], [ 0, %entry ]
  %r.012 = phi i16 [ %add, %for.body ], [ 0, %entry ]
  %arrayidx = getelementptr inbounds i16, ptr %x, i32 %i.013
  %0 = load i16, ptr %arrayidx, align 2
  %arrayidx1 = getelementptr inbounds i16, ptr %y, i32 %i.013
  %1 = load i16, ptr %arrayidx1, align 2
  %mul = mul i16 %1, %0
  %add = add i16 %mul, %r.012
  %inc = add nuw nsw i32 %i.013, 1
  %exitcond = icmp eq i32 %inc, %n
  br i1 %exitcond, label %for.cond.cleanup, label %for.body

for.cond.cleanup: ; preds = %for.body, %entry
  %r.0.lcssa = phi i16 [ 0, %entry ], [ %add, %for.body ]
  ret i16 %r.0.lcssa
}

; 16x to use VMLA.i8
define signext i16 @mla_i8_i16(ptr nocapture readonly %x, ptr nocapture readonly %y, i32 %n) #0 {
; CHECK-LABEL: @mla_i8_i16(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[CMP11:%.*]] = icmp sgt i32 [[N:%.*]], 0
; CHECK-NEXT: br i1 [[CMP11]], label [[VECTOR_PH:%.*]], label [[FOR_COND_CLEANUP:%.*]]
; CHECK: vector.ph:
; CHECK-NEXT: [[N_RND_UP:%.*]] = add nuw i32 [[N]], 15
; CHECK-NEXT: [[N_VEC:%.*]] = and i32 [[N_RND_UP]], -16
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi i16 [ 0, [[VECTOR_PH]] ], [ [[TMP7:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[ACTIVE_LANE_MASK:%.*]] = call <16 x i1> @llvm.get.active.lane.mask.v16i1.i32(i32 [[INDEX]], i32 [[N]])
; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds i8, ptr [[X:%.*]], i32 [[INDEX]]
; CHECK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <16 x i8> @llvm.masked.load.v16i8.p0(ptr [[TMP0]], i32 1, <16 x i1> [[ACTIVE_LANE_MASK]], <16 x i8> poison)
; CHECK-NEXT: [[TMP1:%.*]] = zext <16 x i8> [[WIDE_MASKED_LOAD]] to <16 x i16>
; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds i8, ptr [[Y:%.*]], i32 [[INDEX]]
; CHECK-NEXT: [[WIDE_MASKED_LOAD1:%.*]] = call <16 x i8> @llvm.masked.load.v16i8.p0(ptr [[TMP2]], i32 1, <16 x i1> [[ACTIVE_LANE_MASK]], <16 x i8> poison)
; CHECK-NEXT: [[TMP3:%.*]] = zext <16 x i8> [[WIDE_MASKED_LOAD1]] to <16 x i16>
; CHECK-NEXT: [[TMP4:%.*]] = mul nuw <16 x i16> [[TMP3]], [[TMP1]]
; CHECK-NEXT: [[TMP5:%.*]] = select <16 x i1> [[ACTIVE_LANE_MASK]], <16 x i16> [[TMP4]], <16 x i16> zeroinitializer
; CHECK-NEXT: [[TMP6:%.*]] = call i16 @llvm.vector.reduce.add.v16i16(<16 x i16> [[TMP5]])
; CHECK-NEXT: [[TMP7]] = add i16 [[TMP6]], [[VEC_PHI]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 16
; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP8]], label [[FOR_COND_CLEANUP]], label [[VECTOR_BODY]], !llvm.loop [[LOOP24:![0-9]+]]
; CHECK: for.cond.cleanup:
; CHECK-NEXT: [[R_0_LCSSA:%.*]] = phi i16 [ 0, [[ENTRY:%.*]] ], [ [[TMP7]], [[VECTOR_BODY]] ]
; CHECK-NEXT: ret i16 [[R_0_LCSSA]]
;
entry:
  %cmp11 = icmp sgt i32 %n, 0
  br i1 %cmp11, label %for.body, label %for.cond.cleanup

for.body: ; preds = %entry, %for.body
  %i.013 = phi i32 [ %inc, %for.body ], [ 0, %entry ]
  %r.012 = phi i16 [ %add, %for.body ], [ 0, %entry ]
  %arrayidx = getelementptr inbounds i8, ptr %x, i32 %i.013
  %0 = load i8, ptr %arrayidx, align 1
  %conv = zext i8 %0 to i16
  %arrayidx1 = getelementptr inbounds i8, ptr %y, i32 %i.013
  %1 = load i8, ptr %arrayidx1, align 1
  %conv2 = zext i8 %1 to i16
  %mul = mul nuw i16 %conv2, %conv
  %add = add i16 %mul, %r.012
  %inc = add nuw nsw i32 %i.013, 1
  %exitcond = icmp eq i32 %inc, %n
  br i1 %exitcond, label %for.cond.cleanup, label %for.body

for.cond.cleanup: ; preds = %for.body, %entry
  %r.0.lcssa = phi i16 [ 0, %entry ], [ %add, %for.body ]
  ret i16 %r.0.lcssa
}

; 16x to use VMLA.i8
define zeroext i8 @mla_i8_i8(ptr nocapture readonly %x, ptr nocapture readonly %y, i32 %n) #0 {
; CHECK-LABEL: @mla_i8_i8(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[CMP10:%.*]] = icmp sgt i32 [[N:%.*]], 0
; CHECK-NEXT: br i1 [[CMP10]], label [[VECTOR_PH:%.*]], label [[FOR_COND_CLEANUP:%.*]]
; CHECK: vector.ph:
; CHECK-NEXT: [[N_RND_UP:%.*]] = add nuw i32 [[N]], 15
; CHECK-NEXT: [[N_VEC:%.*]] = and i32 [[N_RND_UP]], -16
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi i8 [ 0, [[VECTOR_PH]] ], [ [[TMP5:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[ACTIVE_LANE_MASK:%.*]] = call <16 x i1> @llvm.get.active.lane.mask.v16i1.i32(i32 [[INDEX]], i32 [[N]])
; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds i8, ptr [[X:%.*]], i32 [[INDEX]]
; CHECK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <16 x i8> @llvm.masked.load.v16i8.p0(ptr [[TMP0]], i32 1, <16 x i1> [[ACTIVE_LANE_MASK]], <16 x i8> poison)
; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i8, ptr [[Y:%.*]], i32 [[INDEX]]
; CHECK-NEXT: [[WIDE_MASKED_LOAD1:%.*]] = call <16 x i8> @llvm.masked.load.v16i8.p0(ptr [[TMP1]], i32 1, <16 x i1> [[ACTIVE_LANE_MASK]], <16 x i8> poison)
; CHECK-NEXT: [[TMP2:%.*]] = mul <16 x i8> [[WIDE_MASKED_LOAD1]], [[WIDE_MASKED_LOAD]]
; CHECK-NEXT: [[TMP3:%.*]] = select <16 x i1> [[ACTIVE_LANE_MASK]], <16 x i8> [[TMP2]], <16 x i8> zeroinitializer
; CHECK-NEXT: [[TMP4:%.*]] = call i8 @llvm.vector.reduce.add.v16i8(<16 x i8> [[TMP3]])
; CHECK-NEXT: [[TMP5]] = add i8 [[TMP4]], [[VEC_PHI]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 16
; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP6]], label [[FOR_COND_CLEANUP]], label [[VECTOR_BODY]], !llvm.loop [[LOOP25:![0-9]+]]
; CHECK: for.cond.cleanup:
; CHECK-NEXT: [[R_0_LCSSA:%.*]] = phi i8 [ 0, [[ENTRY:%.*]] ], [ [[TMP5]], [[VECTOR_BODY]] ]
; CHECK-NEXT: ret i8 [[R_0_LCSSA]]
;
entry:
%cmp10 = icmp sgt i32 %n, 0
br i1 %cmp10, label %for.body, label %for.cond.cleanup

for.body: ; preds = %entry, %for.body
%i.012 = phi i32 [ %inc, %for.body ], [ 0, %entry ]
%r.011 = phi i8 [ %add, %for.body ], [ 0, %entry ]
%arrayidx = getelementptr inbounds i8, ptr %x, i32 %i.012
%0 = load i8, ptr %arrayidx, align 1
%arrayidx1 = getelementptr inbounds i8, ptr %y, i32 %i.012
%1 = load i8, ptr %arrayidx1, align 1
%mul = mul i8 %1, %0
%add = add i8 %mul, %r.011
%inc = add nuw nsw i32 %i.012, 1
%exitcond = icmp eq i32 %inc, %n
br i1 %exitcond, label %for.cond.cleanup, label %for.body

for.cond.cleanup: ; preds = %for.body, %entry
%r.0.lcssa = phi i8 [ 0, %entry ], [ %add, %for.body ]
ret i8 %r.0.lcssa
}

; 8x as different types
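; Mixed-width operands (sext i8 and sext i16, both to i32): expected to
; tail-fold at VF 8 with a masked load of each operand type.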
define i32 @red_mla_ext_s8_s16_s32(ptr noalias nocapture readonly %A, ptr noalias nocapture readonly %B, i32 %n) #0 {
; CHECK-LABEL: @red_mla_ext_s8_s16_s32(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[CMP9_NOT:%.*]] = icmp eq i32 [[N:%.*]], 0
; CHECK-NEXT: br i1 [[CMP9_NOT]], label [[FOR_COND_CLEANUP:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
; CHECK-NEXT: [[N_RND_UP:%.*]] = add i32 [[N]], 7
; CHECK-NEXT: [[N_VEC:%.*]] = and i32 [[N_RND_UP]], -8
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[TMP7:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[ACTIVE_LANE_MASK:%.*]] = call <8 x i1> @llvm.get.active.lane.mask.v8i1.i32(i32 [[INDEX]], i32 [[N]])
; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds i8, ptr [[A:%.*]], i32 [[INDEX]]
; CHECK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <8 x i8> @llvm.masked.load.v8i8.p0(ptr [[TMP0]], i32 1, <8 x i1> [[ACTIVE_LANE_MASK]], <8 x i8> poison)
; CHECK-NEXT: [[TMP1:%.*]] = sext <8 x i8> [[WIDE_MASKED_LOAD]] to <8 x i32>
; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds i16, ptr [[B:%.*]], i32 [[INDEX]]
; CHECK-NEXT: [[WIDE_MASKED_LOAD1:%.*]] = call <8 x i16> @llvm.masked.load.v8i16.p0(ptr [[TMP2]], i32 2, <8 x i1> [[ACTIVE_LANE_MASK]], <8 x i16> poison)
; CHECK-NEXT: [[TMP3:%.*]] = sext <8 x i16> [[WIDE_MASKED_LOAD1]] to <8 x i32>
; CHECK-NEXT: [[TMP4:%.*]] = mul nsw <8 x i32> [[TMP3]], [[TMP1]]
; CHECK-NEXT: [[TMP5:%.*]] = select <8 x i1> [[ACTIVE_LANE_MASK]], <8 x i32> [[TMP4]], <8 x i32> zeroinitializer
; CHECK-NEXT: [[TMP6:%.*]] = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> [[TMP5]])
; CHECK-NEXT: [[TMP7]] = add i32 [[TMP6]], [[VEC_PHI]]
; CHECK-NEXT: [[INDEX_NEXT]] = add i32 [[INDEX]], 8
; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP8]], label [[FOR_COND_CLEANUP]], label [[VECTOR_BODY]], !llvm.loop [[LOOP26:![0-9]+]]
; CHECK: for.cond.cleanup:
; CHECK-NEXT: [[S_0_LCSSA:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[TMP7]], [[VECTOR_BODY]] ]
; CHECK-NEXT: ret i32 [[S_0_LCSSA]]
;
entry:
%cmp9.not = icmp eq i32 %n, 0
br i1 %cmp9.not, label %for.cond.cleanup, label %for.body.preheader

for.body.preheader: ; preds = %entry
br label %for.body

for.body: ; preds = %for.body.preheader, %for.body
%i.011 = phi i32 [ %inc, %for.body ], [ 0, %for.body.preheader ]
%s.010 = phi i32 [ %add, %for.body ], [ 0, %for.body.preheader ]
%arrayidx = getelementptr inbounds i8, ptr %A, i32 %i.011
%0 = load i8, ptr %arrayidx, align 1
%conv = sext i8 %0 to i32
%arrayidx1 = getelementptr inbounds i16, ptr %B, i32 %i.011
%1 = load i16, ptr %arrayidx1, align 2
%conv2 = sext i16 %1 to i32
%mul = mul nsw i32 %conv2, %conv
%add = add nsw i32 %mul, %s.010
%inc = add nuw i32 %i.011, 1
%exitcond.not = icmp eq i32 %inc, %n
br i1 %exitcond.not, label %for.cond.cleanup.loopexit, label %for.body

for.cond.cleanup.loopexit: ; preds = %for.body
%add.lcssa = phi i32 [ %add, %for.body ]
br label %for.cond.cleanup

for.cond.cleanup: ; preds = %for.cond.cleanup.loopexit, %entry
%s.0.lcssa = phi i32 [ 0, %entry ], [ %add.lcssa, %for.cond.cleanup.loopexit ]
ret i32 %s.0.lcssa
}

; 4x as different sext vs zext
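; A sext x zext multiply accumulated into i64: vectorized at VF 4 without
; tail-folding, so a scalar epilogue loop remains after the middle block.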
define i64 @red_mla_ext_s16_u16_s64(ptr noalias nocapture readonly %A, ptr noalias nocapture readonly %B, i32 %n) #0 {
; CHECK-LABEL: @red_mla_ext_s16_u16_s64(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[CMP9_NOT:%.*]] = icmp eq i32 [[N:%.*]], 0
; CHECK-NEXT: br i1 [[CMP9_NOT]], label [[FOR_COND_CLEANUP:%.*]], label [[FOR_BODY_PREHEADER:%.*]]
; CHECK: for.body.preheader:
; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i32 [[N]], 4
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
; CHECK-NEXT: [[N_VEC:%.*]] = and i32 [[N]], -4
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[TMP7:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds i16, ptr [[A:%.*]], i32 [[INDEX]]
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i16>, ptr [[TMP0]], align 1
; CHECK-NEXT: [[TMP1:%.*]] = sext <4 x i16> [[WIDE_LOAD]] to <4 x i32>
; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds i16, ptr [[B:%.*]], i32 [[INDEX]]
; CHECK-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x i16>, ptr [[TMP2]], align 2
; CHECK-NEXT: [[TMP3:%.*]] = zext <4 x i16> [[WIDE_LOAD1]] to <4 x i32>
; CHECK-NEXT: [[TMP4:%.*]] = mul nsw <4 x i32> [[TMP3]], [[TMP1]]
; CHECK-NEXT: [[TMP5:%.*]] = zext <4 x i32> [[TMP4]] to <4 x i64>
; CHECK-NEXT: [[TMP6:%.*]] = call i64 @llvm.vector.reduce.add.v4i64(<4 x i64> [[TMP5]])
; CHECK-NEXT: [[TMP7]] = add i64 [[TMP6]], [[VEC_PHI]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4
; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP27:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[N]], [[N_VEC]]
; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_COND_CLEANUP]], label [[SCALAR_PH]]
; CHECK: scalar.ph:
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[FOR_BODY_PREHEADER]] ]
; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i64 [ [[TMP7]], [[MIDDLE_BLOCK]] ], [ 0, [[FOR_BODY_PREHEADER]] ]
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[I_011:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
; CHECK-NEXT: [[S_010:%.*]] = phi i64 [ [[ADD:%.*]], [[FOR_BODY]] ], [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, ptr [[A]], i32 [[I_011]]
; CHECK-NEXT: [[TMP9:%.*]] = load i16, ptr [[ARRAYIDX]], align 1
; CHECK-NEXT: [[CONV:%.*]] = sext i16 [[TMP9]] to i32
; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i16, ptr [[B]], i32 [[I_011]]
; CHECK-NEXT: [[TMP10:%.*]] = load i16, ptr [[ARRAYIDX1]], align 2
; CHECK-NEXT: [[CONV2:%.*]] = zext i16 [[TMP10]] to i32
; CHECK-NEXT: [[MUL:%.*]] = mul nsw i32 [[CONV2]], [[CONV]]
; CHECK-NEXT: [[MUL2:%.*]] = zext i32 [[MUL]] to i64
; CHECK-NEXT: [[ADD]] = add nsw i64 [[S_010]], [[MUL2]]
; CHECK-NEXT: [[INC]] = add nuw i32 [[I_011]], 1
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i32 [[INC]], [[N]]
; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY]], !llvm.loop [[LOOP28:![0-9]+]]
; CHECK: for.cond.cleanup:
; CHECK-NEXT: [[S_0_LCSSA:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[ADD]], [[FOR_BODY]] ], [ [[TMP7]], [[MIDDLE_BLOCK]] ]
; CHECK-NEXT: ret i64 [[S_0_LCSSA]]
;
entry:
%cmp9.not = icmp eq i32 %n, 0
br i1 %cmp9.not, label %for.cond.cleanup, label %for.body.preheader

for.body.preheader: ; preds = %entry
br label %for.body

for.body: ; preds = %for.body.preheader, %for.body
%i.011 = phi i32 [ %inc, %for.body ], [ 0, %for.body.preheader ]
%s.010 = phi i64 [ %add, %for.body ], [ 0, %for.body.preheader ]
%arrayidx = getelementptr inbounds i16, ptr %A, i32 %i.011
%0 = load i16, ptr %arrayidx, align 1
%conv = sext i16 %0 to i32
%arrayidx1 = getelementptr inbounds i16, ptr %B, i32 %i.011
%1 = load i16, ptr %arrayidx1, align 2
%conv2 = zext i16 %1 to i32
%mul = mul nsw i32 %conv2, %conv
%mul2 = zext i32 %mul to i64
%add = add nsw i64 %mul2, %s.010
%inc = add nuw i32 %i.011, 1
%exitcond.not = icmp eq i32 %inc, %n
br i1 %exitcond.not, label %for.cond.cleanup.loopexit, label %for.body

for.cond.cleanup.loopexit: ; preds = %for.body
%add.lcssa = phi i64 [ %add, %for.body ]
br label %for.cond.cleanup

for.cond.cleanup: ; preds = %for.cond.cleanup.loopexit, %entry
%s.0.lcssa = phi i64 [ 0, %entry ], [ %add.lcssa, %for.cond.cleanup.loopexit ]
ret i64 %s.0.lcssa
}

; 4x as different sext vs zext
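; zext x sext i8 operands into an i32 reduction: tail-folded at VF 4 with
; masked loads and a select on the active lanes before the reduction.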
define i32 @red_mla_u8_s8_u32(ptr noalias nocapture readonly %A, ptr noalias nocapture readonly %B, i32 %n) #0 {
; CHECK-LABEL: @red_mla_u8_s8_u32(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[CMP9_NOT:%.*]] = icmp eq i32 [[N:%.*]], 0
; CHECK-NEXT: br i1 [[CMP9_NOT]], label [[FOR_COND_CLEANUP:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
; CHECK-NEXT: [[N_RND_UP:%.*]] = add i32 [[N]], 3
; CHECK-NEXT: [[N_VEC:%.*]] = and i32 [[N_RND_UP]], -4
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[TMP7:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[ACTIVE_LANE_MASK:%.*]] = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 [[INDEX]], i32 [[N]])
; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds i8, ptr [[A:%.*]], i32 [[INDEX]]
; CHECK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <4 x i8> @llvm.masked.load.v4i8.p0(ptr [[TMP0]], i32 1, <4 x i1> [[ACTIVE_LANE_MASK]], <4 x i8> poison)
; CHECK-NEXT: [[TMP1:%.*]] = zext <4 x i8> [[WIDE_MASKED_LOAD]] to <4 x i32>
; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds i8, ptr [[B:%.*]], i32 [[INDEX]]
; CHECK-NEXT: [[WIDE_MASKED_LOAD1:%.*]] = call <4 x i8> @llvm.masked.load.v4i8.p0(ptr [[TMP2]], i32 1, <4 x i1> [[ACTIVE_LANE_MASK]], <4 x i8> poison)
; CHECK-NEXT: [[TMP3:%.*]] = sext <4 x i8> [[WIDE_MASKED_LOAD1]] to <4 x i32>
; CHECK-NEXT: [[TMP4:%.*]] = mul nsw <4 x i32> [[TMP3]], [[TMP1]]
; CHECK-NEXT: [[TMP5:%.*]] = select <4 x i1> [[ACTIVE_LANE_MASK]], <4 x i32> [[TMP4]], <4 x i32> zeroinitializer
; CHECK-NEXT: [[TMP6:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP5]])
; CHECK-NEXT: [[TMP7]] = add i32 [[TMP6]], [[VEC_PHI]]
; CHECK-NEXT: [[INDEX_NEXT]] = add i32 [[INDEX]], 4
; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP8]], label [[FOR_COND_CLEANUP]], label [[VECTOR_BODY]], !llvm.loop [[LOOP29:![0-9]+]]
; CHECK: for.cond.cleanup:
; CHECK-NEXT: [[S_0_LCSSA:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[TMP7]], [[VECTOR_BODY]] ]
; CHECK-NEXT: ret i32 [[S_0_LCSSA]]
;
entry:
%cmp9.not = icmp eq i32 %n, 0
br i1 %cmp9.not, label %for.cond.cleanup, label %for.body.preheader

for.body.preheader: ; preds = %entry
br label %for.body

for.body: ; preds = %for.body.preheader, %for.body
%i.011 = phi i32 [ %inc, %for.body ], [ 0, %for.body.preheader ]
%s.010 = phi i32 [ %add, %for.body ], [ 0, %for.body.preheader ]
%arrayidx = getelementptr inbounds i8, ptr %A, i32 %i.011
%0 = load i8, ptr %arrayidx, align 1
%conv = zext i8 %0 to i32
%arrayidx1 = getelementptr inbounds i8, ptr %B, i32 %i.011
%1 = load i8, ptr %arrayidx1, align 1
%conv2 = sext i8 %1 to i32
%mul = mul nsw i32 %conv2, %conv
%add = add i32 %mul, %s.010
%inc = add nuw i32 %i.011, 1
%exitcond.not = icmp eq i32 %inc, %n
br i1 %exitcond.not, label %for.cond.cleanup.loopexit, label %for.body

for.cond.cleanup.loopexit: ; preds = %for.body
%add.lcssa = phi i32 [ %add, %for.body ]
br label %for.cond.cleanup

for.cond.cleanup: ; preds = %for.cond.cleanup.loopexit, %entry
%s.0.lcssa = phi i32 [ 0, %entry ], [ %add.lcssa, %for.cond.cleanup.loopexit ]
ret i32 %s.0.lcssa
}

; Make sure interleave group members feeding in-loop reductions can be handled.
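; Both members of the stride-2 interleave group are loaded as a single
; <8 x i32> and split with shufflevectors; each half then feeds its own
; in-loop vector.reduce.add into the same reduction chain.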
define i32 @reduction_interleave_group(i32 %n, ptr %arr) #0 {
; CHECK-LABEL: @reduction_interleave_group(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[GUARD:%.*]] = icmp sgt i32 [[N:%.*]], 0
; CHECK-NEXT: br i1 [[GUARD]], label [[FOR_BODY_PREHEADER:%.*]], label [[EXIT:%.*]]
; CHECK: for.body.preheader:
; CHECK-NEXT: [[TMP0:%.*]] = add nsw i32 [[N]], -1
; CHECK-NEXT: [[TMP1:%.*]] = lshr i32 [[TMP0]], 1
; CHECK-NEXT: [[TMP2:%.*]] = add nuw i32 [[TMP1]], 1
; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i32 [[N]], 7
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
; CHECK-NEXT: [[N_VEC:%.*]] = and i32 [[TMP2]], -4
; CHECK-NEXT: [[IND_END:%.*]] = shl i32 [[N_VEC]], 1
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[TMP9:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[OFFSET_IDX:%.*]] = shl i32 [[INDEX]], 1
; CHECK-NEXT: [[TMP3:%.*]] = or disjoint i32 [[OFFSET_IDX]], 1
; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds i32, ptr [[ARR:%.*]], i32 [[TMP3]]
; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds i8, ptr [[TMP4]], i32 -4
; CHECK-NEXT: [[WIDE_VEC:%.*]] = load <8 x i32>, ptr [[TMP5]], align 4
; CHECK-NEXT: [[STRIDED_VEC:%.*]] = shufflevector <8 x i32> [[WIDE_VEC]], <8 x i32> poison, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
; CHECK-NEXT: [[STRIDED_VEC1:%.*]] = shufflevector <8 x i32> [[WIDE_VEC]], <8 x i32> poison, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
; CHECK-NEXT: [[TMP6:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[STRIDED_VEC1]])
; CHECK-NEXT: [[TMP7:%.*]] = add i32 [[TMP6]], [[VEC_PHI]]
; CHECK-NEXT: [[TMP8:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[STRIDED_VEC]])
; CHECK-NEXT: [[TMP9]] = add i32 [[TMP8]], [[TMP7]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4
; CHECK-NEXT: [[TMP10:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP30:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[TMP2]], [[N_VEC]]
; CHECK-NEXT: br i1 [[CMP_N]], label [[EXIT]], label [[SCALAR_PH]]
; CHECK: scalar.ph:
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ [[IND_END]], [[MIDDLE_BLOCK]] ], [ 0, [[FOR_BODY_PREHEADER]] ]
; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[TMP9]], [[MIDDLE_BLOCK]] ], [ 0, [[FOR_BODY_PREHEADER]] ]
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[IV:%.*]] = phi i32 [ [[IV_NEXT:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
; CHECK-NEXT: [[RED_PHI:%.*]] = phi i32 [ [[RED_2:%.*]], [[FOR_BODY]] ], [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ]
; CHECK-NEXT: [[ADD:%.*]] = or disjoint i32 [[IV]], 1
; CHECK-NEXT: [[GEP_0:%.*]] = getelementptr inbounds i32, ptr [[ARR]], i32 [[ADD]]
; CHECK-NEXT: [[L_0:%.*]] = load i32, ptr [[GEP_0]], align 4
; CHECK-NEXT: [[GEP_1:%.*]] = getelementptr inbounds i32, ptr [[ARR]], i32 [[IV]]
; CHECK-NEXT: [[L_1:%.*]] = load i32, ptr [[GEP_1]], align 4
; CHECK-NEXT: [[RED_1:%.*]] = add i32 [[L_0]], [[RED_PHI]]
; CHECK-NEXT: [[RED_2]] = add i32 [[RED_1]], [[L_1]]
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i32 [[IV]], 2
; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 [[IV_NEXT]], [[N]]
; CHECK-NEXT: br i1 [[CMP]], label [[FOR_BODY]], label [[EXIT]], !llvm.loop [[LOOP31:![0-9]+]]
; CHECK: exit:
; CHECK-NEXT: [[RET_LCSSA:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[RED_2]], [[FOR_BODY]] ], [ [[TMP9]], [[MIDDLE_BLOCK]] ]
; CHECK-NEXT: ret i32 [[RET_LCSSA]]
;
entry:
%guard = icmp sgt i32 %n, 0
br i1 %guard, label %for.body, label %exit

for.body: ; preds = %entry, %for.body
%iv = phi i32 [ %iv.next, %for.body ], [ 0, %entry ]
%red.phi = phi i32 [ %red.2, %for.body ], [ 0, %entry ]
%add = or disjoint i32 %iv, 1
%gep.0 = getelementptr inbounds i32, ptr %arr, i32 %add
%l.0 = load i32, ptr %gep.0, align 4
%gep.1 = getelementptr inbounds i32, ptr %arr, i32 %iv
%l.1 = load i32, ptr %gep.1, align 4
%red.1 = add i32 %l.0, %red.phi
%red.2 = add i32 %red.1, %l.1
%iv.next = add nuw nsw i32 %iv, 2
%cmp = icmp slt i32 %iv.next, %n
br i1 %cmp, label %for.body, label %exit

exit: ; preds = %for.body, %entry
%ret.lcssa = phi i32 [ 0, %entry ], [ %red.2, %for.body ]
ret i32 %ret.lcssa
}

; 16x to use VMLA.i8, same as mla_i8_i32 with multiple uses of the ext `add(mul(x, x))`
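; The zext of the single load feeds both operands of the multiply (x * x);
; expected to tail-fold at VF 16 just like the two-operand i8 mla.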
define i32 @mla_i8_i32_multiuse(ptr nocapture readonly %x, ptr nocapture readonly %y, i32 %n) #0 {
; CHECK-LABEL: @mla_i8_i32_multiuse(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[CMP9:%.*]] = icmp sgt i32 [[N:%.*]], 0
; CHECK-NEXT: br i1 [[CMP9]], label [[VECTOR_PH:%.*]], label [[FOR_COND_CLEANUP:%.*]]
; CHECK: vector.ph:
; CHECK-NEXT: [[N_RND_UP:%.*]] = add nuw i32 [[N]], 15
; CHECK-NEXT: [[N_VEC:%.*]] = and i32 [[N_RND_UP]], -16
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[TMP5:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[ACTIVE_LANE_MASK:%.*]] = call <16 x i1> @llvm.get.active.lane.mask.v16i1.i32(i32 [[INDEX]], i32 [[N]])
; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds i8, ptr [[X:%.*]], i32 [[INDEX]]
; CHECK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <16 x i8> @llvm.masked.load.v16i8.p0(ptr [[TMP0]], i32 1, <16 x i1> [[ACTIVE_LANE_MASK]], <16 x i8> poison)
; CHECK-NEXT: [[TMP1:%.*]] = zext <16 x i8> [[WIDE_MASKED_LOAD]] to <16 x i32>
; CHECK-NEXT: [[TMP2:%.*]] = mul nuw nsw <16 x i32> [[TMP1]], [[TMP1]]
; CHECK-NEXT: [[TMP3:%.*]] = select <16 x i1> [[ACTIVE_LANE_MASK]], <16 x i32> [[TMP2]], <16 x i32> zeroinitializer
; CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> [[TMP3]])
; CHECK-NEXT: [[TMP5]] = add i32 [[TMP4]], [[VEC_PHI]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 16
; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP6]], label [[FOR_COND_CLEANUP]], label [[VECTOR_BODY]], !llvm.loop [[LOOP32:![0-9]+]]
; CHECK: for.cond.cleanup:
; CHECK-NEXT: [[R_0_LCSSA:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[TMP5]], [[VECTOR_BODY]] ]
; CHECK-NEXT: ret i32 [[R_0_LCSSA]]
;
entry:
%cmp9 = icmp sgt i32 %n, 0
br i1 %cmp9, label %for.body, label %for.cond.cleanup

for.body: ; preds = %entry, %for.body
%i.011 = phi i32 [ %inc, %for.body ], [ 0, %entry ]
%r.010 = phi i32 [ %add, %for.body ], [ 0, %entry ]
%arrayidx = getelementptr inbounds i8, ptr %x, i32 %i.011
%0 = load i8, ptr %arrayidx, align 1
%conv = zext i8 %0 to i32
%mul = mul nuw nsw i32 %conv, %conv
%add = add nuw nsw i32 %mul, %r.010
%inc = add nuw nsw i32 %i.011, 1
%exitcond = icmp eq i32 %inc, %n
br i1 %exitcond, label %for.cond.cleanup, label %for.body

for.cond.cleanup: ; preds = %for.body, %entry
%r.0.lcssa = phi i32 [ 0, %entry ], [ %add, %for.body ]
ret i32 %r.0.lcssa
}
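; sext the i16 element, square it, then zext nneg the product to i64:
; vectorized at VF 8 with a scalar epilogue loop.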
define i64 @mla_xx_sext_zext(ptr nocapture noundef readonly %x, i32 %n) #0 {
; CHECK-LABEL: @mla_xx_sext_zext(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[CMP9:%.*]] = icmp sgt i32 [[N:%.*]], 0
; CHECK-NEXT: br i1 [[CMP9]], label [[FOR_BODY_PREHEADER:%.*]], label [[FOR_COND_CLEANUP:%.*]]
; CHECK: for.body.preheader:
; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i32 [[N]], 8
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
; CHECK-NEXT: [[N_VEC:%.*]] = and i32 [[N]], 2147483640
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[TMP5:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds i16, ptr [[X:%.*]], i32 [[INDEX]]
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <8 x i16>, ptr [[TMP0]], align 2
; CHECK-NEXT: [[TMP1:%.*]] = sext <8 x i16> [[WIDE_LOAD]] to <8 x i32>
; CHECK-NEXT: [[TMP2:%.*]] = mul nsw <8 x i32> [[TMP1]], [[TMP1]]
; CHECK-NEXT: [[TMP3:%.*]] = zext nneg <8 x i32> [[TMP2]] to <8 x i64>
; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vector.reduce.add.v8i64(<8 x i64> [[TMP3]])
; CHECK-NEXT: [[TMP5]] = add i64 [[TMP4]], [[VEC_PHI]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 8
; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP33:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[N]], [[N_VEC]]
; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_COND_CLEANUP]], label [[SCALAR_PH]]
; CHECK: scalar.ph:
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[FOR_BODY_PREHEADER]] ]
; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i64 [ [[TMP5]], [[MIDDLE_BLOCK]] ], [ 0, [[FOR_BODY_PREHEADER]] ]
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.cond.cleanup:
; CHECK-NEXT: [[S_0_LCSSA:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[ADD:%.*]], [[FOR_BODY]] ], [ [[TMP5]], [[MIDDLE_BLOCK]] ]
; CHECK-NEXT: ret i64 [[S_0_LCSSA]]
; CHECK: for.body:
; CHECK-NEXT: [[I_011:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
; CHECK-NEXT: [[S_010:%.*]] = phi i64 [ [[ADD]], [[FOR_BODY]] ], [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw i16, ptr [[X]], i32 [[I_011]]
; CHECK-NEXT: [[TMP7:%.*]] = load i16, ptr [[ARRAYIDX]], align 2
; CHECK-NEXT: [[CONV:%.*]] = sext i16 [[TMP7]] to i32
; CHECK-NEXT: [[MUL:%.*]] = mul nsw i32 [[CONV]], [[CONV]]
; CHECK-NEXT: [[CONV3:%.*]] = zext nneg i32 [[MUL]] to i64
; CHECK-NEXT: [[ADD]] = add nuw nsw i64 [[S_010]], [[CONV3]]
; CHECK-NEXT: [[INC]] = add nuw nsw i32 [[I_011]], 1
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i32 [[INC]], [[N]]
; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY]], !llvm.loop [[LOOP34:![0-9]+]]
;
entry:
%cmp9 = icmp sgt i32 %n, 0
br i1 %cmp9, label %for.body, label %for.cond.cleanup

for.cond.cleanup: ; preds = %for.body, %entry
%s.0.lcssa = phi i64 [ 0, %entry ], [ %add, %for.body ]
ret i64 %s.0.lcssa

for.body: ; preds = %entry, %for.body
%i.011 = phi i32 [ %inc, %for.body ], [ 0, %entry ]
%s.010 = phi i64 [ %add, %for.body ], [ 0, %entry ]
%arrayidx = getelementptr inbounds i16, ptr %x, i32 %i.011
%0 = load i16, ptr %arrayidx, align 2
%conv = sext i16 %0 to i32
%mul = mul nsw i32 %conv, %conv
%conv3 = zext nneg i32 %mul to i64
%add = add nuw nsw i64 %s.010, %conv3
%inc = add nuw nsw i32 %i.011, 1
%exitcond.not = icmp eq i32 %inc, %n
br i1 %exitcond.not, label %for.cond.cleanup, label %for.body
}
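; Two reductions in one loop: an i64 mla over the squared sext'd loads and
; a plain i32 add of the same extends. Both are vectorized together at VF 8.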
define i64 @mla_and_add_together_16_64(ptr nocapture noundef readonly %x, i32 noundef %n) #0 {
; CHECK-LABEL: @mla_and_add_together_16_64(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[CMP16:%.*]] = icmp sgt i32 [[N:%.*]], 0
; CHECK-NEXT: tail call void @llvm.assume(i1 [[CMP16]])
; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp samesign ult i32 [[N]], 8
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
; CHECK-NEXT: [[N_VEC:%.*]] = and i32 [[N]], 2147483640
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[TMP5:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI1:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[TMP7:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds i16, ptr [[X:%.*]], i32 [[INDEX]]
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <8 x i16>, ptr [[TMP0]], align 2
; CHECK-NEXT: [[TMP1:%.*]] = sext <8 x i16> [[WIDE_LOAD]] to <8 x i32>
; CHECK-NEXT: [[TMP2:%.*]] = mul nsw <8 x i32> [[TMP1]], [[TMP1]]
; CHECK-NEXT: [[TMP3:%.*]] = zext nneg <8 x i32> [[TMP2]] to <8 x i64>
; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vector.reduce.add.v8i64(<8 x i64> [[TMP3]])
; CHECK-NEXT: [[TMP5]] = add i64 [[TMP4]], [[VEC_PHI]]
; CHECK-NEXT: [[TMP6:%.*]] = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> [[TMP1]])
; CHECK-NEXT: [[TMP7]] = add i32 [[TMP6]], [[VEC_PHI1]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 8
; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP35:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[N]], [[N_VEC]]
; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_COND_CLEANUP:%.*]], label [[SCALAR_PH]]
; CHECK: scalar.ph:
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i64 [ [[TMP5]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY]] ]
; CHECK-NEXT: [[BC_MERGE_RDX2:%.*]] = phi i32 [ [[TMP7]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY]] ]
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.cond.cleanup:
; CHECK-NEXT: [[ADD_LCSSA:%.*]] = phi i64 [ [[ADD:%.*]], [[FOR_BODY]] ], [ [[TMP5]], [[MIDDLE_BLOCK]] ]
; CHECK-NEXT: [[ADD6_LCSSA:%.*]] = phi i32 [ [[ADD6:%.*]], [[FOR_BODY]] ], [ [[TMP7]], [[MIDDLE_BLOCK]] ]
; CHECK-NEXT: [[CONV7:%.*]] = sext i32 [[ADD6_LCSSA]] to i64
; CHECK-NEXT: [[DIV:%.*]] = sdiv i64 [[CONV7]], [[ADD_LCSSA]]
; CHECK-NEXT: ret i64 [[DIV]]
; CHECK: for.body:
; CHECK-NEXT: [[I_019:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
; CHECK-NEXT: [[T_018:%.*]] = phi i64 [ [[ADD]], [[FOR_BODY]] ], [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ]
; CHECK-NEXT: [[S_017:%.*]] = phi i32 [ [[ADD6]], [[FOR_BODY]] ], [ [[BC_MERGE_RDX2]], [[SCALAR_PH]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw i16, ptr [[X]], i32 [[I_019]]
; CHECK-NEXT: [[TMP9:%.*]] = load i16, ptr [[ARRAYIDX]], align 2
; CHECK-NEXT: [[CONV:%.*]] = sext i16 [[TMP9]] to i32
; CHECK-NEXT: [[MUL:%.*]] = mul nsw i32 [[CONV]], [[CONV]]
; CHECK-NEXT: [[CONV3:%.*]] = zext nneg i32 [[MUL]] to i64
; CHECK-NEXT: [[ADD]] = add nuw nsw i64 [[T_018]], [[CONV3]]
; CHECK-NEXT: [[ADD6]] = add nsw i32 [[S_017]], [[CONV]]
; CHECK-NEXT: [[INC]] = add nuw nsw i32 [[I_019]], 1
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i32 [[INC]], [[N]]
; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY]], !llvm.loop [[LOOP36:![0-9]+]]
;
entry:
%cmp16 = icmp sgt i32 %n, 0
tail call void @llvm.assume(i1 %cmp16)
br label %for.body

for.cond.cleanup: ; preds = %for.body
%conv7 = sext i32 %add6 to i64
%div = sdiv i64 %conv7, %add
ret i64 %div

for.body: ; preds = %entry, %for.body
%i.019 = phi i32 [ %inc, %for.body ], [ 0, %entry ]
%t.018 = phi i64 [ %add, %for.body ], [ 0, %entry ]
%s.017 = phi i32 [ %add6, %for.body ], [ 0, %entry ]
%arrayidx = getelementptr inbounds i16, ptr %x, i32 %i.019
%0 = load i16, ptr %arrayidx, align 2
%conv = sext i16 %0 to i32
%mul = mul nsw i32 %conv, %conv
%conv3 = zext nneg i32 %mul to i64
%add = add nuw nsw i64 %t.018, %conv3
%add6 = add nsw i32 %s.017, %conv
%inc = add nuw nsw i32 %i.019, 1
%exitcond.not = icmp eq i32 %inc, %n
br i1 %exitcond.not, label %for.cond.cleanup, label %for.body
}
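; A "double reduction": each iteration accumulates two strided i16 products
; into the same i64 sum. This is not vectorized here and only the scalar
; loop is checked.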
define i64 @interleave_doublereduct_i16_i64(ptr %x, ptr %y, i32 %n) {
; CHECK-LABEL: @interleave_doublereduct_i16_i64(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[CMP23:%.*]] = icmp sgt i32 [[N:%.*]], 0
; CHECK-NEXT: br i1 [[CMP23]], label [[FOR_BODY:%.*]], label [[FOR_COND_CLEANUP:%.*]]
; CHECK: for.cond.cleanup:
; CHECK-NEXT: [[T_0_LCSSA:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[ADD12:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT: ret i64 [[T_0_LCSSA]]
; CHECK: for.body:
; CHECK-NEXT: [[I_025:%.*]] = phi i32 [ [[ADD13:%.*]], [[FOR_BODY]] ], [ 0, [[ENTRY]] ]
; CHECK-NEXT: [[T_024:%.*]] = phi i64 [ [[ADD12]], [[FOR_BODY]] ], [ 0, [[ENTRY]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw i16, ptr [[X:%.*]], i32 [[I_025]]
; CHECK-NEXT: [[TMP0:%.*]] = load i16, ptr [[ARRAYIDX]], align 2
; CHECK-NEXT: [[CONV:%.*]] = sext i16 [[TMP0]] to i32
; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds nuw i16, ptr [[Y:%.*]], i32 [[I_025]]
; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr [[ARRAYIDX1]], align 2
; CHECK-NEXT: [[CONV2:%.*]] = sext i16 [[TMP1]] to i32
; CHECK-NEXT: [[MUL:%.*]] = mul nsw i32 [[CONV2]], [[CONV]]
; CHECK-NEXT: [[CONV3:%.*]] = sext i32 [[MUL]] to i64
; CHECK-NEXT: [[ADD:%.*]] = add nsw i64 [[T_024]], [[CONV3]]
; CHECK-NEXT: [[ADD4:%.*]] = or disjoint i32 [[I_025]], 1
; CHECK-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds nuw i16, ptr [[X]], i32 [[ADD4]]
; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr [[ARRAYIDX5]], align 2
; CHECK-NEXT: [[CONV6:%.*]] = sext i16 [[TMP2]] to i32
; CHECK-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds nuw i16, ptr [[Y]], i32 [[ADD4]]
; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr [[ARRAYIDX8]], align 2
; CHECK-NEXT: [[CONV9:%.*]] = sext i16 [[TMP3]] to i32
; CHECK-NEXT: [[MUL10:%.*]] = mul nsw i32 [[CONV9]], [[CONV6]]
; CHECK-NEXT: [[CONV11:%.*]] = sext i32 [[MUL10]] to i64
; CHECK-NEXT: [[ADD12]] = add nsw i64 [[ADD]], [[CONV11]]
; CHECK-NEXT: [[ADD13]] = add nuw nsw i32 [[I_025]], 2
; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 [[ADD13]], [[N]]
; CHECK-NEXT: br i1 [[CMP]], label [[FOR_BODY]], label [[FOR_COND_CLEANUP]]
;
entry:
%cmp23 = icmp sgt i32 %n, 0
br i1 %cmp23, label %for.body, label %for.cond.cleanup

for.cond.cleanup: ; preds = %for.body, %entry
%t.0.lcssa = phi i64 [ 0, %entry ], [ %add12, %for.body ]
ret i64 %t.0.lcssa

for.body: ; preds = %entry, %for.body
%i.025 = phi i32 [ %add13, %for.body ], [ 0, %entry ]
%t.024 = phi i64 [ %add12, %for.body ], [ 0, %entry ]
%arrayidx = getelementptr inbounds i16, ptr %x, i32 %i.025
%0 = load i16, ptr %arrayidx, align 2
%conv = sext i16 %0 to i32
%arrayidx1 = getelementptr inbounds i16, ptr %y, i32 %i.025
%1 = load i16, ptr %arrayidx1, align 2
%conv2 = sext i16 %1 to i32
%mul = mul nsw i32 %conv2, %conv
%conv3 = sext i32 %mul to i64
%add = add nsw i64 %t.024, %conv3
%add4 = or disjoint i32 %i.025, 1
%arrayidx5 = getelementptr inbounds i16, ptr %x, i32 %add4
%2 = load i16, ptr %arrayidx5, align 2
%conv6 = sext i16 %2 to i32
%arrayidx8 = getelementptr inbounds i16, ptr %y, i32 %add4
%3 = load i16, ptr %arrayidx8, align 2
%conv9 = sext i16 %3 to i32
%mul10 = mul nsw i32 %conv9, %conv6
%conv11 = sext i32 %mul10 to i64
%add12 = add nsw i64 %add, %conv11
%add13 = add nuw nsw i32 %i.025, 2
%cmp = icmp slt i32 %add13, %n
br i1 %cmp, label %for.body, label %for.cond.cleanup
}
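; Computes a sum and a sum-of-squares in one loop and combines them with a
; final sdiv; this loop stays scalar and only the scalar body is checked.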
define i64 @test_std_q31(ptr %x, i32 %n) #0 {
; CHECK-LABEL: @test_std_q31(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[CMP11:%.*]] = icmp sgt i32 [[N:%.*]], 0
; CHECK-NEXT: tail call void @llvm.assume(i1 [[CMP11]])
; CHECK-NEXT: br label [[FOR_BODY1:%.*]]
; CHECK: for.cond.cleanup:
; CHECK-NEXT: [[DIV:%.*]] = sdiv i64 [[ADD3:%.*]], [[ADD:%.*]]
; CHECK-NEXT: ret i64 [[DIV]]
; CHECK: for.body1:
; CHECK-NEXT: [[S_014:%.*]] = phi i64 [ [[ADD]], [[FOR_BODY1]] ], [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT: [[I_013:%.*]] = phi i32 [ [[ADD4:%.*]], [[FOR_BODY1]] ], [ 0, [[ENTRY]] ]
; CHECK-NEXT: [[T_012:%.*]] = phi i64 [ [[ADD3]], [[FOR_BODY1]] ], [ 0, [[ENTRY]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw i32, ptr [[X:%.*]], i32 [[I_013]]
; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[SHR:%.*]] = ashr i32 [[TMP0]], 8
; CHECK-NEXT: [[CONV:%.*]] = sext i32 [[SHR]] to i64
; CHECK-NEXT: [[ADD]] = add nsw i64 [[S_014]], [[CONV]]
; CHECK-NEXT: [[MUL:%.*]] = mul nsw i64 [[CONV]], [[CONV]]
; CHECK-NEXT: [[ADD3]] = add nuw nsw i64 [[MUL]], [[T_012]]
; CHECK-NEXT: [[ADD4]] = add nuw nsw i32 [[I_013]], 1
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i32 [[ADD4]], [[N]]
; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_COND_CLEANUP:%.*]], label [[FOR_BODY1]]
;
entry:
%cmp11 = icmp sgt i32 %n, 0
tail call void @llvm.assume(i1 %cmp11)
br label %for.body

for.cond.cleanup: ; preds = %for.body
%div = sdiv i64 %add3, %add
ret i64 %div

for.body: ; preds = %entry, %for.body
%s.014 = phi i64 [ %add, %for.body ], [ 0, %entry ]
%i.013 = phi i32 [ %add4, %for.body ], [ 0, %entry ]
%t.012 = phi i64 [ %add3, %for.body ], [ 0, %entry ]
%arrayidx = getelementptr inbounds i32, ptr %x, i32 %i.013
%0 = load i32, ptr %arrayidx, align 4
%shr = ashr i32 %0, 8
%conv = sext i32 %shr to i64
%add = add nsw i64 %s.014, %conv
%mul = mul nsw i64 %conv, %conv
%add3 = add nuw nsw i64 %mul, %t.012
%add4 = add nuw nsw i32 %i.013, 1
%exitcond.not = icmp eq i32 %add4, %n
br i1 %exitcond.not, label %for.cond.cleanup, label %for.body
}
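; FIR-style kernel handling two i16 taps per iteration: the stride-2 loads
; become interleave groups (<8 x i16> loads plus shufflevectors) and both
; products feed the same i64 add reduction at VF 4.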
define i64 @test_fir_q15(ptr %x, ptr %y, i32 %n) #0 {
; CHECK-LABEL: @test_fir_q15(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[CMP23:%.*]] = icmp sgt i32 [[N:%.*]], 0
; CHECK-NEXT: br i1 [[CMP23]], label [[FOR_BODY_PREHEADER:%.*]], label [[FOR_COND_CLEANUP:%.*]]
; CHECK: for.body.preheader:
; CHECK-NEXT: [[TMP0:%.*]] = add nsw i32 [[N]], -1
; CHECK-NEXT: [[TMP1:%.*]] = lshr i32 [[TMP0]], 1
; CHECK-NEXT: [[TMP2:%.*]] = add nuw i32 [[TMP1]], 1
; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i32 [[N]], 7
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
; CHECK-NEXT: [[N_VEC:%.*]] = and i32 [[TMP2]], -4
; CHECK-NEXT: [[IND_END:%.*]] = shl i32 [[N_VEC]], 1
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[TMP16:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[OFFSET_IDX:%.*]] = shl i32 [[INDEX]], 1
; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds i16, ptr [[X:%.*]], i32 [[OFFSET_IDX]]
; CHECK-NEXT: [[WIDE_VEC:%.*]] = load <8 x i16>, ptr [[TMP3]], align 2
; CHECK-NEXT: [[STRIDED_VEC:%.*]] = shufflevector <8 x i16> [[WIDE_VEC]], <8 x i16> poison, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
; CHECK-NEXT: [[STRIDED_VEC1:%.*]] = shufflevector <8 x i16> [[WIDE_VEC]], <8 x i16> poison, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
; CHECK-NEXT: [[TMP5:%.*]] = sext <4 x i16> [[STRIDED_VEC]] to <4 x i32>
; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds i16, ptr [[Y:%.*]], i32 [[OFFSET_IDX]]
; CHECK-NEXT: [[WIDE_VEC2:%.*]] = load <8 x i16>, ptr [[TMP4]], align 2
; CHECK-NEXT: [[STRIDED_VEC3:%.*]] = shufflevector <8 x i16> [[WIDE_VEC2]], <8 x i16> poison, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
; CHECK-NEXT: [[STRIDED_VEC4:%.*]] = shufflevector <8 x i16> [[WIDE_VEC2]], <8 x i16> poison, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
; CHECK-NEXT: [[TMP6:%.*]] = sext <4 x i16> [[STRIDED_VEC3]] to <4 x i32>
; CHECK-NEXT: [[TMP7:%.*]] = mul nsw <4 x i32> [[TMP6]], [[TMP5]]
; CHECK-NEXT: [[TMP8:%.*]] = sext <4 x i32> [[TMP7]] to <4 x i64>
; CHECK-NEXT: [[TMP13:%.*]] = sext <4 x i16> [[STRIDED_VEC1]] to <4 x i32>
; CHECK-NEXT: [[TMP14:%.*]] = sext <4 x i16> [[STRIDED_VEC4]] to <4 x i32>
; CHECK-NEXT: [[TMP11:%.*]] = mul nsw <4 x i32> [[TMP14]], [[TMP13]]
; CHECK-NEXT: [[TMP12:%.*]] = sext <4 x i32> [[TMP11]] to <4 x i64>
; CHECK-NEXT: [[TMP9:%.*]] = call i64 @llvm.vector.reduce.add.v4i64(<4 x i64> [[TMP8]])
; CHECK-NEXT: [[TMP10:%.*]] = add i64 [[TMP9]], [[VEC_PHI]]
; CHECK-NEXT: [[TMP15:%.*]] = call i64 @llvm.vector.reduce.add.v4i64(<4 x i64> [[TMP12]])
; CHECK-NEXT: [[TMP16]] = add i64 [[TMP15]], [[TMP10]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4
; CHECK-NEXT: [[TMP17:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP37:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[TMP2]], [[N_VEC]]
; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_COND_CLEANUP]], label [[SCALAR_PH]]
; CHECK: scalar.ph:
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ [[IND_END]], [[MIDDLE_BLOCK]] ], [ 0, [[FOR_BODY_PREHEADER]] ]
; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i64 [ [[TMP16]], [[MIDDLE_BLOCK]] ], [ 0, [[FOR_BODY_PREHEADER]] ]
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.cond.cleanup:
; CHECK-NEXT: [[S_0_LCSSA:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[ADD12:%.*]], [[FOR_BODY]] ], [ [[TMP16]], [[MIDDLE_BLOCK]] ]
; CHECK-NEXT: ret i64 [[S_0_LCSSA]]
; CHECK: for.body:
; CHECK-NEXT: [[I_025:%.*]] = phi i32 [ [[ADD13:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
; CHECK-NEXT: [[S_024:%.*]] = phi i64 [ [[ADD12]], [[FOR_BODY]] ], [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, ptr [[X]], i32 [[I_025]]
; CHECK-NEXT: [[TMP18:%.*]] = load i16, ptr [[ARRAYIDX]], align 2
; CHECK-NEXT: [[CONV:%.*]] = sext i16 [[TMP18]] to i32
; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i16, ptr [[Y]], i32 [[I_025]]
; CHECK-NEXT: [[TMP19:%.*]] = load i16, ptr [[ARRAYIDX1]], align 2
; CHECK-NEXT: [[CONV2:%.*]] = sext i16 [[TMP19]] to i32
; CHECK-NEXT: [[MUL:%.*]] = mul nsw i32 [[CONV2]], [[CONV]]
; CHECK-NEXT: [[CONV3:%.*]] = sext i32 [[MUL]] to i64
; CHECK-NEXT: [[ADD:%.*]] = add nsw i64 [[S_024]], [[CONV3]]
; CHECK-NEXT: [[ADD4:%.*]] = or disjoint i32 [[I_025]], 1
; CHECK-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds i16, ptr [[X]], i32 [[ADD4]]
; CHECK-NEXT: [[TMP20:%.*]] = load i16, ptr [[ARRAYIDX5]], align 2
; CHECK-NEXT: [[CONV6:%.*]] = sext i16 [[TMP20]] to i32
; CHECK-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds i16, ptr [[Y]], i32 [[ADD4]]
; CHECK-NEXT: [[TMP21:%.*]] = load i16, ptr [[ARRAYIDX8]], align 2
; CHECK-NEXT: [[CONV9:%.*]] = sext i16 [[TMP21]] to i32
; CHECK-NEXT: [[MUL10:%.*]] = mul nsw i32 [[CONV9]], [[CONV6]]
; CHECK-NEXT: [[CONV11:%.*]] = sext i32 [[MUL10]] to i64
; CHECK-NEXT: [[ADD12]] = add nsw i64 [[ADD]], [[CONV11]]
; CHECK-NEXT: [[ADD13]] = add nuw nsw i32 [[I_025]], 2
; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 [[ADD13]], [[N]]
; CHECK-NEXT: br i1 [[CMP]], label [[FOR_BODY]], label [[FOR_COND_CLEANUP]], !llvm.loop [[LOOP38:![0-9]+]]
;
entry:
%cmp23 = icmp sgt i32 %n, 0
br i1 %cmp23, label %for.body, label %for.cond.cleanup

for.cond.cleanup: ; preds = %for.body, %entry
%s.0.lcssa = phi i64 [ 0, %entry ], [ %add12, %for.body ]
ret i64 %s.0.lcssa

for.body: ; preds = %entry, %for.body
%i.025 = phi i32 [ %add13, %for.body ], [ 0, %entry ]
%s.024 = phi i64 [ %add12, %for.body ], [ 0, %entry ]
%arrayidx = getelementptr inbounds i16, ptr %x, i32 %i.025
%0 = load i16, ptr %arrayidx, align 2
%conv = sext i16 %0 to i32
%arrayidx1 = getelementptr inbounds i16, ptr %y, i32 %i.025
%1 = load i16, ptr %arrayidx1, align 2
%conv2 = sext i16 %1 to i32
%mul = mul nsw i32 %conv2, %conv
%conv3 = sext i32 %mul to i64
%add = add nsw i64 %s.024, %conv3
%add4 = or disjoint i32 %i.025, 1
%arrayidx5 = getelementptr inbounds i16, ptr %x, i32 %add4
%2 = load i16, ptr %arrayidx5, align 2
%conv6 = sext i16 %2 to i32
%arrayidx8 = getelementptr inbounds i16, ptr %y, i32 %add4
%3 = load i16, ptr %arrayidx8, align 2
%conv9 = sext i16 %3 to i32
%mul10 = mul nsw i32 %conv9, %conv6
%conv11 = sext i32 %mul10 to i64
%add12 = add nsw i64 %add, %conv11
%add13 = add nuw nsw i32 %i.025, 2
%cmp = icmp slt i32 %add13, %n
br i1 %cmp, label %for.body, label %for.cond.cleanup
}

declare void @llvm.assume(i1 noundef)

attributes #0 = { "target-features"="+mve" }