; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=armv8.1m.main -mattr=+mve -enable-arm-maskedldst=true -disable-mve-tail-predication=false --verify-machineinstrs %s -o - | FileCheck %s

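; The functions below check that vectorized loops built around masked loads/stores
; and an induction-vs-trip-count mask are lowered to VCTP-predicated MVE
; instructions inside low-overhead (DLS/LE) loops.

; Multiply-accumulate reduction: accumulates a[i] * b[i] under the loop mask.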
define dso_local i32 @mul_reduce_add(i32* noalias nocapture readonly %a, i32* noalias nocapture readonly %b, i32 %N) {
; CHECK-LABEL: mul_reduce_add:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: cmp r2, #0
; CHECK-NEXT: itt eq
; CHECK-NEXT: moveq r0, #0
; CHECK-NEXT: bxeq lr
; CHECK-NEXT: push {r7, lr}
; CHECK-NEXT: adds r3, r2, #3
; CHECK-NEXT: vmov.i32 q0, #0x0
; CHECK-NEXT: bic r3, r3, #3
; CHECK-NEXT: sub.w r12, r3, #4
; CHECK-NEXT: movs r3, #1
; CHECK-NEXT: add.w lr, r3, r12, lsr #2
; CHECK-NEXT: dls lr, lr
; CHECK-NEXT: .LBB0_1: @ %vector.body
; CHECK-NEXT: @ =>This Inner Loop Header: Depth=1
; CHECK-NEXT: vmov q1, q0
; CHECK-NEXT: vctp.32 r2
; CHECK-NEXT: vpstt
; CHECK-NEXT: vldrwt.u32 q0, [r0]
; CHECK-NEXT: vldrwt.u32 q2, [r1]
; CHECK-NEXT: mov r3, r2
; CHECK-NEXT: vmul.i32 q0, q2, q0
; CHECK-NEXT: adds r0, #16
; CHECK-NEXT: adds r1, #16
; CHECK-NEXT: subs r2, #4
; CHECK-NEXT: vadd.i32 q0, q0, q1
; CHECK-NEXT: le lr, .LBB0_1
; CHECK-NEXT: @ %bb.2: @ %middle.block
; CHECK-NEXT: vctp.32 r3
; CHECK-NEXT: vpsel q0, q0, q1
; CHECK-NEXT: vaddv.u32 r0, q0
; CHECK-NEXT: pop {r7, pc}
entry:
  %cmp8 = icmp eq i32 %N, 0
  br i1 %cmp8, label %for.cond.cleanup, label %vector.ph

vector.ph: ; preds = %entry
  %n.rnd.up = add i32 %N, 3
  %n.vec = and i32 %n.rnd.up, -4
  %trip.count.minus.1 = add i32 %N, -1
  %broadcast.splatinsert11 = insertelement <4 x i32> undef, i32 %trip.count.minus.1, i32 0
  %broadcast.splat12 = shufflevector <4 x i32> %broadcast.splatinsert11, <4 x i32> undef, <4 x i32> zeroinitializer
  br label %vector.body

vector.body: ; preds = %vector.body, %vector.ph
  %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
  %vec.phi = phi <4 x i32> [ zeroinitializer, %vector.ph ], [ %6, %vector.body ]
  %broadcast.splatinsert = insertelement <4 x i32> undef, i32 %index, i32 0
  %broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert, <4 x i32> undef, <4 x i32> zeroinitializer
  %induction = add <4 x i32> %broadcast.splat, <i32 0, i32 1, i32 2, i32 3>
  %0 = getelementptr inbounds i32, i32* %a, i32 %index
  %1 = icmp ule <4 x i32> %induction, %broadcast.splat12
  %2 = bitcast i32* %0 to <4 x i32>*
  %wide.masked.load = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %2, i32 4, <4 x i1> %1, <4 x i32> undef)
  %3 = getelementptr inbounds i32, i32* %b, i32 %index
  %4 = bitcast i32* %3 to <4 x i32>*
  %wide.masked.load13 = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %4, i32 4, <4 x i1> %1, <4 x i32> undef)
  %5 = mul nsw <4 x i32> %wide.masked.load13, %wide.masked.load
  %6 = add nsw <4 x i32> %5, %vec.phi
  %index.next = add i32 %index, 4
  %7 = icmp eq i32 %index.next, %n.vec
  br i1 %7, label %middle.block, label %vector.body

middle.block: ; preds = %vector.body
  %8 = select <4 x i1> %1, <4 x i32> %6, <4 x i32> %vec.phi
  %9 = call i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32> %8)
  br label %for.cond.cleanup

for.cond.cleanup: ; preds = %middle.block, %entry
  %res.0.lcssa = phi i32 [ 0, %entry ], [ %9, %middle.block ]
  ret i32 %res.0.lcssa
}

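; Add reduction over %a; the %b argument is not used inside the vector loop.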
define dso_local i32 @mul_reduce_add_const(i32* noalias nocapture readonly %a, i32 %b, i32 %N) {
; CHECK-LABEL: mul_reduce_add_const:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: cmp r2, #0
; CHECK-NEXT: itt eq
; CHECK-NEXT: moveq r0, #0
; CHECK-NEXT: bxeq lr
; CHECK-NEXT: push {r7, lr}
; CHECK-NEXT: adds r1, r2, #3
; CHECK-NEXT: movs r3, #1
; CHECK-NEXT: bic r1, r1, #3
; CHECK-NEXT: vmov.i32 q0, #0x0
; CHECK-NEXT: subs r1, #4
; CHECK-NEXT: add.w lr, r3, r1, lsr #2
; CHECK-NEXT: dls lr, lr
; CHECK-NEXT: .LBB1_1: @ %vector.body
; CHECK-NEXT: @ =>This Inner Loop Header: Depth=1
; CHECK-NEXT: mov r1, r2
; CHECK-NEXT: vctp.32 r2
; CHECK-NEXT: vmov q1, q0
; CHECK-NEXT: vpst
; CHECK-NEXT: vldrwt.u32 q0, [r0]
; CHECK-NEXT: adds r0, #16
; CHECK-NEXT: subs r2, #4
; CHECK-NEXT: vadd.i32 q0, q0, q1
; CHECK-NEXT: le lr, .LBB1_1
; CHECK-NEXT: @ %bb.2: @ %middle.block
; CHECK-NEXT: vctp.32 r1
; CHECK-NEXT: vpsel q0, q0, q1
; CHECK-NEXT: vaddv.u32 r0, q0
; CHECK-NEXT: pop {r7, pc}
entry:
  %cmp6 = icmp eq i32 %N, 0
  br i1 %cmp6, label %for.cond.cleanup, label %vector.ph

vector.ph: ; preds = %entry
  %n.rnd.up = add i32 %N, 3
  %n.vec = and i32 %n.rnd.up, -4
  %trip.count.minus.1 = add i32 %N, -1
  %broadcast.splatinsert9 = insertelement <4 x i32> undef, i32 %trip.count.minus.1, i32 0
  %broadcast.splat10 = shufflevector <4 x i32> %broadcast.splatinsert9, <4 x i32> undef, <4 x i32> zeroinitializer
  br label %vector.body

vector.body: ; preds = %vector.body, %vector.ph
  %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
  %vec.phi = phi <4 x i32> [ zeroinitializer, %vector.ph ], [ %3, %vector.body ]
  %broadcast.splatinsert = insertelement <4 x i32> undef, i32 %index, i32 0
  %broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert, <4 x i32> undef, <4 x i32> zeroinitializer
  %induction = add <4 x i32> %broadcast.splat, <i32 0, i32 1, i32 2, i32 3>
  %0 = getelementptr inbounds i32, i32* %a, i32 %index
  %1 = icmp ule <4 x i32> %induction, %broadcast.splat10
  %2 = bitcast i32* %0 to <4 x i32>*
  %wide.masked.load = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %2, i32 4, <4 x i1> %1, <4 x i32> undef)
  %3 = add nsw <4 x i32> %wide.masked.load, %vec.phi
  %index.next = add i32 %index, 4
  %4 = icmp eq i32 %index.next, %n.vec
  br i1 %4, label %middle.block, label %vector.body

middle.block: ; preds = %vector.body
  %5 = select <4 x i1> %1, <4 x i32> %3, <4 x i32> %vec.phi
  %6 = call i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32> %5)
  br label %for.cond.cleanup

for.cond.cleanup: ; preds = %middle.block, %entry
  %res.0.lcssa = phi i32 [ 0, %entry ], [ %6, %middle.block ]
  ret i32 %res.0.lcssa
}

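; Same add reduction over %a as mul_reduce_add_const; the IR body is identical.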
define dso_local i32 @add_reduce_add_const(i32* noalias nocapture readonly %a, i32 %b, i32 %N) {
; CHECK-LABEL: add_reduce_add_const:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: cmp r2, #0
; CHECK-NEXT: itt eq
; CHECK-NEXT: moveq r0, #0
; CHECK-NEXT: bxeq lr
; CHECK-NEXT: push {r7, lr}
; CHECK-NEXT: adds r1, r2, #3
; CHECK-NEXT: movs r3, #1
; CHECK-NEXT: bic r1, r1, #3
; CHECK-NEXT: vmov.i32 q0, #0x0
; CHECK-NEXT: subs r1, #4
; CHECK-NEXT: add.w lr, r3, r1, lsr #2
; CHECK-NEXT: dls lr, lr
; CHECK-NEXT: .LBB2_1: @ %vector.body
; CHECK-NEXT: @ =>This Inner Loop Header: Depth=1
; CHECK-NEXT: mov r1, r2
; CHECK-NEXT: vctp.32 r2
; CHECK-NEXT: vmov q1, q0
; CHECK-NEXT: vpst
; CHECK-NEXT: vldrwt.u32 q0, [r0]
; CHECK-NEXT: adds r0, #16
; CHECK-NEXT: subs r2, #4
; CHECK-NEXT: vadd.i32 q0, q0, q1
; CHECK-NEXT: le lr, .LBB2_1
; CHECK-NEXT: @ %bb.2: @ %middle.block
; CHECK-NEXT: vctp.32 r1
; CHECK-NEXT: vpsel q0, q0, q1
; CHECK-NEXT: vaddv.u32 r0, q0
; CHECK-NEXT: pop {r7, pc}
entry:
  %cmp6 = icmp eq i32 %N, 0
  br i1 %cmp6, label %for.cond.cleanup, label %vector.ph

vector.ph: ; preds = %entry
  %n.rnd.up = add i32 %N, 3
  %n.vec = and i32 %n.rnd.up, -4
  %trip.count.minus.1 = add i32 %N, -1
  %broadcast.splatinsert9 = insertelement <4 x i32> undef, i32 %trip.count.minus.1, i32 0
  %broadcast.splat10 = shufflevector <4 x i32> %broadcast.splatinsert9, <4 x i32> undef, <4 x i32> zeroinitializer
  br label %vector.body

vector.body: ; preds = %vector.body, %vector.ph
  %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
  %vec.phi = phi <4 x i32> [ zeroinitializer, %vector.ph ], [ %3, %vector.body ]
  %broadcast.splatinsert = insertelement <4 x i32> undef, i32 %index, i32 0
  %broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert, <4 x i32> undef, <4 x i32> zeroinitializer
  %induction = add <4 x i32> %broadcast.splat, <i32 0, i32 1, i32 2, i32 3>
  %0 = getelementptr inbounds i32, i32* %a, i32 %index
  %1 = icmp ule <4 x i32> %induction, %broadcast.splat10
  %2 = bitcast i32* %0 to <4 x i32>*
  %wide.masked.load = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %2, i32 4, <4 x i1> %1, <4 x i32> undef)
  %3 = add nsw <4 x i32> %wide.masked.load, %vec.phi
  %index.next = add i32 %index, 4
  %4 = icmp eq i32 %index.next, %n.vec
  br i1 %4, label %middle.block, label %vector.body

middle.block: ; preds = %vector.body
  %5 = select <4 x i1> %1, <4 x i32> %3, <4 x i32> %vec.phi
  %6 = call i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32> %5)
  br label %for.cond.cleanup

for.cond.cleanup: ; preds = %middle.block, %entry
  %res.0.lcssa = phi i32 [ 0, %entry ], [ %6, %middle.block ]
  ret i32 %res.0.lcssa
}

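; Predicated store loop: a[i] = b[i] * c, multiplying the loaded vector by a scalar operand.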
define dso_local void @vector_mul_const(i32* noalias nocapture %a, i32* noalias nocapture readonly %b, i32 %c, i32 %N) {
; CHECK-LABEL: vector_mul_const:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: push {r7, lr}
; CHECK-NEXT: cmp r3, #0
; CHECK-NEXT: it eq
; CHECK-NEXT: popeq {r7, pc}
; CHECK-NEXT: add.w r12, r3, #3
; CHECK-NEXT: mov.w lr, #1
; CHECK-NEXT: bic r12, r12, #3
; CHECK-NEXT: sub.w r12, r12, #4
; CHECK-NEXT: add.w lr, lr, r12, lsr #2
; CHECK-NEXT: dls lr, lr
; CHECK-NEXT: .LBB3_1: @ %vector.body
; CHECK-NEXT: @ =>This Inner Loop Header: Depth=1
; CHECK-NEXT: vctp.32 r3
; CHECK-NEXT: vpst
; CHECK-NEXT: vldrwt.u32 q0, [r1]
; CHECK-NEXT: vmul.i32 q0, q0, r2
; CHECK-NEXT: vpst
; CHECK-NEXT: vstrwt.32 q0, [r0]
; CHECK-NEXT: adds r1, #16
; CHECK-NEXT: adds r0, #16
; CHECK-NEXT: subs r3, #4
; CHECK-NEXT: le lr, .LBB3_1
; CHECK-NEXT: @ %bb.2: @ %for.cond.cleanup
; CHECK-NEXT: pop {r7, pc}
entry:
  %cmp6 = icmp eq i32 %N, 0
  br i1 %cmp6, label %for.cond.cleanup, label %vector.ph

vector.ph: ; preds = %entry
  %n.rnd.up = add i32 %N, 3
  %n.vec = and i32 %n.rnd.up, -4
  %trip.count.minus.1 = add i32 %N, -1
  %broadcast.splatinsert8 = insertelement <4 x i32> undef, i32 %trip.count.minus.1, i32 0
  %broadcast.splat9 = shufflevector <4 x i32> %broadcast.splatinsert8, <4 x i32> undef, <4 x i32> zeroinitializer
  %broadcast.splatinsert10 = insertelement <4 x i32> undef, i32 %c, i32 0
  %broadcast.splat11 = shufflevector <4 x i32> %broadcast.splatinsert10, <4 x i32> undef, <4 x i32> zeroinitializer
  br label %vector.body

vector.body: ; preds = %vector.body, %vector.ph
  %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
  %broadcast.splatinsert = insertelement <4 x i32> undef, i32 %index, i32 0
  %broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert, <4 x i32> undef, <4 x i32> zeroinitializer
  %induction = add <4 x i32> %broadcast.splat, <i32 0, i32 1, i32 2, i32 3>
  %0 = getelementptr inbounds i32, i32* %b, i32 %index
  %1 = icmp ule <4 x i32> %induction, %broadcast.splat9
  %2 = bitcast i32* %0 to <4 x i32>*
  %wide.masked.load = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %2, i32 4, <4 x i1> %1, <4 x i32> undef)
  %3 = mul nsw <4 x i32> %wide.masked.load, %broadcast.splat11
  %4 = getelementptr inbounds i32, i32* %a, i32 %index
  %5 = bitcast i32* %4 to <4 x i32>*
  call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %3, <4 x i32>* %5, i32 4, <4 x i1> %1)
  %index.next = add i32 %index, 4
  %6 = icmp eq i32 %index.next, %n.vec
  br i1 %6, label %for.cond.cleanup, label %vector.body

for.cond.cleanup: ; preds = %vector.body, %entry
  ret void
}

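; Same pattern with a vector-plus-scalar add: a[i] = b[i] + c.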
define dso_local void @vector_add_const(i32* noalias nocapture %a, i32* noalias nocapture readonly %b, i32 %c, i32 %N) {
; CHECK-LABEL: vector_add_const:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: push {r7, lr}
; CHECK-NEXT: cmp r3, #0
; CHECK-NEXT: it eq
; CHECK-NEXT: popeq {r7, pc}
; CHECK-NEXT: add.w r12, r3, #3
; CHECK-NEXT: mov.w lr, #1
; CHECK-NEXT: bic r12, r12, #3
; CHECK-NEXT: sub.w r12, r12, #4
; CHECK-NEXT: add.w lr, lr, r12, lsr #2
; CHECK-NEXT: dls lr, lr
; CHECK-NEXT: .LBB4_1: @ %vector.body
; CHECK-NEXT: @ =>This Inner Loop Header: Depth=1
; CHECK-NEXT: vctp.32 r3
; CHECK-NEXT: vpst
; CHECK-NEXT: vldrwt.u32 q0, [r1]
; CHECK-NEXT: vadd.i32 q0, q0, r2
; CHECK-NEXT: vpst
; CHECK-NEXT: vstrwt.32 q0, [r0]
; CHECK-NEXT: adds r1, #16
; CHECK-NEXT: adds r0, #16
; CHECK-NEXT: subs r3, #4
; CHECK-NEXT: le lr, .LBB4_1
; CHECK-NEXT: @ %bb.2: @ %for.cond.cleanup
; CHECK-NEXT: pop {r7, pc}
entry:
  %cmp6 = icmp eq i32 %N, 0
  br i1 %cmp6, label %for.cond.cleanup, label %vector.ph

vector.ph: ; preds = %entry
  %n.rnd.up = add i32 %N, 3
  %n.vec = and i32 %n.rnd.up, -4
  %trip.count.minus.1 = add i32 %N, -1
  %broadcast.splatinsert8 = insertelement <4 x i32> undef, i32 %trip.count.minus.1, i32 0
  %broadcast.splat9 = shufflevector <4 x i32> %broadcast.splatinsert8, <4 x i32> undef, <4 x i32> zeroinitializer
  %broadcast.splatinsert10 = insertelement <4 x i32> undef, i32 %c, i32 0
  %broadcast.splat11 = shufflevector <4 x i32> %broadcast.splatinsert10, <4 x i32> undef, <4 x i32> zeroinitializer
  br label %vector.body

vector.body: ; preds = %vector.body, %vector.ph
  %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
  %broadcast.splatinsert = insertelement <4 x i32> undef, i32 %index, i32 0
  %broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert, <4 x i32> undef, <4 x i32> zeroinitializer
  %induction = add <4 x i32> %broadcast.splat, <i32 0, i32 1, i32 2, i32 3>
  %0 = getelementptr inbounds i32, i32* %b, i32 %index
  %1 = icmp ule <4 x i32> %induction, %broadcast.splat9
  %2 = bitcast i32* %0 to <4 x i32>*
  %wide.masked.load = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %2, i32 4, <4 x i1> %1, <4 x i32> undef)
  %3 = add nsw <4 x i32> %wide.masked.load, %broadcast.splat11
  %4 = getelementptr inbounds i32, i32* %a, i32 %index
  %5 = bitcast i32* %4 to <4 x i32>*
  call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %3, <4 x i32>* %5, i32 4, <4 x i1> %1)
  %index.next = add i32 %index, 4
  %6 = icmp eq i32 %index.next, %n.vec
  br i1 %6, label %for.cond.cleanup, label %vector.body

for.cond.cleanup: ; preds = %vector.body, %entry
  ret void
}

declare <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>*, i32 immarg, <4 x i1>, <4 x i32>)
declare void @llvm.masked.store.v4i32.p0v4i32(<4 x i32>, <4 x i32>*, i32 immarg, <4 x i1>)
declare i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32>)