1 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2 ; RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve.fp %s -o - | FileCheck %s
4 ; Check some LSR loop postinc
6 ; fma loop with a destination that is the same as one of the sources
7 define void @fma(ptr noalias nocapture readonly %A, ptr noalias nocapture readonly %B, ptr noalias nocapture %C, i32 %n) {
9 ; CHECK: @ %bb.0: @ %entry
10 ; CHECK-NEXT: .save {r4, r5, r6, lr}
11 ; CHECK-NEXT: push {r4, r5, r6, lr}
12 ; CHECK-NEXT: cmp r3, #1
13 ; CHECK-NEXT: blt .LBB0_8
14 ; CHECK-NEXT: @ %bb.1: @ %for.body.preheader
15 ; CHECK-NEXT: cmp r3, #3
16 ; CHECK-NEXT: bhi .LBB0_3
17 ; CHECK-NEXT: @ %bb.2:
18 ; CHECK-NEXT: mov.w r12, #0
19 ; CHECK-NEXT: b .LBB0_6
20 ; CHECK-NEXT: .LBB0_3: @ %vector.ph
21 ; CHECK-NEXT: bic r12, r3, #3
22 ; CHECK-NEXT: movs r5, #1
23 ; CHECK-NEXT: sub.w r6, r12, #4
24 ; CHECK-NEXT: mov r4, r0
25 ; CHECK-NEXT: add.w lr, r5, r6, lsr #2
26 ; CHECK-NEXT: mov r5, r1
27 ; CHECK-NEXT: mov r6, r2
28 ; CHECK-NEXT: .LBB0_4: @ %vector.body
29 ; CHECK-NEXT: @ =>This Inner Loop Header: Depth=1
30 ; CHECK-NEXT: vldrw.u32 q0, [r4], #16
31 ; CHECK-NEXT: vldrw.u32 q1, [r5], #16
32 ; CHECK-NEXT: vldrw.u32 q2, [r6]
33 ; CHECK-NEXT: vfma.f32 q2, q1, q0
34 ; CHECK-NEXT: vstrb.8 q2, [r6], #16
35 ; CHECK-NEXT: le lr, .LBB0_4
36 ; CHECK-NEXT: @ %bb.5: @ %middle.block
37 ; CHECK-NEXT: cmp r12, r3
39 ; CHECK-NEXT: popeq {r4, r5, r6, pc}
40 ; CHECK-NEXT: .LBB0_6: @ %for.body.preheader12
41 ; CHECK-NEXT: sub.w lr, r3, r12
42 ; CHECK-NEXT: add.w r0, r0, r12, lsl #2
43 ; CHECK-NEXT: add.w r1, r1, r12, lsl #2
44 ; CHECK-NEXT: add.w r2, r2, r12, lsl #2
45 ; CHECK-NEXT: .LBB0_7: @ %for.body
46 ; CHECK-NEXT: @ =>This Inner Loop Header: Depth=1
47 ; CHECK-NEXT: vldmia r0!, {s0}
48 ; CHECK-NEXT: vldmia r1!, {s2}
49 ; CHECK-NEXT: vldr s4, [r2]
50 ; CHECK-NEXT: vfma.f32 s4, s2, s0
51 ; CHECK-NEXT: vstmia r2!, {s4}
52 ; CHECK-NEXT: le lr, .LBB0_7
53 ; CHECK-NEXT: .LBB0_8: @ %for.cond.cleanup
54 ; CHECK-NEXT: pop {r4, r5, r6, pc}
; entry: run the loops only when n > 0; the IR below computes C[i] += A[i] * B[i].
56 %cmp8 = icmp sgt i32 %n, 0
57 br i1 %cmp8, label %for.body.preheader, label %for.cond.cleanup
59 for.body.preheader: ; preds = %entry
60 %min.iters.check = icmp ult i32 %n, 4
61 br i1 %min.iters.check, label %for.body.preheader12, label %vector.ph
63 for.body.preheader12: ; preds = %middle.block, %for.body.preheader
64 %i.09.ph = phi i32 [ 0, %for.body.preheader ], [ %n.vec, %middle.block ]
; vector.ph: round n down to a multiple of 4 (n.vec) for the vectorized loop.
67 vector.ph: ; preds = %for.body.preheader
68 %n.vec = and i32 %n, -4
; vector.body: 4-wide fast-math fmul/fadd of A and B accumulated into C,
; stored back to the same address %5 (destination aliases one source).
71 vector.body: ; preds = %vector.body, %vector.ph
72 %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
73 %0 = getelementptr inbounds float, ptr %A, i32 %index
74 %1 = bitcast ptr %0 to ptr
75 %wide.load = load <4 x float>, ptr %1, align 4
76 %2 = getelementptr inbounds float, ptr %B, i32 %index
77 %3 = bitcast ptr %2 to ptr
78 %wide.load10 = load <4 x float>, ptr %3, align 4
79 %4 = fmul fast <4 x float> %wide.load10, %wide.load
80 %5 = getelementptr inbounds float, ptr %C, i32 %index
81 %6 = bitcast ptr %5 to ptr
82 %wide.load11 = load <4 x float>, ptr %6, align 4
83 %7 = fadd fast <4 x float> %wide.load11, %4
84 %8 = bitcast ptr %5 to ptr
85 store <4 x float> %7, ptr %8, align 4
86 %index.next = add i32 %index, 4
87 %9 = icmp eq i32 %index.next, %n.vec
88 br i1 %9, label %middle.block, label %vector.body
90 middle.block: ; preds = %vector.body
91 %cmp.n = icmp eq i32 %n.vec, %n
92 br i1 %cmp.n, label %for.cond.cleanup, label %for.body.preheader12
94 for.cond.cleanup: ; preds = %for.body, %middle.block, %entry
; for.body: scalar epilogue handling the remaining n - n.vec elements,
; starting at %i.09.ph (0 when the vector loop was skipped).
97 for.body: ; preds = %for.body.preheader12, %for.body
98 %i.09 = phi i32 [ %inc, %for.body ], [ %i.09.ph, %for.body.preheader12 ]
99 %arrayidx = getelementptr inbounds float, ptr %A, i32 %i.09
100 %10 = load float, ptr %arrayidx, align 4
101 %arrayidx1 = getelementptr inbounds float, ptr %B, i32 %i.09
102 %11 = load float, ptr %arrayidx1, align 4
103 %mul = fmul fast float %11, %10
104 %arrayidx2 = getelementptr inbounds float, ptr %C, i32 %i.09
105 %12 = load float, ptr %arrayidx2, align 4
106 %add = fadd fast float %12, %mul
107 store float %add, ptr %arrayidx2, align 4
108 %inc = add nuw nsw i32 %i.09, 1
109 %exitcond = icmp eq i32 %inc, %n
110 br i1 %exitcond, label %for.cond.cleanup, label %for.body
114 ; Same as above but tail predicated
115 ; FIXME: The postinc here is put on the load, not the store. An extra mov is needed in the loop because of it.
116 define void @fma_tailpred(ptr noalias nocapture readonly %A, ptr noalias nocapture readonly %B, ptr noalias nocapture %C, i32 %n) {
117 ; CHECK-LABEL: fma_tailpred:
118 ; CHECK: @ %bb.0: @ %entry
119 ; CHECK-NEXT: .save {r4, lr}
120 ; CHECK-NEXT: push {r4, lr}
121 ; CHECK-NEXT: .vsave {d8, d9}
122 ; CHECK-NEXT: vpush {d8, d9}
123 ; CHECK-NEXT: cmp r3, #1
124 ; CHECK-NEXT: blt .LBB1_3
125 ; CHECK-NEXT: @ %bb.1: @ %vector.ph
126 ; CHECK-NEXT: add.w r12, r3, #3
127 ; CHECK-NEXT: mov.w lr, #1
128 ; CHECK-NEXT: bic r12, r12, #3
129 ; CHECK-NEXT: adr r4, .LCPI1_0
130 ; CHECK-NEXT: sub.w r12, r12, #4
131 ; CHECK-NEXT: vldrw.u32 q0, [r4]
132 ; CHECK-NEXT: add.w lr, lr, r12, lsr #2
133 ; CHECK-NEXT: sub.w r12, r3, #1
134 ; CHECK-NEXT: movs r3, #0
135 ; CHECK-NEXT: vdup.32 q1, r12
136 ; CHECK-NEXT: .LBB1_2: @ %vector.body
137 ; CHECK-NEXT: @ =>This Inner Loop Header: Depth=1
138 ; CHECK-NEXT: vdup.32 q2, r3
139 ; CHECK-NEXT: adds r3, #4
140 ; CHECK-NEXT: vorr q2, q2, q0
141 ; CHECK-NEXT: vpttt.u32 cs, q1, q2
142 ; CHECK-NEXT: vldrwt.u32 q2, [r0], #16
143 ; CHECK-NEXT: vldrwt.u32 q3, [r1], #16
144 ; CHECK-NEXT: vldrwt.u32 q4, [r2]
145 ; CHECK-NEXT: vfma.f32 q4, q3, q2
147 ; CHECK-NEXT: vstrwt.32 q4, [r2], #16
148 ; CHECK-NEXT: le lr, .LBB1_2
149 ; CHECK-NEXT: .LBB1_3: @ %for.cond.cleanup
150 ; CHECK-NEXT: vpop {d8, d9}
151 ; CHECK-NEXT: pop {r4, pc}
152 ; CHECK-NEXT: .p2align 4
153 ; CHECK-NEXT: @ %bb.4:
154 ; CHECK-NEXT: .LCPI1_0:
155 ; CHECK-NEXT: .long 0 @ 0x0
156 ; CHECK-NEXT: .long 1 @ 0x1
157 ; CHECK-NEXT: .long 2 @ 0x2
158 ; CHECK-NEXT: .long 3 @ 0x3
; entry: guard against non-positive n; no scalar epilogue is needed because
; the vector loop below is fully masked.
160 %cmp8 = icmp sgt i32 %n, 0
161 br i1 %cmp8, label %vector.ph, label %for.cond.cleanup
; vector.ph: round n UP to a multiple of 4; the splat of n-1 is the bound
; the per-lane induction values are compared against each iteration.
163 vector.ph: ; preds = %entry
164 %n.rnd.up = add i32 %n, 3
165 %n.vec = and i32 %n.rnd.up, -4
166 %trip.count.minus.1 = add i32 %n, -1
167 %broadcast.splatinsert10 = insertelement <4 x i32> undef, i32 %trip.count.minus.1, i32 0
168 %broadcast.splat11 = shufflevector <4 x i32> %broadcast.splatinsert10, <4 x i32> undef, <4 x i32> zeroinitializer
169 br label %vector.body
; vector.body: same C += A * B computation as @fma, but every load/store is
; masked by %1 (lane index <= n-1), i.e. tail-predicated.
171 vector.body: ; preds = %vector.body, %vector.ph
172 %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
173 %broadcast.splatinsert = insertelement <4 x i32> undef, i32 %index, i32 0
174 %broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert, <4 x i32> undef, <4 x i32> zeroinitializer
175 %induction = or <4 x i32> %broadcast.splat, <i32 0, i32 1, i32 2, i32 3>
176 %0 = getelementptr inbounds float, ptr %A, i32 %index
177 %1 = icmp ule <4 x i32> %induction, %broadcast.splat11
178 %2 = bitcast ptr %0 to ptr
179 %wide.masked.load = call <4 x float> @llvm.masked.load.v4f32.p0(ptr %2, i32 4, <4 x i1> %1, <4 x float> undef)
180 %3 = getelementptr inbounds float, ptr %B, i32 %index
181 %4 = bitcast ptr %3 to ptr
182 %wide.masked.load12 = call <4 x float> @llvm.masked.load.v4f32.p0(ptr %4, i32 4, <4 x i1> %1, <4 x float> undef)
183 %5 = fmul fast <4 x float> %wide.masked.load12, %wide.masked.load
184 %6 = getelementptr inbounds float, ptr %C, i32 %index
185 %7 = bitcast ptr %6 to ptr
186 %wide.masked.load13 = call <4 x float> @llvm.masked.load.v4f32.p0(ptr %7, i32 4, <4 x i1> %1, <4 x float> undef)
187 %8 = fadd fast <4 x float> %wide.masked.load13, %5
188 %9 = bitcast ptr %6 to ptr
189 call void @llvm.masked.store.v4f32.p0(<4 x float> %8, ptr %9, i32 4, <4 x i1> %1)
190 %index.next = add i32 %index, 4
191 %10 = icmp eq i32 %index.next, %n.vec
192 br i1 %10, label %for.cond.cleanup, label %vector.body
194 for.cond.cleanup: ; preds = %vector.body, %entry
199 ; Multiple loads in the loop with a common base
200 define ptr @test(ptr nocapture readonly %input_row, ptr nocapture readonly %input_col, i16 zeroext %output_ch, i16 zeroext %num_cols, i32 %col_offset, i16 signext %activation_min, i16 zeroext %row_len, ptr nocapture readonly %bias, ptr returned %out) {
202 ; CHECK: @ %bb.0: @ %entry
203 ; CHECK-NEXT: .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
204 ; CHECK-NEXT: push.w {r4, r5, r6, r7, r8, r9, r10, r11, lr}
205 ; CHECK-NEXT: .pad #20
206 ; CHECK-NEXT: sub sp, #20
207 ; CHECK-NEXT: cmp r3, #4
208 ; CHECK-NEXT: strd r0, r1, [sp, #12] @ 8-byte Folded Spill
209 ; CHECK-NEXT: bne .LBB2_8
210 ; CHECK-NEXT: @ %bb.1: @ %for.cond.preheader
211 ; CHECK-NEXT: cmp r2, #0
212 ; CHECK-NEXT: beq .LBB2_8
213 ; CHECK-NEXT: @ %bb.2: @ %for.body.lr.ph
214 ; CHECK-NEXT: ldr r3, [sp, #64]
215 ; CHECK-NEXT: mov.w r9, #0
216 ; CHECK-NEXT: ldr r1, [sp, #16] @ 4-byte Reload
217 ; CHECK-NEXT: ldr.w r11, [sp, #56]
218 ; CHECK-NEXT: add.w r0, r1, r3, lsl #1
219 ; CHECK-NEXT: str r0, [sp, #8] @ 4-byte Spill
220 ; CHECK-NEXT: adds r0, r1, r3
221 ; CHECK-NEXT: str r0, [sp, #4] @ 4-byte Spill
222 ; CHECK-NEXT: add.w r0, r3, r3, lsl #1
223 ; CHECK-NEXT: add r0, r1
224 ; CHECK-NEXT: str r0, [sp] @ 4-byte Spill
225 ; CHECK-NEXT: adds r0, r3, #7
226 ; CHECK-NEXT: lsrs r0, r0, #3
227 ; CHECK-NEXT: b .LBB2_5
228 ; CHECK-NEXT: .LBB2_3: @ in Loop: Header=BB2_5 Depth=1
229 ; CHECK-NEXT: mov r10, r12
230 ; CHECK-NEXT: mov r8, r12
231 ; CHECK-NEXT: mov r6, r12
232 ; CHECK-NEXT: .LBB2_4: @ %for.cond.cleanup23
233 ; CHECK-NEXT: @ in Loop: Header=BB2_5 Depth=1
234 ; CHECK-NEXT: ldr r3, [sp, #72]
235 ; CHECK-NEXT: add.w r1, r8, r10
236 ; CHECK-NEXT: add r1, r6
237 ; CHECK-NEXT: add r1, r12
238 ; CHECK-NEXT: strb.w r1, [r3, r9]
239 ; CHECK-NEXT: add.w r9, r9, #1
240 ; CHECK-NEXT: cmp r9, r2
241 ; CHECK-NEXT: beq .LBB2_8
242 ; CHECK-NEXT: .LBB2_5: @ %for.body
243 ; CHECK-NEXT: @ =>This Loop Header: Depth=1
244 ; CHECK-NEXT: @ Child Loop BB2_7 Depth 2
245 ; CHECK-NEXT: ldr r1, [sp, #68]
246 ; CHECK-NEXT: ldr.w r12, [r1, r9, lsl #2]
247 ; CHECK-NEXT: subs r1, r0, r0
248 ; CHECK-NEXT: ble .LBB2_3
249 ; CHECK-NEXT: @ %bb.6: @ %for.body24.preheader
250 ; CHECK-NEXT: @ in Loop: Header=BB2_5 Depth=1
251 ; CHECK-NEXT: ldr r7, [sp, #64]
252 ; CHECK-NEXT: mov r6, r12
253 ; CHECK-NEXT: ldr r3, [sp, #12] @ 4-byte Reload
254 ; CHECK-NEXT: dls lr, r1
255 ; CHECK-NEXT: ldr r1, [sp, #16] @ 4-byte Reload
256 ; CHECK-NEXT: mov r8, r12
257 ; CHECK-NEXT: mla r7, r9, r7, r3
258 ; CHECK-NEXT: ldr r5, [sp, #8] @ 4-byte Reload
259 ; CHECK-NEXT: ldrd r4, r3, [sp] @ 8-byte Folded Reload
260 ; CHECK-NEXT: mov r10, r12
261 ; CHECK-NEXT: .LBB2_7: @ %for.body24
262 ; CHECK-NEXT: @ Parent Loop BB2_5 Depth=1
263 ; CHECK-NEXT: @ => This Inner Loop Header: Depth=2
264 ; CHECK-NEXT: vldrb.s16 q0, [r4], #8
265 ; CHECK-NEXT: vadd.i16 q1, q0, r11
266 ; CHECK-NEXT: vldrb.s16 q0, [r7], #8
267 ; CHECK-NEXT: vmlava.s16 r12, q0, q1
268 ; CHECK-NEXT: vldrb.s16 q1, [r5], #8
269 ; CHECK-NEXT: vadd.i16 q1, q1, r11
270 ; CHECK-NEXT: vmlava.s16 r6, q0, q1
271 ; CHECK-NEXT: vldrb.s16 q1, [r3], #8
272 ; CHECK-NEXT: vadd.i16 q1, q1, r11
273 ; CHECK-NEXT: vmlava.s16 r8, q0, q1
274 ; CHECK-NEXT: vldrb.s16 q1, [r1], #8
275 ; CHECK-NEXT: vadd.i16 q1, q1, r11
276 ; CHECK-NEXT: vmlava.s16 r10, q0, q1
277 ; CHECK-NEXT: le lr, .LBB2_7
278 ; CHECK-NEXT: b .LBB2_4
279 ; CHECK-NEXT: .LBB2_8: @ %if.end
280 ; CHECK-NEXT: ldr r0, [sp, #72]
281 ; CHECK-NEXT: add sp, #20
282 ; CHECK-NEXT: pop.w {r4, r5, r6, r7, r8, r9, r10, r11, pc}
; entry: only the num_cols == 4 case is handled; otherwise fall straight
; through to if.end.
284 %cmp = icmp eq i16 %num_cols, 4
285 br i1 %cmp, label %for.cond.preheader, label %if.end
287 for.cond.preheader: ; preds = %entry
288 %conv2 = zext i16 %output_ch to i32
289 %cmp3114 = icmp eq i16 %output_ch, 0
290 br i1 %cmp3114, label %if.end, label %for.body.lr.ph
; for.body.lr.ph: precompute the four column base pointers
; (input_col + {0,1,2,3} * row_len) and the splat of col_offset.
292 for.body.lr.ph: ; preds = %for.cond.preheader
293 %conv5 = zext i16 %row_len to i32
294 %add.ptr9 = getelementptr inbounds i8, ptr %input_col, i32 %conv5
295 %mul11 = shl nuw nsw i32 %conv5, 1
296 %add.ptr12 = getelementptr inbounds i8, ptr %input_col, i32 %mul11
297 %mul14 = mul nuw nsw i32 %conv5, 3
298 %add.ptr15 = getelementptr inbounds i8, ptr %input_col, i32 %mul14
299 %add = add nuw nsw i32 %conv5, 7
300 %div = lshr i32 %add, 3
301 %conv25 = trunc i32 %col_offset to i16
302 %.splatinsert = insertelement <8 x i16> undef, i16 %conv25, i32 0
303 %.splat = shufflevector <8 x i16> %.splatinsert, <8 x i16> undef, <8 x i32> zeroinitializer
; for.body: outer loop over output channels; all four accumulators are
; seeded from the bias value %0 loaded for this channel.
306 for.body: ; preds = %for.cond.cleanup23, %for.body.lr.ph
307 %i_out_ch.0116 = phi i32 [ 0, %for.body.lr.ph ], [ %inc37, %for.cond.cleanup23 ]
308 %i_row_loop.0115 = phi i32 [ undef, %for.body.lr.ph ], [ %i_row_loop.1.lcssa, %for.cond.cleanup23 ]
309 %arrayidx = getelementptr inbounds i32, ptr %bias, i32 %i_out_ch.0116
310 %0 = load i32, ptr %arrayidx, align 4
311 %cmp2199 = icmp slt i32 %i_row_loop.0115, %div
312 br i1 %cmp2199, label %for.body24.preheader, label %for.cond.cleanup23
314 for.body24.preheader: ; preds = %for.body
315 %mul = mul nuw nsw i32 %i_out_ch.0116, %conv5
316 %add.ptr = getelementptr inbounds i8, ptr %input_row, i32 %mul
; for.cond.cleanup23: sum the four accumulators, truncate to i8 and store
; to out[i_out_ch].
319 for.cond.cleanup23: ; preds = %for.body24, %for.body
320 %acc_0.0.lcssa = phi i32 [ %0, %for.body ], [ %20, %for.body24 ]
321 %acc_1.0.lcssa = phi i32 [ %0, %for.body ], [ %21, %for.body24 ]
322 %acc_2.0.lcssa = phi i32 [ %0, %for.body ], [ %22, %for.body24 ]
323 %acc_3.0.lcssa = phi i32 [ %0, %for.body ], [ %23, %for.body24 ]
324 %i_row_loop.1.lcssa = phi i32 [ %i_row_loop.0115, %for.body ], [ %div, %for.body24 ]
325 %add31 = add nsw i32 %acc_1.0.lcssa, %acc_0.0.lcssa
326 %add32 = add nsw i32 %add31, %acc_2.0.lcssa
327 %add33 = add nsw i32 %add32, %acc_3.0.lcssa
328 %conv34 = trunc i32 %add33 to i8
329 %arrayidx35 = getelementptr inbounds i8, ptr %out, i32 %i_out_ch.0116
330 store i8 %conv34, ptr %arrayidx35, align 1
331 %inc37 = add nuw nsw i32 %i_out_ch.0116, 1
332 %exitcond120 = icmp eq i32 %inc37, %conv2
333 br i1 %exitcond120, label %if.end, label %for.body
; for.body24: inner loop; four vmldava accumulations that share the single
; row load %3, each against one post-incremented column pointer.
335 for.body24: ; preds = %for.body24, %for.body24.preheader
336 %ip_r0.0109 = phi ptr [ %add.ptr26, %for.body24 ], [ %add.ptr, %for.body24.preheader ]
337 %ip_c0.0108 = phi ptr [ %add.ptr27, %for.body24 ], [ %input_col, %for.body24.preheader ]
338 %ip_c1.0107 = phi ptr [ %add.ptr28, %for.body24 ], [ %add.ptr9, %for.body24.preheader ]
339 %ip_c2.0106 = phi ptr [ %add.ptr29, %for.body24 ], [ %add.ptr12, %for.body24.preheader ]
340 %i_row_loop.1105 = phi i32 [ %inc, %for.body24 ], [ %i_row_loop.0115, %for.body24.preheader ]
341 %ip_c3.0104 = phi ptr [ %add.ptr30, %for.body24 ], [ %add.ptr15, %for.body24.preheader ]
342 %acc_3.0103 = phi i32 [ %23, %for.body24 ], [ %0, %for.body24.preheader ]
343 %acc_2.0102 = phi i32 [ %22, %for.body24 ], [ %0, %for.body24.preheader ]
344 %acc_1.0101 = phi i32 [ %21, %for.body24 ], [ %0, %for.body24.preheader ]
345 %acc_0.0100 = phi i32 [ %20, %for.body24 ], [ %0, %for.body24.preheader ]
346 %1 = bitcast ptr %ip_r0.0109 to ptr
347 %2 = load <8 x i8>, ptr %1, align 1
348 %3 = sext <8 x i8> %2 to <8 x i16>
349 %add.ptr26 = getelementptr inbounds i8, ptr %ip_r0.0109, i32 8
350 %4 = bitcast ptr %ip_c0.0108 to ptr
351 %5 = load <8 x i8>, ptr %4, align 1
352 %6 = sext <8 x i8> %5 to <8 x i16>
353 %add.ptr27 = getelementptr inbounds i8, ptr %ip_c0.0108, i32 8
354 %7 = add <8 x i16> %.splat, %6
355 %8 = bitcast ptr %ip_c1.0107 to ptr
356 %9 = load <8 x i8>, ptr %8, align 1
357 %10 = sext <8 x i8> %9 to <8 x i16>
358 %add.ptr28 = getelementptr inbounds i8, ptr %ip_c1.0107, i32 8
359 %11 = add <8 x i16> %.splat, %10
360 %12 = bitcast ptr %ip_c2.0106 to ptr
361 %13 = load <8 x i8>, ptr %12, align 1
362 %14 = sext <8 x i8> %13 to <8 x i16>
363 %add.ptr29 = getelementptr inbounds i8, ptr %ip_c2.0106, i32 8
364 %15 = add <8 x i16> %.splat, %14
365 %16 = bitcast ptr %ip_c3.0104 to ptr
366 %17 = load <8 x i8>, ptr %16, align 1
367 %18 = sext <8 x i8> %17 to <8 x i16>
368 %add.ptr30 = getelementptr inbounds i8, ptr %ip_c3.0104, i32 8
369 %19 = add <8 x i16> %.splat, %18
370 %20 = tail call i32 @llvm.arm.mve.vmldava.v8i16(i32 0, i32 0, i32 0, i32 %acc_0.0100, <8 x i16> %3, <8 x i16> %7)
371 %21 = tail call i32 @llvm.arm.mve.vmldava.v8i16(i32 0, i32 0, i32 0, i32 %acc_1.0101, <8 x i16> %3, <8 x i16> %11)
372 %22 = tail call i32 @llvm.arm.mve.vmldava.v8i16(i32 0, i32 0, i32 0, i32 %acc_2.0102, <8 x i16> %3, <8 x i16> %15)
373 %23 = tail call i32 @llvm.arm.mve.vmldava.v8i16(i32 0, i32 0, i32 0, i32 %acc_3.0103, <8 x i16> %3, <8 x i16> %19)
374 %inc = add nsw i32 %i_row_loop.1105, 1
375 %exitcond = icmp eq i32 %inc, %div
376 br i1 %exitcond, label %for.cond.cleanup23, label %for.body24
378 if.end: ; preds = %for.cond.cleanup23, %for.cond.preheader, %entry
382 ; Same as above with optsize
383 define ptr @test_optsize(ptr nocapture readonly %input_row, ptr nocapture readonly %input_col, i16 zeroext %output_ch, i16 zeroext %num_cols, i32 %col_offset, i16 signext %activation_min, i16 zeroext %row_len, ptr nocapture readonly %bias, ptr returned %out) optsize {
384 ; CHECK-LABEL: test_optsize:
385 ; CHECK: @ %bb.0: @ %entry
386 ; CHECK-NEXT: .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
387 ; CHECK-NEXT: push.w {r4, r5, r6, r7, r8, r9, r10, r11, lr}
388 ; CHECK-NEXT: .pad #20
389 ; CHECK-NEXT: sub sp, #20
390 ; CHECK-NEXT: cmp r3, #4
391 ; CHECK-NEXT: strd r0, r1, [sp, #12] @ 8-byte Folded Spill
392 ; CHECK-NEXT: bne .LBB3_8
393 ; CHECK-NEXT: @ %bb.1: @ %for.cond.preheader
394 ; CHECK-NEXT: cmp r2, #0
395 ; CHECK-NEXT: beq .LBB3_8
396 ; CHECK-NEXT: @ %bb.2: @ %for.body.lr.ph
397 ; CHECK-NEXT: ldr r3, [sp, #64]
398 ; CHECK-NEXT: mov.w r9, #0
399 ; CHECK-NEXT: ldr r1, [sp, #16] @ 4-byte Reload
400 ; CHECK-NEXT: ldr.w r11, [sp, #56]
401 ; CHECK-NEXT: add.w r0, r1, r3, lsl #1
402 ; CHECK-NEXT: str r0, [sp, #8] @ 4-byte Spill
403 ; CHECK-NEXT: adds r0, r1, r3
404 ; CHECK-NEXT: str r0, [sp, #4] @ 4-byte Spill
405 ; CHECK-NEXT: add.w r0, r3, r3, lsl #1
406 ; CHECK-NEXT: add r0, r1
407 ; CHECK-NEXT: str r0, [sp] @ 4-byte Spill
408 ; CHECK-NEXT: adds r0, r3, #7
409 ; CHECK-NEXT: lsrs r0, r0, #3
410 ; CHECK-NEXT: .LBB3_3: @ %for.body
411 ; CHECK-NEXT: @ =>This Loop Header: Depth=1
412 ; CHECK-NEXT: @ Child Loop BB3_5 Depth 2
413 ; CHECK-NEXT: ldr r1, [sp, #68]
414 ; CHECK-NEXT: ldr.w r12, [r1, r9, lsl #2]
415 ; CHECK-NEXT: subs r1, r0, r0
416 ; CHECK-NEXT: ble .LBB3_6
417 ; CHECK-NEXT: @ %bb.4: @ %for.body24.preheader
418 ; CHECK-NEXT: @ in Loop: Header=BB3_3 Depth=1
419 ; CHECK-NEXT: ldr r7, [sp, #64]
420 ; CHECK-NEXT: mov r6, r12
421 ; CHECK-NEXT: ldr r3, [sp, #12] @ 4-byte Reload
422 ; CHECK-NEXT: dls lr, r1
423 ; CHECK-NEXT: ldr r1, [sp, #16] @ 4-byte Reload
424 ; CHECK-NEXT: mov r8, r12
425 ; CHECK-NEXT: mla r7, r9, r7, r3
426 ; CHECK-NEXT: ldr r5, [sp, #8] @ 4-byte Reload
427 ; CHECK-NEXT: ldrd r4, r3, [sp] @ 8-byte Folded Reload
428 ; CHECK-NEXT: mov r10, r12
429 ; CHECK-NEXT: .LBB3_5: @ %for.body24
430 ; CHECK-NEXT: @ Parent Loop BB3_3 Depth=1
431 ; CHECK-NEXT: @ => This Inner Loop Header: Depth=2
432 ; CHECK-NEXT: vldrb.s16 q0, [r4], #8
433 ; CHECK-NEXT: vadd.i16 q1, q0, r11
434 ; CHECK-NEXT: vldrb.s16 q0, [r7], #8
435 ; CHECK-NEXT: vmlava.s16 r12, q0, q1
436 ; CHECK-NEXT: vldrb.s16 q1, [r5], #8
437 ; CHECK-NEXT: vadd.i16 q1, q1, r11
438 ; CHECK-NEXT: vmlava.s16 r6, q0, q1
439 ; CHECK-NEXT: vldrb.s16 q1, [r3], #8
440 ; CHECK-NEXT: vadd.i16 q1, q1, r11
441 ; CHECK-NEXT: vmlava.s16 r8, q0, q1
442 ; CHECK-NEXT: vldrb.s16 q1, [r1], #8
443 ; CHECK-NEXT: vadd.i16 q1, q1, r11
444 ; CHECK-NEXT: vmlava.s16 r10, q0, q1
445 ; CHECK-NEXT: le lr, .LBB3_5
446 ; CHECK-NEXT: b .LBB3_7
447 ; CHECK-NEXT: .LBB3_6: @ in Loop: Header=BB3_3 Depth=1
448 ; CHECK-NEXT: mov r10, r12
449 ; CHECK-NEXT: mov r8, r12
450 ; CHECK-NEXT: mov r6, r12
451 ; CHECK-NEXT: .LBB3_7: @ %for.cond.cleanup23
452 ; CHECK-NEXT: @ in Loop: Header=BB3_3 Depth=1
453 ; CHECK-NEXT: ldr r3, [sp, #72]
454 ; CHECK-NEXT: add.w r1, r8, r10
455 ; CHECK-NEXT: add r1, r6
456 ; CHECK-NEXT: add r1, r12
457 ; CHECK-NEXT: strb.w r1, [r3, r9]
458 ; CHECK-NEXT: add.w r9, r9, #1
459 ; CHECK-NEXT: cmp r9, r2
460 ; CHECK-NEXT: bne .LBB3_3
461 ; CHECK-NEXT: .LBB3_8: @ %if.end
462 ; CHECK-NEXT: ldr r0, [sp, #72]
463 ; CHECK-NEXT: add sp, #20
464 ; CHECK-NEXT: pop.w {r4, r5, r6, r7, r8, r9, r10, r11, pc}
; entry: only the num_cols == 4 case is handled; otherwise fall straight
; through to if.end. (IR body matches the previous function; only the
; optsize attribute differs, to exercise code-size-driven layout.)
466 %cmp = icmp eq i16 %num_cols, 4
467 br i1 %cmp, label %for.cond.preheader, label %if.end
469 for.cond.preheader: ; preds = %entry
470 %conv2 = zext i16 %output_ch to i32
471 %cmp3114 = icmp eq i16 %output_ch, 0
472 br i1 %cmp3114, label %if.end, label %for.body.lr.ph
; for.body.lr.ph: precompute the four column base pointers
; (input_col + {0,1,2,3} * row_len) and the splat of col_offset.
474 for.body.lr.ph: ; preds = %for.cond.preheader
475 %conv5 = zext i16 %row_len to i32
476 %add.ptr9 = getelementptr inbounds i8, ptr %input_col, i32 %conv5
477 %mul11 = shl nuw nsw i32 %conv5, 1
478 %add.ptr12 = getelementptr inbounds i8, ptr %input_col, i32 %mul11
479 %mul14 = mul nuw nsw i32 %conv5, 3
480 %add.ptr15 = getelementptr inbounds i8, ptr %input_col, i32 %mul14
481 %add = add nuw nsw i32 %conv5, 7
482 %div = lshr i32 %add, 3
483 %conv25 = trunc i32 %col_offset to i16
484 %.splatinsert = insertelement <8 x i16> undef, i16 %conv25, i32 0
485 %.splat = shufflevector <8 x i16> %.splatinsert, <8 x i16> undef, <8 x i32> zeroinitializer
; for.body: outer loop over output channels; all four accumulators are
; seeded from the bias value %0 loaded for this channel.
488 for.body: ; preds = %for.cond.cleanup23, %for.body.lr.ph
489 %i_out_ch.0116 = phi i32 [ 0, %for.body.lr.ph ], [ %inc37, %for.cond.cleanup23 ]
490 %i_row_loop.0115 = phi i32 [ undef, %for.body.lr.ph ], [ %i_row_loop.1.lcssa, %for.cond.cleanup23 ]
491 %arrayidx = getelementptr inbounds i32, ptr %bias, i32 %i_out_ch.0116
492 %0 = load i32, ptr %arrayidx, align 4
493 %cmp2199 = icmp slt i32 %i_row_loop.0115, %div
494 br i1 %cmp2199, label %for.body24.preheader, label %for.cond.cleanup23
496 for.body24.preheader: ; preds = %for.body
497 %mul = mul nuw nsw i32 %i_out_ch.0116, %conv5
498 %add.ptr = getelementptr inbounds i8, ptr %input_row, i32 %mul
; for.cond.cleanup23: sum the four accumulators, truncate to i8 and store
; to out[i_out_ch].
501 for.cond.cleanup23: ; preds = %for.body24, %for.body
502 %acc_0.0.lcssa = phi i32 [ %0, %for.body ], [ %20, %for.body24 ]
503 %acc_1.0.lcssa = phi i32 [ %0, %for.body ], [ %21, %for.body24 ]
504 %acc_2.0.lcssa = phi i32 [ %0, %for.body ], [ %22, %for.body24 ]
505 %acc_3.0.lcssa = phi i32 [ %0, %for.body ], [ %23, %for.body24 ]
506 %i_row_loop.1.lcssa = phi i32 [ %i_row_loop.0115, %for.body ], [ %div, %for.body24 ]
507 %add31 = add nsw i32 %acc_1.0.lcssa, %acc_0.0.lcssa
508 %add32 = add nsw i32 %add31, %acc_2.0.lcssa
509 %add33 = add nsw i32 %add32, %acc_3.0.lcssa
510 %conv34 = trunc i32 %add33 to i8
511 %arrayidx35 = getelementptr inbounds i8, ptr %out, i32 %i_out_ch.0116
512 store i8 %conv34, ptr %arrayidx35, align 1
513 %inc37 = add nuw nsw i32 %i_out_ch.0116, 1
514 %exitcond120 = icmp eq i32 %inc37, %conv2
515 br i1 %exitcond120, label %if.end, label %for.body
; for.body24: inner loop; four vmldava accumulations that share the single
; row load %3, each against one post-incremented column pointer.
517 for.body24: ; preds = %for.body24, %for.body24.preheader
518 %ip_r0.0109 = phi ptr [ %add.ptr26, %for.body24 ], [ %add.ptr, %for.body24.preheader ]
519 %ip_c0.0108 = phi ptr [ %add.ptr27, %for.body24 ], [ %input_col, %for.body24.preheader ]
520 %ip_c1.0107 = phi ptr [ %add.ptr28, %for.body24 ], [ %add.ptr9, %for.body24.preheader ]
521 %ip_c2.0106 = phi ptr [ %add.ptr29, %for.body24 ], [ %add.ptr12, %for.body24.preheader ]
522 %i_row_loop.1105 = phi i32 [ %inc, %for.body24 ], [ %i_row_loop.0115, %for.body24.preheader ]
523 %ip_c3.0104 = phi ptr [ %add.ptr30, %for.body24 ], [ %add.ptr15, %for.body24.preheader ]
524 %acc_3.0103 = phi i32 [ %23, %for.body24 ], [ %0, %for.body24.preheader ]
525 %acc_2.0102 = phi i32 [ %22, %for.body24 ], [ %0, %for.body24.preheader ]
526 %acc_1.0101 = phi i32 [ %21, %for.body24 ], [ %0, %for.body24.preheader ]
527 %acc_0.0100 = phi i32 [ %20, %for.body24 ], [ %0, %for.body24.preheader ]
528 %1 = bitcast ptr %ip_r0.0109 to ptr
529 %2 = load <8 x i8>, ptr %1, align 1
530 %3 = sext <8 x i8> %2 to <8 x i16>
531 %add.ptr26 = getelementptr inbounds i8, ptr %ip_r0.0109, i32 8
532 %4 = bitcast ptr %ip_c0.0108 to ptr
533 %5 = load <8 x i8>, ptr %4, align 1
534 %6 = sext <8 x i8> %5 to <8 x i16>
535 %add.ptr27 = getelementptr inbounds i8, ptr %ip_c0.0108, i32 8
536 %7 = add <8 x i16> %.splat, %6
537 %8 = bitcast ptr %ip_c1.0107 to ptr
538 %9 = load <8 x i8>, ptr %8, align 1
539 %10 = sext <8 x i8> %9 to <8 x i16>
540 %add.ptr28 = getelementptr inbounds i8, ptr %ip_c1.0107, i32 8
541 %11 = add <8 x i16> %.splat, %10
542 %12 = bitcast ptr %ip_c2.0106 to ptr
543 %13 = load <8 x i8>, ptr %12, align 1
544 %14 = sext <8 x i8> %13 to <8 x i16>
545 %add.ptr29 = getelementptr inbounds i8, ptr %ip_c2.0106, i32 8
546 %15 = add <8 x i16> %.splat, %14
547 %16 = bitcast ptr %ip_c3.0104 to ptr
548 %17 = load <8 x i8>, ptr %16, align 1
549 %18 = sext <8 x i8> %17 to <8 x i16>
550 %add.ptr30 = getelementptr inbounds i8, ptr %ip_c3.0104, i32 8
551 %19 = add <8 x i16> %.splat, %18
552 %20 = tail call i32 @llvm.arm.mve.vmldava.v8i16(i32 0, i32 0, i32 0, i32 %acc_0.0100, <8 x i16> %3, <8 x i16> %7)
553 %21 = tail call i32 @llvm.arm.mve.vmldava.v8i16(i32 0, i32 0, i32 0, i32 %acc_1.0101, <8 x i16> %3, <8 x i16> %11)
554 %22 = tail call i32 @llvm.arm.mve.vmldava.v8i16(i32 0, i32 0, i32 0, i32 %acc_2.0102, <8 x i16> %3, <8 x i16> %15)
555 %23 = tail call i32 @llvm.arm.mve.vmldava.v8i16(i32 0, i32 0, i32 0, i32 %acc_3.0103, <8 x i16> %3, <8 x i16> %19)
556 %inc = add nsw i32 %i_row_loop.1105, 1
557 %exitcond = icmp eq i32 %inc, %div
558 br i1 %exitcond, label %for.cond.cleanup23, label %for.body24
560 if.end: ; preds = %for.cond.cleanup23, %for.cond.preheader, %entry
565 ; Similar but predicated
566 define i32 @arm_nn_mat_mul_core_4x_s8(i32 %row_elements, i32 %offset, ptr %row_base, ptr %col_base, ptr nocapture readnone %sum_col, ptr nocapture %output) {
567 ; CHECK-LABEL: arm_nn_mat_mul_core_4x_s8:
568 ; CHECK: @ %bb.0: @ %entry
569 ; CHECK-NEXT: .save {r4, r5, r6, r7, r8, r10, lr}
570 ; CHECK-NEXT: push.w {r4, r5, r6, r7, r8, r10, lr}
571 ; CHECK-NEXT: ldr.w r12, [sp, #32]
572 ; CHECK-NEXT: cmp r0, #1
573 ; CHECK-NEXT: blt .LBB4_3
574 ; CHECK-NEXT: @ %bb.1: @ %for.body.preheader
575 ; CHECK-NEXT: add.w r5, r2, r1, lsl #1
576 ; CHECK-NEXT: mov.w r8, #0
577 ; CHECK-NEXT: movs r4, #0
578 ; CHECK-NEXT: mov.w r10, #0
579 ; CHECK-NEXT: movs r6, #0
580 ; CHECK-NEXT: adds r7, r2, r1
581 ; CHECK-NEXT: add.w r1, r1, r1, lsl #1
582 ; CHECK-NEXT: add r1, r2
583 ; CHECK-NEXT: dlstp.8 lr, r0
584 ; CHECK-NEXT: .LBB4_2: @ %for.body
585 ; CHECK-NEXT: @ =>This Inner Loop Header: Depth=1
586 ; CHECK-NEXT: vldrb.u8 q0, [r3], #16
587 ; CHECK-NEXT: vldrb.u8 q1, [r1], #16
588 ; CHECK-NEXT: vmlava.s8 r10, q1, q0
589 ; CHECK-NEXT: vldrb.u8 q1, [r5], #16
590 ; CHECK-NEXT: vmlava.s8 r4, q1, q0
591 ; CHECK-NEXT: vldrb.u8 q1, [r7], #16
592 ; CHECK-NEXT: vmlava.s8 r6, q1, q0
593 ; CHECK-NEXT: vldrb.u8 q1, [r2], #16
594 ; CHECK-NEXT: vmlava.s8 r8, q1, q0
595 ; CHECK-NEXT: letp lr, .LBB4_2
596 ; CHECK-NEXT: b .LBB4_4
597 ; CHECK-NEXT: .LBB4_3:
598 ; CHECK-NEXT: mov.w r10, #0
599 ; CHECK-NEXT: movs r4, #0
600 ; CHECK-NEXT: movs r6, #0
601 ; CHECK-NEXT: mov.w r8, #0
602 ; CHECK-NEXT: .LBB4_4: @ %for.cond.cleanup
603 ; CHECK-NEXT: movs r0, #0
604 ; CHECK-NEXT: strd r8, r6, [r12]
605 ; CHECK-NEXT: strd r4, r10, [r12, #8]
606 ; CHECK-NEXT: pop.w {r4, r5, r6, r7, r8, r10, pc}
; entry: iterate ceil(row_elements / 16) times (see %div below); skip the
; loop entirely when row_elements <= 0.
608 %add = add nsw i32 %row_elements, 15
609 %div = sdiv i32 %add, 16
610 %cmp84 = icmp sgt i32 %row_elements, 0
611 br i1 %cmp84, label %for.body.preheader, label %for.cond.cleanup
; for.body.preheader: the three extra row pointers are row_base plus
; 1x, 2x and 3x offset; %smax clamps the trip count to at least 1.
613 for.body.preheader: ; preds = %entry
614 %mul2 = mul nsw i32 %offset, 3
615 %add.ptr3 = getelementptr inbounds i8, ptr %row_base, i32 %mul2
616 %mul = shl nsw i32 %offset, 1
617 %add.ptr1 = getelementptr inbounds i8, ptr %row_base, i32 %mul
618 %add.ptr = getelementptr inbounds i8, ptr %row_base, i32 %offset
619 %0 = icmp sgt i32 %div, 1
620 %smax = select i1 %0, i32 %div, i32 1
; for.cond.cleanup: store the four accumulators (0 when the loop never ran)
; to output[0..3].
623 for.cond.cleanup: ; preds = %for.body, %entry
624 %acc_n.sroa.12.0.lcssa = phi i32 [ 0, %entry ], [ %15, %for.body ]
625 %acc_n.sroa.9.0.lcssa = phi i32 [ 0, %entry ], [ %12, %for.body ]
626 %acc_n.sroa.6.0.lcssa = phi i32 [ 0, %entry ], [ %9, %for.body ]
627 %acc_n.sroa.0.0.lcssa = phi i32 [ 0, %entry ], [ %6, %for.body ]
628 store i32 %acc_n.sroa.0.0.lcssa, ptr %output, align 4
629 %arrayidx19 = getelementptr inbounds i32, ptr %output, i32 1
630 store i32 %acc_n.sroa.6.0.lcssa, ptr %arrayidx19, align 4
631 %arrayidx21 = getelementptr inbounds i32, ptr %output, i32 2
632 store i32 %acc_n.sroa.9.0.lcssa, ptr %arrayidx21, align 4
633 %arrayidx23 = getelementptr inbounds i32, ptr %output, i32 3
634 store i32 %acc_n.sroa.12.0.lcssa, ptr %arrayidx23, align 4
; for.body: VCTP-predicated loop; %1 masks off lanes beyond the remaining
; row_elem count, so each iteration consumes up to 16 bytes from the column
; pointer and from each of the four row pointers, accumulating via
; predicated vmldava.
637 for.body: ; preds = %for.body, %for.body.preheader
638 %col_base.addr.095 = phi ptr [ %add.ptr4, %for.body ], [ %col_base, %for.body.preheader ]
639 %acc_n.sroa.0.094 = phi i32 [ %6, %for.body ], [ 0, %for.body.preheader ]
640 %acc_n.sroa.6.093 = phi i32 [ %9, %for.body ], [ 0, %for.body.preheader ]
641 %acc_n.sroa.9.092 = phi i32 [ %12, %for.body ], [ 0, %for.body.preheader ]
642 %i.091 = phi i32 [ %inc, %for.body ], [ 0, %for.body.preheader ]
643 %row_elem.090 = phi i32 [ %sub, %for.body ], [ %row_elements, %for.body.preheader ]
644 %acc_n.sroa.12.089 = phi i32 [ %15, %for.body ], [ 0, %for.body.preheader ]
645 %ip_row_3.088 = phi ptr [ %add.ptr15, %for.body ], [ %add.ptr3, %for.body.preheader ]
646 %ip_row_2.087 = phi ptr [ %add.ptr14, %for.body ], [ %add.ptr1, %for.body.preheader ]
647 %ip_row_1.086 = phi ptr [ %add.ptr13, %for.body ], [ %add.ptr, %for.body.preheader ]
648 %ip_row_0.085 = phi ptr [ %add.ptr12, %for.body ], [ %row_base, %for.body.preheader ]
649 %1 = tail call <16 x i1> @llvm.arm.mve.vctp8(i32 %row_elem.090)
650 %sub = add nsw i32 %row_elem.090, -16
651 %2 = bitcast ptr %col_base.addr.095 to ptr
652 %3 = tail call <16 x i8> @llvm.masked.load.v16i8.p0(ptr %2, i32 1, <16 x i1> %1, <16 x i8> zeroinitializer)
653 %add.ptr4 = getelementptr inbounds i8, ptr %col_base.addr.095, i32 16
654 %4 = bitcast ptr %ip_row_0.085 to ptr
655 %5 = tail call <16 x i8> @llvm.masked.load.v16i8.p0(ptr %4, i32 1, <16 x i1> %1, <16 x i8> zeroinitializer)
656 %6 = tail call i32 @llvm.arm.mve.vmldava.predicated.v16i8.v16i1(i32 0, i32 0, i32 0, i32 %acc_n.sroa.0.094, <16 x i8> %5, <16 x i8> %3, <16 x i1> %1)
657 %7 = bitcast ptr %ip_row_1.086 to ptr
658 %8 = tail call <16 x i8> @llvm.masked.load.v16i8.p0(ptr %7, i32 1, <16 x i1> %1, <16 x i8> zeroinitializer)
659 %9 = tail call i32 @llvm.arm.mve.vmldava.predicated.v16i8.v16i1(i32 0, i32 0, i32 0, i32 %acc_n.sroa.6.093, <16 x i8> %8, <16 x i8> %3, <16 x i1> %1)
660 %10 = bitcast ptr %ip_row_2.087 to ptr
661 %11 = tail call <16 x i8> @llvm.masked.load.v16i8.p0(ptr %10, i32 1, <16 x i1> %1, <16 x i8> zeroinitializer)
662 %12 = tail call i32 @llvm.arm.mve.vmldava.predicated.v16i8.v16i1(i32 0, i32 0, i32 0, i32 %acc_n.sroa.9.092, <16 x i8> %11, <16 x i8> %3, <16 x i1> %1)
663 %13 = bitcast ptr %ip_row_3.088 to ptr
664 %14 = tail call <16 x i8> @llvm.masked.load.v16i8.p0(ptr %13, i32 1, <16 x i1> %1, <16 x i8> zeroinitializer)
665 %15 = tail call i32 @llvm.arm.mve.vmldava.predicated.v16i8.v16i1(i32 0, i32 0, i32 0, i32 %acc_n.sroa.12.089, <16 x i8> %14, <16 x i8> %3, <16 x i1> %1)
666 %add.ptr12 = getelementptr inbounds i8, ptr %ip_row_0.085, i32 16
667 %add.ptr13 = getelementptr inbounds i8, ptr %ip_row_1.086, i32 16
668 %add.ptr14 = getelementptr inbounds i8, ptr %ip_row_2.087, i32 16
669 %add.ptr15 = getelementptr inbounds i8, ptr %ip_row_3.088, i32 16
670 %inc = add nuw nsw i32 %i.091, 1
671 %exitcond = icmp eq i32 %inc, %smax
672 br i1 %exitcond, label %for.cond.cleanup, label %for.body
; Tests codegen for a 4-column s8 dot-product kernel (CMSIS-NN style): one
; row pointer and four column pointers are walked together, and the inner
; reduction is lowered to an MVE tail-predicated loop (dlstp.16/letp with
; post-incremented vldrb.s16 loads in the CHECK lines below).
; NOTE(review): the CHECK block is autogenerated by update_llc_test_checks.py
; (see file header) -- regenerate rather than hand-edit it.
675 define ptr @signext(ptr %input_row, ptr %input_col, i16 zeroext %output_ch, i16 zeroext %num_cols, ptr nocapture readnone %output_shift, ptr nocapture readnone %output_mult, i32 %out_offset, i32 %col_offset, i32 %row_offset, i16 signext %activation_min, i16 signext %activation_max, i16 zeroext %row_len, ptr nocapture readonly %bias, ptr returned %out) {
676 ; CHECK-LABEL: signext:
677 ; CHECK: @ %bb.0: @ %entry
678 ; CHECK-NEXT: .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
679 ; CHECK-NEXT: push.w {r4, r5, r6, r7, r8, r9, r10, r11, lr}
680 ; CHECK-NEXT: .pad #28
681 ; CHECK-NEXT: sub sp, #28
682 ; CHECK-NEXT: add.w r12, sp, #12
683 ; CHECK-NEXT: cmp r3, #4
684 ; CHECK-NEXT: stm.w r12, {r0, r1, r2} @ 12-byte Folded Spill
685 ; CHECK-NEXT: bne .LBB5_8
686 ; CHECK-NEXT: @ %bb.1: @ %for.cond.preheader
687 ; CHECK-NEXT: ldr r0, [sp, #20] @ 4-byte Reload
688 ; CHECK-NEXT: cmp r0, #0
689 ; CHECK-NEXT: beq .LBB5_8
690 ; CHECK-NEXT: @ %bb.2: @ %for.body.lr.ph
691 ; CHECK-NEXT: ldr r2, [sp, #92]
692 ; CHECK-NEXT: mov.w r11, #0
693 ; CHECK-NEXT: ldr r1, [sp, #16] @ 4-byte Reload
694 ; CHECK-NEXT: ldr r4, [sp, #76]
695 ; CHECK-NEXT: add.w r0, r1, r2, lsl #1
696 ; CHECK-NEXT: str r0, [sp, #8] @ 4-byte Spill
697 ; CHECK-NEXT: adds r0, r1, r2
698 ; CHECK-NEXT: str r0, [sp, #4] @ 4-byte Spill
699 ; CHECK-NEXT: add.w r0, r2, r2, lsl #1
700 ; CHECK-NEXT: add r0, r1
701 ; CHECK-NEXT: str r0, [sp] @ 4-byte Spill
702 ; CHECK-NEXT: adds r0, r2, #7
703 ; CHECK-NEXT: lsrs r1, r0, #3
704 ; CHECK-NEXT: b .LBB5_5
705 ; CHECK-NEXT: .LBB5_3: @ in Loop: Header=BB5_5 Depth=1
706 ; CHECK-NEXT: mov r10, r12
707 ; CHECK-NEXT: mov r8, r12
708 ; CHECK-NEXT: mov r6, r12
709 ; CHECK-NEXT: .LBB5_4: @ %for.cond.cleanup23
710 ; CHECK-NEXT: @ in Loop: Header=BB5_5 Depth=1
711 ; CHECK-NEXT: add.w r0, r8, r10
712 ; CHECK-NEXT: ldr r1, [sp, #100]
713 ; CHECK-NEXT: add r0, r6
714 ; CHECK-NEXT: add r0, r12
715 ; CHECK-NEXT: strb.w r0, [r1, r11]
716 ; CHECK-NEXT: add.w r11, r11, #1
717 ; CHECK-NEXT: ldr r0, [sp, #20] @ 4-byte Reload
718 ; CHECK-NEXT: ldr r1, [sp, #24] @ 4-byte Reload
719 ; CHECK-NEXT: cmp r11, r0
720 ; CHECK-NEXT: beq .LBB5_8
721 ; CHECK-NEXT: .LBB5_5: @ %for.body
722 ; CHECK-NEXT: @ =>This Loop Header: Depth=1
723 ; CHECK-NEXT: @ Child Loop BB5_7 Depth 2
724 ; CHECK-NEXT: ldr r0, [sp, #96]
725 ; CHECK-NEXT: cmp r1, r1
726 ; CHECK-NEXT: str r1, [sp, #24] @ 4-byte Spill
727 ; CHECK-NEXT: ldr.w r12, [r0, r11, lsl #2]
728 ; CHECK-NEXT: ble .LBB5_3
729 ; CHECK-NEXT: @ %bb.6: @ %for.body24.preheader
730 ; CHECK-NEXT: @ in Loop: Header=BB5_5 Depth=1
731 ; CHECK-NEXT: ldr.w lr, [sp, #92]
732 ; CHECK-NEXT: ldr r0, [sp, #12] @ 4-byte Reload
733 ; CHECK-NEXT: mov r6, r12
734 ; CHECK-NEXT: ldr r1, [sp, #16] @ 4-byte Reload
735 ; CHECK-NEXT: mov r8, r12
736 ; CHECK-NEXT: mla r3, r11, lr, r0
737 ; CHECK-NEXT: mov r10, r12
738 ; CHECK-NEXT: ldm.w sp, {r0, r5, r7} @ 12-byte Folded Reload
739 ; CHECK-NEXT: dlstp.16 lr, lr
740 ; CHECK-NEXT: .LBB5_7: @ %for.body24
741 ; CHECK-NEXT: @ Parent Loop BB5_5 Depth=1
742 ; CHECK-NEXT: @ => This Inner Loop Header: Depth=2
743 ; CHECK-NEXT: vldrb.s16 q0, [r0], #8
744 ; CHECK-NEXT: vadd.i16 q1, q0, r4
745 ; CHECK-NEXT: vldrb.s16 q0, [r3], #8
746 ; CHECK-NEXT: vmlava.s16 r12, q0, q1
747 ; CHECK-NEXT: vldrb.s16 q1, [r7], #8
748 ; CHECK-NEXT: vadd.i16 q1, q1, r4
749 ; CHECK-NEXT: vmlava.s16 r6, q0, q1
750 ; CHECK-NEXT: vldrb.s16 q1, [r5], #8
751 ; CHECK-NEXT: vadd.i16 q1, q1, r4
752 ; CHECK-NEXT: vmlava.s16 r8, q0, q1
753 ; CHECK-NEXT: vldrb.s16 q1, [r1], #8
754 ; CHECK-NEXT: vadd.i16 q1, q1, r4
755 ; CHECK-NEXT: vmlava.s16 r10, q0, q1
756 ; CHECK-NEXT: letp lr, .LBB5_7
757 ; CHECK-NEXT: b .LBB5_4
758 ; CHECK-NEXT: .LBB5_8: @ %if.end
759 ; CHECK-NEXT: ldr r0, [sp, #100]
760 ; CHECK-NEXT: add sp, #28
761 ; CHECK-NEXT: pop.w {r4, r5, r6, r7, r8, r9, r10, r11, pc}
; entry: the loop nest is only reached when num_cols == 4 (and, below,
; output_ch != 0); otherwise fall straight through to if.end.
763 %cmp = icmp eq i16 %num_cols, 4
764 br i1 %cmp, label %for.cond.preheader, label %if.end
766 for.cond.preheader: ; preds = %entry
767 %conv2 = zext i16 %output_ch to i32
768 %cmp3127 = icmp eq i16 %output_ch, 0
769 br i1 %cmp3127, label %if.end, label %for.body.lr.ph
; Precompute the four column base pointers (input_col + 0/1/2/3 * row_len),
; the inner trip count %div = ceil(row_len / 8), and the <8 x i16> splat of
; the truncated col_offset used by the predicated adds below.
771 for.body.lr.ph: ; preds = %for.cond.preheader
772 %conv5 = zext i16 %row_len to i32
773 %add.ptr9 = getelementptr inbounds i8, ptr %input_col, i32 %conv5
774 %mul11 = shl nuw nsw i32 %conv5, 1
775 %add.ptr12 = getelementptr inbounds i8, ptr %input_col, i32 %mul11
776 %mul14 = mul nuw nsw i32 %conv5, 3
777 %add.ptr15 = getelementptr inbounds i8, ptr %input_col, i32 %mul14
778 %add = add nuw nsw i32 %conv5, 7
779 %div = lshr i32 %add, 3
780 %conv25 = trunc i32 %col_offset to i16
781 %.splatinsert.i = insertelement <8 x i16> undef, i16 %conv25, i32 0
782 %.splat.i = shufflevector <8 x i16> %.splatinsert.i, <8 x i16> undef, <8 x i32> zeroinitializer
; Outer loop over output channels; all four accumulators are seeded from
; bias[i_out_ch]. %i_row_loop starts at undef, so the first iteration's
; slt-vs-%div compare is intentionally unconstrained.
785 for.body: ; preds = %for.cond.cleanup23, %for.body.lr.ph
786 %i_out_ch.0129 = phi i32 [ 0, %for.body.lr.ph ], [ %inc37, %for.cond.cleanup23 ]
787 %i_row_loop.0128 = phi i32 [ undef, %for.body.lr.ph ], [ %i_row_loop.1.lcssa, %for.cond.cleanup23 ]
788 %arrayidx = getelementptr inbounds i32, ptr %bias, i32 %i_out_ch.0129
789 %0 = load i32, ptr %arrayidx, align 4
790 %cmp21111 = icmp slt i32 %i_row_loop.0128, %div
791 br i1 %cmp21111, label %for.body24.preheader, label %for.cond.cleanup23
793 for.body24.preheader: ; preds = %for.body
794 %mul = mul nuw nsw i32 %i_out_ch.0129, %conv5
795 %add.ptr = getelementptr inbounds i8, ptr %input_row, i32 %mul
; Sum the four accumulators, truncate to i8 and store to out[i_out_ch].
798 for.cond.cleanup23: ; preds = %for.body24, %for.body
799 %acc_0.0.lcssa = phi i32 [ %0, %for.body ], [ %21, %for.body24 ]
800 %acc_1.0.lcssa = phi i32 [ %0, %for.body ], [ %22, %for.body24 ]
801 %acc_2.0.lcssa = phi i32 [ %0, %for.body ], [ %23, %for.body24 ]
802 %acc_3.0.lcssa = phi i32 [ %0, %for.body ], [ %24, %for.body24 ]
803 %i_row_loop.1.lcssa = phi i32 [ %i_row_loop.0128, %for.body ], [ %div, %for.body24 ]
804 %add31 = add nsw i32 %acc_1.0.lcssa, %acc_0.0.lcssa
805 %add32 = add nsw i32 %add31, %acc_2.0.lcssa
806 %add33 = add nsw i32 %add32, %acc_3.0.lcssa
807 %conv34 = trunc i32 %add33 to i8
808 %arrayidx35 = getelementptr inbounds i8, ptr %out, i32 %i_out_ch.0129
809 store i8 %conv34, ptr %arrayidx35, align 1
810 %inc37 = add nuw nsw i32 %i_out_ch.0129, 1
811 %exitcond133 = icmp eq i32 %inc37, %conv2
812 br i1 %exitcond133, label %if.end, label %for.body
; Inner reduction: vctp16-predicated masked loads of one row vector and four
; column vectors, col_offset added to each column under the same predicate,
; then four vmldava accumulations. This predicate pattern is what gets
; converted to the tail-predicated dlstp.16/letp hardware loop above.
814 for.body24: ; preds = %for.body24, %for.body24.preheader
815 %row_len_tmp.0122 = phi i32 [ %sub, %for.body24 ], [ %conv5, %for.body24.preheader ]
816 %ip_r0.0121 = phi ptr [ %add.ptr26, %for.body24 ], [ %add.ptr, %for.body24.preheader ]
817 %ip_c0.0120 = phi ptr [ %add.ptr27, %for.body24 ], [ %input_col, %for.body24.preheader ]
818 %ip_c1.0119 = phi ptr [ %add.ptr28, %for.body24 ], [ %add.ptr9, %for.body24.preheader ]
819 %ip_c2.0118 = phi ptr [ %add.ptr29, %for.body24 ], [ %add.ptr12, %for.body24.preheader ]
820 %i_row_loop.1117 = phi i32 [ %inc, %for.body24 ], [ %i_row_loop.0128, %for.body24.preheader ]
821 %ip_c3.0116 = phi ptr [ %add.ptr30, %for.body24 ], [ %add.ptr15, %for.body24.preheader ]
822 %acc_3.0115 = phi i32 [ %24, %for.body24 ], [ %0, %for.body24.preheader ]
823 %acc_2.0114 = phi i32 [ %23, %for.body24 ], [ %0, %for.body24.preheader ]
824 %acc_1.0113 = phi i32 [ %22, %for.body24 ], [ %0, %for.body24.preheader ]
825 %acc_0.0112 = phi i32 [ %21, %for.body24 ], [ %0, %for.body24.preheader ]
826 %1 = tail call <8 x i1> @llvm.arm.mve.vctp16(i32 %row_len_tmp.0122)
827 %sub = add nsw i32 %row_len_tmp.0122, -8
828 %2 = bitcast ptr %ip_r0.0121 to ptr
829 %3 = tail call <8 x i8> @llvm.masked.load.v8i8.p0(ptr %2, i32 1, <8 x i1> %1, <8 x i8> zeroinitializer)
830 %4 = sext <8 x i8> %3 to <8 x i16>
831 %add.ptr26 = getelementptr inbounds i8, ptr %ip_r0.0121, i32 8
832 %5 = bitcast ptr %ip_c0.0120 to ptr
833 %6 = tail call <8 x i8> @llvm.masked.load.v8i8.p0(ptr %5, i32 1, <8 x i1> %1, <8 x i8> zeroinitializer)
834 %7 = sext <8 x i8> %6 to <8 x i16>
835 %add.ptr27 = getelementptr inbounds i8, ptr %ip_c0.0120, i32 8
836 %8 = tail call <8 x i16> @llvm.arm.mve.add.predicated.v8i16.v8i1(<8 x i16> %7, <8 x i16> %.splat.i, <8 x i1> %1, <8 x i16> undef)
837 %9 = bitcast ptr %ip_c1.0119 to ptr
838 %10 = tail call <8 x i8> @llvm.masked.load.v8i8.p0(ptr %9, i32 1, <8 x i1> %1, <8 x i8> zeroinitializer)
839 %11 = sext <8 x i8> %10 to <8 x i16>
840 %add.ptr28 = getelementptr inbounds i8, ptr %ip_c1.0119, i32 8
841 %12 = tail call <8 x i16> @llvm.arm.mve.add.predicated.v8i16.v8i1(<8 x i16> %11, <8 x i16> %.splat.i, <8 x i1> %1, <8 x i16> undef)
842 %13 = bitcast ptr %ip_c2.0118 to ptr
843 %14 = tail call <8 x i8> @llvm.masked.load.v8i8.p0(ptr %13, i32 1, <8 x i1> %1, <8 x i8> zeroinitializer)
844 %15 = sext <8 x i8> %14 to <8 x i16>
845 %add.ptr29 = getelementptr inbounds i8, ptr %ip_c2.0118, i32 8
846 %16 = tail call <8 x i16> @llvm.arm.mve.add.predicated.v8i16.v8i1(<8 x i16> %15, <8 x i16> %.splat.i, <8 x i1> %1, <8 x i16> undef)
847 %17 = bitcast ptr %ip_c3.0116 to ptr
848 %18 = tail call <8 x i8> @llvm.masked.load.v8i8.p0(ptr %17, i32 1, <8 x i1> %1, <8 x i8> zeroinitializer)
849 %19 = sext <8 x i8> %18 to <8 x i16>
850 %add.ptr30 = getelementptr inbounds i8, ptr %ip_c3.0116, i32 8
851 %20 = tail call <8 x i16> @llvm.arm.mve.add.predicated.v8i16.v8i1(<8 x i16> %19, <8 x i16> %.splat.i, <8 x i1> %1, <8 x i16> undef)
852 %21 = tail call i32 @llvm.arm.mve.vmldava.predicated.v8i16.v8i1(i32 0, i32 0, i32 0, i32 %acc_0.0112, <8 x i16> %4, <8 x i16> %8, <8 x i1> %1)
853 %22 = tail call i32 @llvm.arm.mve.vmldava.predicated.v8i16.v8i1(i32 0, i32 0, i32 0, i32 %acc_1.0113, <8 x i16> %4, <8 x i16> %12, <8 x i1> %1)
854 %23 = tail call i32 @llvm.arm.mve.vmldava.predicated.v8i16.v8i1(i32 0, i32 0, i32 0, i32 %acc_2.0114, <8 x i16> %4, <8 x i16> %16, <8 x i1> %1)
855 %24 = tail call i32 @llvm.arm.mve.vmldava.predicated.v8i16.v8i1(i32 0, i32 0, i32 0, i32 %acc_3.0115, <8 x i16> %4, <8 x i16> %20, <8 x i1> %1)
856 %inc = add nsw i32 %i_row_loop.1117, 1
857 %exitcond = icmp eq i32 %inc, %div
858 br i1 %exitcond, label %for.cond.cleanup23, label %for.body24
; NOTE(review): the terminator of if.end (presumably `ret ptr %out`, matching
; the `returned` attribute on %out) falls outside this sampled chunk.
860 if.end: ; preds = %for.cond.cleanup23, %for.cond.preheader, %entry
; optsize twin of @signext: identical IR plus the optsize attribute. The
; CHECK lines differ only in block layout -- there is no branch into the
; middle of the rotated loop (no `b .LBB_5` / duplicated exit); the outer
; loop is entered by fall-through at .LBB6_3 and closed by `bne .LBB6_3`.
; NOTE(review): CHECK block is autogenerated by update_llc_test_checks.py.
864 define ptr @signext_optsize(ptr %input_row, ptr %input_col, i16 zeroext %output_ch, i16 zeroext %num_cols, ptr nocapture readnone %output_shift, ptr nocapture readnone %output_mult, i32 %out_offset, i32 %col_offset, i32 %row_offset, i16 signext %activation_min, i16 signext %activation_max, i16 zeroext %row_len, ptr nocapture readonly %bias, ptr returned %out) optsize {
865 ; CHECK-LABEL: signext_optsize:
866 ; CHECK: @ %bb.0: @ %entry
867 ; CHECK-NEXT: .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
868 ; CHECK-NEXT: push.w {r4, r5, r6, r7, r8, r9, r10, r11, lr}
869 ; CHECK-NEXT: .pad #28
870 ; CHECK-NEXT: sub sp, #28
871 ; CHECK-NEXT: add.w r12, sp, #12
872 ; CHECK-NEXT: cmp r3, #4
873 ; CHECK-NEXT: stm.w r12, {r0, r1, r2} @ 12-byte Folded Spill
874 ; CHECK-NEXT: bne .LBB6_8
875 ; CHECK-NEXT: @ %bb.1: @ %for.cond.preheader
876 ; CHECK-NEXT: ldr r0, [sp, #20] @ 4-byte Reload
877 ; CHECK-NEXT: cmp r0, #0
878 ; CHECK-NEXT: beq .LBB6_8
879 ; CHECK-NEXT: @ %bb.2: @ %for.body.lr.ph
880 ; CHECK-NEXT: ldr r2, [sp, #92]
881 ; CHECK-NEXT: mov.w r11, #0
882 ; CHECK-NEXT: ldr r1, [sp, #16] @ 4-byte Reload
883 ; CHECK-NEXT: ldr r4, [sp, #76]
884 ; CHECK-NEXT: add.w r0, r1, r2, lsl #1
885 ; CHECK-NEXT: str r0, [sp, #8] @ 4-byte Spill
886 ; CHECK-NEXT: adds r0, r1, r2
887 ; CHECK-NEXT: str r0, [sp, #4] @ 4-byte Spill
888 ; CHECK-NEXT: add.w r0, r2, r2, lsl #1
889 ; CHECK-NEXT: add r0, r1
890 ; CHECK-NEXT: str r0, [sp] @ 4-byte Spill
891 ; CHECK-NEXT: adds r0, r2, #7
892 ; CHECK-NEXT: lsrs r1, r0, #3
893 ; CHECK-NEXT: .LBB6_3: @ %for.body
894 ; CHECK-NEXT: @ =>This Loop Header: Depth=1
895 ; CHECK-NEXT: @ Child Loop BB6_5 Depth 2
896 ; CHECK-NEXT: ldr r0, [sp, #96]
897 ; CHECK-NEXT: cmp r1, r1
898 ; CHECK-NEXT: str r1, [sp, #24] @ 4-byte Spill
899 ; CHECK-NEXT: ldr.w r12, [r0, r11, lsl #2]
900 ; CHECK-NEXT: ble .LBB6_6
901 ; CHECK-NEXT: @ %bb.4: @ %for.body24.preheader
902 ; CHECK-NEXT: @ in Loop: Header=BB6_3 Depth=1
903 ; CHECK-NEXT: ldr.w lr, [sp, #92]
904 ; CHECK-NEXT: ldr r0, [sp, #12] @ 4-byte Reload
905 ; CHECK-NEXT: mov r6, r12
906 ; CHECK-NEXT: ldr r1, [sp, #16] @ 4-byte Reload
907 ; CHECK-NEXT: mov r8, r12
908 ; CHECK-NEXT: mla r3, r11, lr, r0
909 ; CHECK-NEXT: mov r10, r12
910 ; CHECK-NEXT: ldm.w sp, {r0, r5, r7} @ 12-byte Folded Reload
911 ; CHECK-NEXT: dlstp.16 lr, lr
912 ; CHECK-NEXT: .LBB6_5: @ %for.body24
913 ; CHECK-NEXT: @ Parent Loop BB6_3 Depth=1
914 ; CHECK-NEXT: @ => This Inner Loop Header: Depth=2
915 ; CHECK-NEXT: vldrb.s16 q0, [r0], #8
916 ; CHECK-NEXT: vadd.i16 q1, q0, r4
917 ; CHECK-NEXT: vldrb.s16 q0, [r3], #8
918 ; CHECK-NEXT: vmlava.s16 r12, q0, q1
919 ; CHECK-NEXT: vldrb.s16 q1, [r7], #8
920 ; CHECK-NEXT: vadd.i16 q1, q1, r4
921 ; CHECK-NEXT: vmlava.s16 r6, q0, q1
922 ; CHECK-NEXT: vldrb.s16 q1, [r5], #8
923 ; CHECK-NEXT: vadd.i16 q1, q1, r4
924 ; CHECK-NEXT: vmlava.s16 r8, q0, q1
925 ; CHECK-NEXT: vldrb.s16 q1, [r1], #8
926 ; CHECK-NEXT: vadd.i16 q1, q1, r4
927 ; CHECK-NEXT: vmlava.s16 r10, q0, q1
928 ; CHECK-NEXT: letp lr, .LBB6_5
929 ; CHECK-NEXT: b .LBB6_7
930 ; CHECK-NEXT: .LBB6_6: @ in Loop: Header=BB6_3 Depth=1
931 ; CHECK-NEXT: mov r10, r12
932 ; CHECK-NEXT: mov r8, r12
933 ; CHECK-NEXT: mov r6, r12
934 ; CHECK-NEXT: .LBB6_7: @ %for.cond.cleanup23
935 ; CHECK-NEXT: @ in Loop: Header=BB6_3 Depth=1
936 ; CHECK-NEXT: add.w r0, r8, r10
937 ; CHECK-NEXT: ldr r1, [sp, #100]
938 ; CHECK-NEXT: add r0, r6
939 ; CHECK-NEXT: add r0, r12
940 ; CHECK-NEXT: strb.w r0, [r1, r11]
941 ; CHECK-NEXT: add.w r11, r11, #1
942 ; CHECK-NEXT: ldr r0, [sp, #20] @ 4-byte Reload
943 ; CHECK-NEXT: ldr r1, [sp, #24] @ 4-byte Reload
944 ; CHECK-NEXT: cmp r11, r0
945 ; CHECK-NEXT: bne .LBB6_3
946 ; CHECK-NEXT: .LBB6_8: @ %if.end
947 ; CHECK-NEXT: ldr r0, [sp, #100]
948 ; CHECK-NEXT: add sp, #28
949 ; CHECK-NEXT: pop.w {r4, r5, r6, r7, r8, r9, r10, r11, pc}
; entry: loops are only reached when num_cols == 4 and output_ch != 0.
951 %cmp = icmp eq i16 %num_cols, 4
952 br i1 %cmp, label %for.cond.preheader, label %if.end
954 for.cond.preheader: ; preds = %entry
955 %conv2 = zext i16 %output_ch to i32
956 %cmp3127 = icmp eq i16 %output_ch, 0
957 br i1 %cmp3127, label %if.end, label %for.body.lr.ph
; Column base pointers, inner trip count ceil(row_len/8), col_offset splat.
959 for.body.lr.ph: ; preds = %for.cond.preheader
960 %conv5 = zext i16 %row_len to i32
961 %add.ptr9 = getelementptr inbounds i8, ptr %input_col, i32 %conv5
962 %mul11 = shl nuw nsw i32 %conv5, 1
963 %add.ptr12 = getelementptr inbounds i8, ptr %input_col, i32 %mul11
964 %mul14 = mul nuw nsw i32 %conv5, 3
965 %add.ptr15 = getelementptr inbounds i8, ptr %input_col, i32 %mul14
966 %add = add nuw nsw i32 %conv5, 7
967 %div = lshr i32 %add, 3
968 %conv25 = trunc i32 %col_offset to i16
969 %.splatinsert.i = insertelement <8 x i16> undef, i16 %conv25, i32 0
970 %.splat.i = shufflevector <8 x i16> %.splatinsert.i, <8 x i16> undef, <8 x i32> zeroinitializer
; Outer loop over output channels; accumulators seeded from bias[i_out_ch].
973 for.body: ; preds = %for.cond.cleanup23, %for.body.lr.ph
974 %i_out_ch.0129 = phi i32 [ 0, %for.body.lr.ph ], [ %inc37, %for.cond.cleanup23 ]
975 %i_row_loop.0128 = phi i32 [ undef, %for.body.lr.ph ], [ %i_row_loop.1.lcssa, %for.cond.cleanup23 ]
976 %arrayidx = getelementptr inbounds i32, ptr %bias, i32 %i_out_ch.0129
977 %0 = load i32, ptr %arrayidx, align 4
978 %cmp21111 = icmp slt i32 %i_row_loop.0128, %div
979 br i1 %cmp21111, label %for.body24.preheader, label %for.cond.cleanup23
981 for.body24.preheader: ; preds = %for.body
982 %mul = mul nuw nsw i32 %i_out_ch.0129, %conv5
983 %add.ptr = getelementptr inbounds i8, ptr %input_row, i32 %mul
; Sum the four accumulators, truncate to i8 and store to out[i_out_ch].
986 for.cond.cleanup23: ; preds = %for.body24, %for.body
987 %acc_0.0.lcssa = phi i32 [ %0, %for.body ], [ %21, %for.body24 ]
988 %acc_1.0.lcssa = phi i32 [ %0, %for.body ], [ %22, %for.body24 ]
989 %acc_2.0.lcssa = phi i32 [ %0, %for.body ], [ %23, %for.body24 ]
990 %acc_3.0.lcssa = phi i32 [ %0, %for.body ], [ %24, %for.body24 ]
991 %i_row_loop.1.lcssa = phi i32 [ %i_row_loop.0128, %for.body ], [ %div, %for.body24 ]
992 %add31 = add nsw i32 %acc_1.0.lcssa, %acc_0.0.lcssa
993 %add32 = add nsw i32 %add31, %acc_2.0.lcssa
994 %add33 = add nsw i32 %add32, %acc_3.0.lcssa
995 %conv34 = trunc i32 %add33 to i8
996 %arrayidx35 = getelementptr inbounds i8, ptr %out, i32 %i_out_ch.0129
997 store i8 %conv34, ptr %arrayidx35, align 1
998 %inc37 = add nuw nsw i32 %i_out_ch.0129, 1
999 %exitcond133 = icmp eq i32 %inc37, %conv2
1000 br i1 %exitcond133, label %if.end, label %for.body
; Inner reduction: vctp16-predicated masked loads of one row and four
; columns, predicated col_offset adds, four vmldava accumulations -- the
; pattern converted to the dlstp.16/letp tail-predicated loop above.
1002 for.body24: ; preds = %for.body24, %for.body24.preheader
1003 %row_len_tmp.0122 = phi i32 [ %sub, %for.body24 ], [ %conv5, %for.body24.preheader ]
1004 %ip_r0.0121 = phi ptr [ %add.ptr26, %for.body24 ], [ %add.ptr, %for.body24.preheader ]
1005 %ip_c0.0120 = phi ptr [ %add.ptr27, %for.body24 ], [ %input_col, %for.body24.preheader ]
1006 %ip_c1.0119 = phi ptr [ %add.ptr28, %for.body24 ], [ %add.ptr9, %for.body24.preheader ]
1007 %ip_c2.0118 = phi ptr [ %add.ptr29, %for.body24 ], [ %add.ptr12, %for.body24.preheader ]
1008 %i_row_loop.1117 = phi i32 [ %inc, %for.body24 ], [ %i_row_loop.0128, %for.body24.preheader ]
1009 %ip_c3.0116 = phi ptr [ %add.ptr30, %for.body24 ], [ %add.ptr15, %for.body24.preheader ]
1010 %acc_3.0115 = phi i32 [ %24, %for.body24 ], [ %0, %for.body24.preheader ]
1011 %acc_2.0114 = phi i32 [ %23, %for.body24 ], [ %0, %for.body24.preheader ]
1012 %acc_1.0113 = phi i32 [ %22, %for.body24 ], [ %0, %for.body24.preheader ]
1013 %acc_0.0112 = phi i32 [ %21, %for.body24 ], [ %0, %for.body24.preheader ]
1014 %1 = tail call <8 x i1> @llvm.arm.mve.vctp16(i32 %row_len_tmp.0122)
1015 %sub = add nsw i32 %row_len_tmp.0122, -8
1016 %2 = bitcast ptr %ip_r0.0121 to ptr
1017 %3 = tail call <8 x i8> @llvm.masked.load.v8i8.p0(ptr %2, i32 1, <8 x i1> %1, <8 x i8> zeroinitializer)
1018 %4 = sext <8 x i8> %3 to <8 x i16>
1019 %add.ptr26 = getelementptr inbounds i8, ptr %ip_r0.0121, i32 8
1020 %5 = bitcast ptr %ip_c0.0120 to ptr
1021 %6 = tail call <8 x i8> @llvm.masked.load.v8i8.p0(ptr %5, i32 1, <8 x i1> %1, <8 x i8> zeroinitializer)
1022 %7 = sext <8 x i8> %6 to <8 x i16>
1023 %add.ptr27 = getelementptr inbounds i8, ptr %ip_c0.0120, i32 8
1024 %8 = tail call <8 x i16> @llvm.arm.mve.add.predicated.v8i16.v8i1(<8 x i16> %7, <8 x i16> %.splat.i, <8 x i1> %1, <8 x i16> undef)
1025 %9 = bitcast ptr %ip_c1.0119 to ptr
1026 %10 = tail call <8 x i8> @llvm.masked.load.v8i8.p0(ptr %9, i32 1, <8 x i1> %1, <8 x i8> zeroinitializer)
1027 %11 = sext <8 x i8> %10 to <8 x i16>
1028 %add.ptr28 = getelementptr inbounds i8, ptr %ip_c1.0119, i32 8
1029 %12 = tail call <8 x i16> @llvm.arm.mve.add.predicated.v8i16.v8i1(<8 x i16> %11, <8 x i16> %.splat.i, <8 x i1> %1, <8 x i16> undef)
1030 %13 = bitcast ptr %ip_c2.0118 to ptr
1031 %14 = tail call <8 x i8> @llvm.masked.load.v8i8.p0(ptr %13, i32 1, <8 x i1> %1, <8 x i8> zeroinitializer)
1032 %15 = sext <8 x i8> %14 to <8 x i16>
1033 %add.ptr29 = getelementptr inbounds i8, ptr %ip_c2.0118, i32 8
1034 %16 = tail call <8 x i16> @llvm.arm.mve.add.predicated.v8i16.v8i1(<8 x i16> %15, <8 x i16> %.splat.i, <8 x i1> %1, <8 x i16> undef)
1035 %17 = bitcast ptr %ip_c3.0116 to ptr
1036 %18 = tail call <8 x i8> @llvm.masked.load.v8i8.p0(ptr %17, i32 1, <8 x i1> %1, <8 x i8> zeroinitializer)
1037 %19 = sext <8 x i8> %18 to <8 x i16>
1038 %add.ptr30 = getelementptr inbounds i8, ptr %ip_c3.0116, i32 8
1039 %20 = tail call <8 x i16> @llvm.arm.mve.add.predicated.v8i16.v8i1(<8 x i16> %19, <8 x i16> %.splat.i, <8 x i1> %1, <8 x i16> undef)
1040 %21 = tail call i32 @llvm.arm.mve.vmldava.predicated.v8i16.v8i1(i32 0, i32 0, i32 0, i32 %acc_0.0112, <8 x i16> %4, <8 x i16> %8, <8 x i1> %1)
1041 %22 = tail call i32 @llvm.arm.mve.vmldava.predicated.v8i16.v8i1(i32 0, i32 0, i32 0, i32 %acc_1.0113, <8 x i16> %4, <8 x i16> %12, <8 x i1> %1)
1042 %23 = tail call i32 @llvm.arm.mve.vmldava.predicated.v8i16.v8i1(i32 0, i32 0, i32 0, i32 %acc_2.0114, <8 x i16> %4, <8 x i16> %16, <8 x i1> %1)
1043 %24 = tail call i32 @llvm.arm.mve.vmldava.predicated.v8i16.v8i1(i32 0, i32 0, i32 0, i32 %acc_3.0115, <8 x i16> %4, <8 x i16> %20, <8 x i1> %1)
1044 %inc = add nsw i32 %i_row_loop.1117, 1
1045 %exitcond = icmp eq i32 %inc, %div
1046 br i1 %exitcond, label %for.cond.cleanup23, label %for.body24
; NOTE(review): the terminator of if.end (presumably `ret ptr %out`) falls
; outside this sampled chunk.
1048 if.end: ; preds = %for.cond.cleanup23, %for.cond.preheader, %entry
1052 %struct.arm_cfft_instance_f32 = type { i16, ptr, ptr, i16, ptr, ptr, ptr, ptr, ptr, ptr }
1053 define arm_aapcs_vfpcc void @_Z37_arm_radix4_butterfly_inverse_f32_mvePK21arm_cfft_instance_f32Pfjf(ptr nocapture readonly %0, ptr %1, i32 %2, float %3) {
1054 ; CHECK-LABEL: _Z37_arm_radix4_butterfly_inverse_f32_mvePK21arm_cfft_instance_f32Pfjf:
1056 ; CHECK-NEXT: .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
1057 ; CHECK-NEXT: push.w {r4, r5, r6, r7, r8, r9, r10, r11, lr}
1058 ; CHECK-NEXT: .pad #4
1059 ; CHECK-NEXT: sub sp, #4
1060 ; CHECK-NEXT: .vsave {d8, d9, d10, d11, d12, d13, d14, d15}
1061 ; CHECK-NEXT: vpush {d8, d9, d10, d11, d12, d13, d14, d15}
1062 ; CHECK-NEXT: .pad #40
1063 ; CHECK-NEXT: sub sp, #40
1064 ; CHECK-NEXT: cmp r2, #8
1065 ; CHECK-NEXT: vstr s0, [sp] @ 4-byte Spill
1066 ; CHECK-NEXT: str r1, [sp, #16] @ 4-byte Spill
1067 ; CHECK-NEXT: str r2, [sp, #4] @ 4-byte Spill
1068 ; CHECK-NEXT: blo .LBB7_9
1069 ; CHECK-NEXT: @ %bb.1:
1070 ; CHECK-NEXT: ldr r2, [sp, #4] @ 4-byte Reload
1071 ; CHECK-NEXT: mov.w r11, #0
1072 ; CHECK-NEXT: mov.w r12, #1
1073 ; CHECK-NEXT: str r2, [sp, #12] @ 4-byte Spill
1074 ; CHECK-NEXT: lsrs r1, r2, #2
1075 ; CHECK-NEXT: b .LBB7_3
1076 ; CHECK-NEXT: .LBB7_2: @ in Loop: Header=BB7_3 Depth=1
1077 ; CHECK-NEXT: ldr r2, [sp, #8] @ 4-byte Reload
1078 ; CHECK-NEXT: add.w r11, r11, #1
1079 ; CHECK-NEXT: lsl.w r12, r12, #2
1080 ; CHECK-NEXT: cmp r2, #7
1081 ; CHECK-NEXT: asr.w r1, r2, #2
1082 ; CHECK-NEXT: ble .LBB7_9
1083 ; CHECK-NEXT: .LBB7_3: @ =>This Loop Header: Depth=1
1084 ; CHECK-NEXT: @ Child Loop BB7_6 Depth 2
1085 ; CHECK-NEXT: @ Child Loop BB7_7 Depth 3
1086 ; CHECK-NEXT: str r1, [sp, #8] @ 4-byte Spill
1087 ; CHECK-NEXT: cmp.w r12, #1
1088 ; CHECK-NEXT: ldr r1, [sp, #12] @ 4-byte Reload
1089 ; CHECK-NEXT: lsr.w r2, r1, #2
1090 ; CHECK-NEXT: str r2, [sp, #12] @ 4-byte Spill
1091 ; CHECK-NEXT: blt .LBB7_2
1092 ; CHECK-NEXT: @ %bb.4: @ in Loop: Header=BB7_3 Depth=1
1093 ; CHECK-NEXT: lsrs r2, r1, #3
1094 ; CHECK-NEXT: str r2, [sp, #24] @ 4-byte Spill
1095 ; CHECK-NEXT: beq .LBB7_2
1096 ; CHECK-NEXT: @ %bb.5: @ %.preheader
1097 ; CHECK-NEXT: @ in Loop: Header=BB7_3 Depth=1
1098 ; CHECK-NEXT: ldr r2, [sp, #12] @ 4-byte Reload
1099 ; CHECK-NEXT: lsls r1, r1, #1
1100 ; CHECK-NEXT: movs r4, #0
1101 ; CHECK-NEXT: str r1, [sp, #20] @ 4-byte Spill
1102 ; CHECK-NEXT: lsl.w r10, r2, #1
1103 ; CHECK-NEXT: .LBB7_6: @ Parent Loop BB7_3 Depth=1
1104 ; CHECK-NEXT: @ => This Loop Header: Depth=2
1105 ; CHECK-NEXT: @ Child Loop BB7_7 Depth 3
1106 ; CHECK-NEXT: ldr r1, [sp, #20] @ 4-byte Reload
1107 ; CHECK-NEXT: ldrd lr, r2, [r0, #16]
1108 ; CHECK-NEXT: ldrd r3, r8, [r0, #24]
1109 ; CHECK-NEXT: muls r1, r4, r1
1110 ; CHECK-NEXT: ldr.w r2, [r2, r11, lsl #2]
1111 ; CHECK-NEXT: ldrd r7, r5, [r0, #32]
1112 ; CHECK-NEXT: ldr.w r3, [r3, r11, lsl #2]
1113 ; CHECK-NEXT: ldr.w r6, [lr, r11, lsl #2]
1114 ; CHECK-NEXT: add.w r7, r7, r2, lsl #2
1115 ; CHECK-NEXT: ldr r2, [sp, #16] @ 4-byte Reload
1116 ; CHECK-NEXT: add.w r5, r5, r3, lsl #2
1117 ; CHECK-NEXT: add.w r1, r2, r1, lsl #2
1118 ; CHECK-NEXT: add.w r3, r8, r6, lsl #2
1119 ; CHECK-NEXT: ldr r6, [sp, #24] @ 4-byte Reload
1120 ; CHECK-NEXT: add.w r2, r1, r10, lsl #2
1121 ; CHECK-NEXT: add.w r8, r2, r10, lsl #2
1122 ; CHECK-NEXT: add.w r9, r8, r10, lsl #2
1123 ; CHECK-NEXT: dls lr, r6
1124 ; CHECK-NEXT: .LBB7_7: @ Parent Loop BB7_3 Depth=1
1125 ; CHECK-NEXT: @ Parent Loop BB7_6 Depth=2
1126 ; CHECK-NEXT: @ => This Inner Loop Header: Depth=3
1127 ; CHECK-NEXT: vldrw.u32 q3, [r9]
1128 ; CHECK-NEXT: vldrw.u32 q4, [r2]
1129 ; CHECK-NEXT: vldrw.u32 q6, [r8]
1130 ; CHECK-NEXT: vldrw.u32 q7, [r1]
1131 ; CHECK-NEXT: vsub.f32 q5, q4, q3
1132 ; CHECK-NEXT: vsub.f32 q0, q7, q6
1133 ; CHECK-NEXT: vcadd.f32 q1, q0, q5, #270
1134 ; CHECK-NEXT: vcadd.f32 q2, q0, q5, #90
1135 ; CHECK-NEXT: vadd.f32 q0, q4, q3
1136 ; CHECK-NEXT: vadd.f32 q3, q6, q7
1137 ; CHECK-NEXT: vsub.f32 q4, q3, q0
1138 ; CHECK-NEXT: vadd.f32 q0, q3, q0
1139 ; CHECK-NEXT: vstrb.8 q0, [r1], #16
1140 ; CHECK-NEXT: vldrw.u32 q0, [r7], #16
1141 ; CHECK-NEXT: vcmul.f32 q3, q0, q4, #0
1142 ; CHECK-NEXT: vcmla.f32 q3, q0, q4, #90
1143 ; CHECK-NEXT: vstrb.8 q3, [r2], #16
1144 ; CHECK-NEXT: vldrw.u32 q0, [r3], #16
1145 ; CHECK-NEXT: vcmul.f32 q3, q0, q2, #0
1146 ; CHECK-NEXT: vcmla.f32 q3, q0, q2, #90
1147 ; CHECK-NEXT: vstrb.8 q3, [r8], #16
1148 ; CHECK-NEXT: vldrw.u32 q0, [r5], #16
1149 ; CHECK-NEXT: vcmul.f32 q2, q0, q1, #0
1150 ; CHECK-NEXT: vcmla.f32 q2, q0, q1, #90
1151 ; CHECK-NEXT: vstrb.8 q2, [r9], #16
1152 ; CHECK-NEXT: le lr, .LBB7_7
1153 ; CHECK-NEXT: @ %bb.8: @ in Loop: Header=BB7_6 Depth=2
1154 ; CHECK-NEXT: adds r4, #1
1155 ; CHECK-NEXT: cmp r4, r12
1156 ; CHECK-NEXT: bne .LBB7_6
1157 ; CHECK-NEXT: b .LBB7_2
1158 ; CHECK-NEXT: .LBB7_9:
1159 ; CHECK-NEXT: ldr r0, [sp, #4] @ 4-byte Reload
1160 ; CHECK-NEXT: lsrs r0, r0, #3
1161 ; CHECK-NEXT: wls lr, r0, .LBB7_12
1162 ; CHECK-NEXT: @ %bb.10:
1163 ; CHECK-NEXT: adr r0, .LCPI7_0
1164 ; CHECK-NEXT: vldr s0, [sp] @ 4-byte Reload
1165 ; CHECK-NEXT: vldrw.u32 q1, [r0]
1166 ; CHECK-NEXT: ldr r0, [sp, #16] @ 4-byte Reload
1167 ; CHECK-NEXT: vadd.i32 q1, q1, r0
1168 ; CHECK-NEXT: vmov r0, s0
1169 ; CHECK-NEXT: vldrw.u32 q2, [q1, #64]!
1170 ; CHECK-NEXT: vldrw.u32 q0, [q1, #16]
1171 ; CHECK-NEXT: .LBB7_11: @ =>This Inner Loop Header: Depth=1
1172 ; CHECK-NEXT: vldrw.u32 q3, [q1, #24]
1173 ; CHECK-NEXT: vldrw.u32 q4, [q1, #8]
1174 ; CHECK-NEXT: vsub.f32 q6, q2, q0
1175 ; CHECK-NEXT: vadd.f32 q0, q2, q0
1176 ; CHECK-NEXT: vsub.f32 q5, q4, q3
1177 ; CHECK-NEXT: vadd.f32 q3, q4, q3
1178 ; CHECK-NEXT: vcadd.f32 q7, q6, q5, #270
1179 ; CHECK-NEXT: vsub.f32 q2, q0, q3
1180 ; CHECK-NEXT: vmul.f32 q7, q7, r0
1181 ; CHECK-NEXT: vadd.f32 q3, q0, q3
1182 ; CHECK-NEXT: vstrw.32 q7, [sp, #24] @ 16-byte Spill
1183 ; CHECK-NEXT: vcadd.f32 q7, q6, q5, #90
1184 ; CHECK-NEXT: vmul.f32 q4, q2, r0
1185 ; CHECK-NEXT: vldrw.u32 q2, [q1, #64]!
1186 ; CHECK-NEXT: vmul.f32 q5, q7, r0
1187 ; CHECK-NEXT: vmul.f32 q3, q3, r0
1188 ; CHECK-NEXT: vldrw.u32 q0, [q1, #16]
1189 ; CHECK-NEXT: vstrw.32 q3, [q1, #-64]
1190 ; CHECK-NEXT: vstrw.32 q4, [q1, #-56]
1191 ; CHECK-NEXT: vstrw.32 q5, [q1, #-48]
1192 ; CHECK-NEXT: vldrw.u32 q3, [sp, #24] @ 16-byte Reload
1193 ; CHECK-NEXT: vstrw.32 q3, [q1, #-40]
1194 ; CHECK-NEXT: le lr, .LBB7_11
1195 ; CHECK-NEXT: .LBB7_12:
1196 ; CHECK-NEXT: add sp, #40
1197 ; CHECK-NEXT: vpop {d8, d9, d10, d11, d12, d13, d14, d15}
1198 ; CHECK-NEXT: add sp, #4
1199 ; CHECK-NEXT: pop.w {r4, r5, r6, r7, r8, r9, r10, r11, pc}
1200 ; CHECK-NEXT: .p2align 4
1201 ; CHECK-NEXT: @ %bb.13:
1202 ; CHECK-NEXT: .LCPI7_0:
1203 ; CHECK-NEXT: .long 4294967232 @ 0xffffffc0
1204 ; CHECK-NEXT: .long 4294967236 @ 0xffffffc4
1205 ; CHECK-NEXT: .long 4294967264 @ 0xffffffe0
1206 ; CHECK-NEXT: .long 4294967268 @ 0xffffffe4
1207 %5 = icmp ugt i32 %2, 7
1208 br i1 %5, label %6, label %26
1212 %8 = getelementptr inbounds %struct.arm_cfft_instance_f32, ptr %0, i32 0, i32 7
1213 %9 = getelementptr inbounds %struct.arm_cfft_instance_f32, ptr %0, i32 0, i32 4
1214 %10 = getelementptr inbounds %struct.arm_cfft_instance_f32, ptr %0, i32 0, i32 8
1215 %11 = getelementptr inbounds %struct.arm_cfft_instance_f32, ptr %0, i32 0, i32 5
1216 %12 = getelementptr inbounds %struct.arm_cfft_instance_f32, ptr %0, i32 0, i32 9
1217 %13 = getelementptr inbounds %struct.arm_cfft_instance_f32, ptr %0, i32 0, i32 6
1220 14: ; preds = %6, %40
1221 %15 = phi i32 [ %2, %6 ], [ %19, %40 ]
1222 %16 = phi i32 [ %7, %6 ], [ %43, %40 ]
1223 %17 = phi i32 [ 1, %6 ], [ %41, %40 ]
1224 %18 = phi i32 [ 0, %6 ], [ %42, %40 ]
1225 %19 = lshr i32 %15, 2
1226 %20 = icmp sgt i32 %17, 0
1227 br i1 %20, label %21, label %40
1230 %22 = shl i32 %15, 1
1231 %23 = shl nuw nsw i32 %19, 1
1232 %24 = lshr i32 %15, 3
1233 %25 = icmp eq i32 %24, 0
1234 br i1 %25, label %40, label %45
1236 26: ; preds = %40, %4
1237 %27 = ptrtoint ptr %1 to i32
1238 %28 = insertelement <4 x i32> undef, i32 %27, i32 0
1239 %29 = shufflevector <4 x i32> %28, <4 x i32> undef, <4 x i32> zeroinitializer
1240 %30 = add <4 x i32> %29, <i32 -64, i32 -60, i32 -32, i32 -28>
1241 %31 = tail call { <4 x float>, <4 x i32> } @llvm.arm.mve.vldr.gather.base.wb.v4f32.v4i32(<4 x i32> %30, i32 64)
1242 %32 = extractvalue { <4 x float>, <4 x i32> } %31, 1
1243 %33 = lshr i32 %2, 3
1244 %34 = icmp eq i32 %33, 0
1245 br i1 %34, label %141, label %35
1248 %36 = tail call <4 x float> @llvm.arm.mve.vldr.gather.base.v4f32.v4i32(<4 x i32> %32, i32 16)
1249 %37 = extractvalue { <4 x float>, <4 x i32> } %31, 0
1250 %38 = insertelement <4 x float> undef, float %3, i32 0
1251 %39 = shufflevector <4 x float> %38, <4 x float> undef, <4 x i32> zeroinitializer
1254 40: ; preds = %113, %21, %14
1255 %41 = shl i32 %17, 2
1256 %42 = add nuw nsw i32 %18, 1
1257 %43 = ashr i32 %16, 2
1258 %44 = icmp sgt i32 %16, 7
1259 br i1 %44, label %14, label %26
1261 45: ; preds = %21, %113
1262 %46 = phi i32 [ %114, %113 ], [ 0, %21 ]
1263 %47 = load ptr, ptr %8, align 4
1264 %48 = load ptr, ptr %9, align 4
1265 %49 = getelementptr inbounds i32, ptr %48, i32 %18
1266 %50 = load i32, ptr %49, align 4
1267 %51 = getelementptr inbounds float, ptr %47, i32 %50
1268 %52 = load ptr, ptr %10, align 4
1269 %53 = load ptr, ptr %11, align 4
1270 %54 = getelementptr inbounds i32, ptr %53, i32 %18
1271 %55 = load i32, ptr %54, align 4
1272 %56 = getelementptr inbounds float, ptr %52, i32 %55
1273 %57 = load ptr, ptr %12, align 4
1274 %58 = load ptr, ptr %13, align 4
1275 %59 = getelementptr inbounds i32, ptr %58, i32 %18
1276 %60 = load i32, ptr %59, align 4
1277 %61 = getelementptr inbounds float, ptr %57, i32 %60
1278 %62 = mul i32 %22, %46
1279 %63 = getelementptr inbounds float, ptr %1, i32 %62
1280 %64 = getelementptr inbounds float, ptr %63, i32 %23
1281 %65 = getelementptr inbounds float, ptr %64, i32 %23
1282 %66 = getelementptr inbounds float, ptr %65, i32 %23
1285 67: ; preds = %45, %67
1286 %68 = phi ptr [ %63, %45 ], [ %89, %67 ]
1287 %69 = phi ptr [ %65, %45 ], [ %103, %67 ]
1288 %70 = phi ptr [ %66, %45 ], [ %110, %67 ]
1289 %71 = phi ptr [ %64, %45 ], [ %96, %67 ]
1290 %72 = phi ptr [ %61, %45 ], [ %107, %67 ]
1291 %73 = phi ptr [ %56, %45 ], [ %93, %67 ]
1292 %74 = phi ptr [ %51, %45 ], [ %100, %67 ]
1293 %75 = phi i32 [ %24, %45 ], [ %111, %67 ]
1294 %76 = bitcast ptr %69 to ptr
1295 %77 = bitcast ptr %68 to ptr
1296 %78 = load <4 x float>, ptr %76, align 4
1297 %79 = load <4 x float>, ptr %77, align 4
1298 %80 = bitcast ptr %71 to ptr
1299 %81 = load <4 x float>, ptr %80, align 4
1300 %82 = bitcast ptr %70 to ptr
1301 %83 = load <4 x float>, ptr %82, align 4
1302 %84 = fadd <4 x float> %78, %79
1303 %85 = fsub <4 x float> %79, %78
1304 %86 = fadd <4 x float> %81, %83
1305 %87 = fsub <4 x float> %81, %83
1306 %88 = fadd <4 x float> %84, %86
1307 store <4 x float> %88, ptr %77, align 4
1308 %89 = getelementptr inbounds float, ptr %68, i32 4
1309 %90 = fsub <4 x float> %84, %86
1310 %91 = bitcast ptr %73 to ptr
1311 %92 = load <4 x float>, ptr %91, align 4
1312 %93 = getelementptr inbounds float, ptr %73, i32 4
1313 %94 = tail call <4 x float> @llvm.arm.mve.vcmulq.v4f32(i32 0, <4 x float> %92, <4 x float> %90)
1314 %95 = tail call <4 x float> @llvm.arm.mve.vcmlaq.v4f32(i32 1, <4 x float> %94, <4 x float> %92, <4 x float> %90)
1315 store <4 x float> %95, ptr %80, align 4
1316 %96 = getelementptr inbounds float, ptr %71, i32 4
1317 %97 = tail call <4 x float> @llvm.arm.mve.vcaddq.v4f32(i32 1, i32 0, <4 x float> %85, <4 x float> %87)
1318 %98 = bitcast ptr %74 to ptr
1319 %99 = load <4 x float>, ptr %98, align 4
1320 %100 = getelementptr inbounds float, ptr %74, i32 4
1321 %101 = tail call <4 x float> @llvm.arm.mve.vcmulq.v4f32(i32 0, <4 x float> %99, <4 x float> %97)
1322 %102 = tail call <4 x float> @llvm.arm.mve.vcmlaq.v4f32(i32 1, <4 x float> %101, <4 x float> %99, <4 x float> %97)
1323 store <4 x float> %102, ptr %76, align 4
1324 %103 = getelementptr inbounds float, ptr %69, i32 4
1325 %104 = tail call <4 x float> @llvm.arm.mve.vcaddq.v4f32(i32 1, i32 1, <4 x float> %85, <4 x float> %87)
1326 %105 = bitcast ptr %72 to ptr
1327 %106 = load <4 x float>, ptr %105, align 4
1328 %107 = getelementptr inbounds float, ptr %72, i32 4
1329 %108 = tail call <4 x float> @llvm.arm.mve.vcmulq.v4f32(i32 0, <4 x float> %106, <4 x float> %104)
1330 %109 = tail call <4 x float> @llvm.arm.mve.vcmlaq.v4f32(i32 1, <4 x float> %108, <4 x float> %106, <4 x float> %104)
1331 store <4 x float> %109, ptr %82, align 4
1332 %110 = getelementptr inbounds float, ptr %70, i32 4
1333 %111 = add nsw i32 %75, -1
1334 %112 = icmp eq i32 %111, 0
1335 br i1 %112, label %113, label %67
1338 %114 = add nuw nsw i32 %46, 1
1339 %115 = icmp eq i32 %114, %17
1340 br i1 %115, label %40, label %45
1342 116: ; preds = %35, %116
1343 %117 = phi <4 x i32> [ %32, %35 ], [ %128, %116 ]
1344 %118 = phi i32 [ %33, %35 ], [ %139, %116 ]
1345 %119 = phi <4 x float> [ %36, %35 ], [ %130, %116 ]
1346 %120 = phi <4 x float> [ %37, %35 ], [ %129, %116 ]
1347 %121 = fadd <4 x float> %120, %119
1348 %122 = fsub <4 x float> %120, %119
1349 %123 = tail call <4 x float> @llvm.arm.mve.vldr.gather.base.v4f32.v4i32(<4 x i32> %117, i32 8)
1350 %124 = tail call <4 x float> @llvm.arm.mve.vldr.gather.base.v4f32.v4i32(<4 x i32> %117, i32 24)
1351 %125 = fadd <4 x float> %123, %124
1352 %126 = fsub <4 x float> %123, %124
1353 %127 = tail call { <4 x float>, <4 x i32> } @llvm.arm.mve.vldr.gather.base.wb.v4f32.v4i32(<4 x i32> %117, i32 64)
1354 %128 = extractvalue { <4 x float>, <4 x i32> } %127, 1
1355 %129 = extractvalue { <4 x float>, <4 x i32> } %127, 0
1356 %130 = tail call <4 x float> @llvm.arm.mve.vldr.gather.base.v4f32.v4i32(<4 x i32> %128, i32 16)
1357 %131 = fadd <4 x float> %121, %125
1358 %132 = fmul <4 x float> %39, %131
1359 tail call void @llvm.arm.mve.vstr.scatter.base.v4i32.v4f32(<4 x i32> %128, i32 -64, <4 x float> %132)
1360 %133 = fsub <4 x float> %121, %125
1361 %134 = fmul <4 x float> %39, %133
1362 tail call void @llvm.arm.mve.vstr.scatter.base.v4i32.v4f32(<4 x i32> %128, i32 -56, <4 x float> %134)
1363 %135 = tail call <4 x float> @llvm.arm.mve.vcaddq.v4f32(i32 1, i32 0, <4 x float> %122, <4 x float> %126)
1364 %136 = fmul <4 x float> %39, %135
1365 tail call void @llvm.arm.mve.vstr.scatter.base.v4i32.v4f32(<4 x i32> %128, i32 -48, <4 x float> %136)
1366 %137 = tail call <4 x float> @llvm.arm.mve.vcaddq.v4f32(i32 1, i32 1, <4 x float> %122, <4 x float> %126)
1367 %138 = fmul <4 x float> %39, %137
1368 tail call void @llvm.arm.mve.vstr.scatter.base.v4i32.v4f32(<4 x i32> %128, i32 -40, <4 x float> %138)
1369 %139 = add nsw i32 %118, -1
1370 %140 = icmp eq i32 %139, 0
1371 br i1 %140, label %141, label %116
1373 141: ; preds = %116, %26
; Declarations of the intrinsics used by the test functions in this file.

; MVE tail-predication (VCTP) and predicate<->integer conversion intrinsics.
1377 declare <16 x i1> @llvm.arm.mve.vctp8(i32)
1378 declare <8 x i1> @llvm.arm.mve.vctp16(i32)
1379 declare i32 @llvm.arm.mve.pred.v2i.v16i1(<16 x i1>)

; Target-independent masked load/store intrinsics.
1380 declare <4 x float> @llvm.masked.load.v4f32.p0(ptr, i32 immarg, <4 x i1>, <4 x float>)
1381 declare <8 x i8> @llvm.masked.load.v8i8.p0(ptr, i32, <8 x i1>, <8 x i8>)
1382 declare <16 x i8> @llvm.masked.load.v16i8.p0(ptr, i32 immarg, <16 x i1>, <16 x i8>)
1383 declare void @llvm.masked.store.v4f32.p0(<4 x float>, ptr, i32 immarg, <4 x i1>)

; NOTE(review): the overload suffix says v16i8 but the operand type is
; <16 x i32> — confirm this matches the call sites and the IR verifier's
; mangling rules before touching it (CHECK lines were autogenerated
; against this exact IR).
1384 declare i32 @llvm.vector.reduce.add.v16i8(<16 x i32> %ext4)

; MVE multiply-accumulate-across-vector (VMLADAVA) intrinsics,
; plain and predicated forms, plus a predicated vector add.
1385 declare i32 @llvm.arm.mve.vmldava.v8i16(i32, i32, i32, i32, <8 x i16>, <8 x i16>)
1386 declare i32 @llvm.arm.mve.vmldava.predicated.v16i8.v16i1(i32, i32, i32, i32, <16 x i8>, <16 x i8>, <16 x i1>)
1387 declare i32 @llvm.arm.mve.vmldava.predicated.v8i16.v8i1(i32, i32, i32, i32, <8 x i16>, <8 x i16>, <8 x i1>)
1388 declare <8 x i16> @llvm.arm.mve.add.predicated.v8i16.v8i1(<8 x i16>, <8 x i16>, <8 x i1>, <8 x i16>)

; MVE complex-arithmetic intrinsics (VCMUL / VCMLA / VCADD); the leading
; i32 arguments select the rotation variant.
1390 declare <4 x float> @llvm.arm.mve.vcmulq.v4f32(i32, <4 x float>, <4 x float>)
1391 declare <4 x float> @llvm.arm.mve.vcmlaq.v4f32(i32, <4 x float>, <4 x float>, <4 x float>)
1392 declare <4 x float> @llvm.arm.mve.vcaddq.v4f32(i32, i32, <4 x float>, <4 x float>)

; MVE vector-base gather/scatter with immediate offset. The ".wb"
; (write-back) variant also returns the updated base vector, which the
; loop above feeds into the next iteration (post-increment addressing).
1393 declare { <4 x float>, <4 x i32> } @llvm.arm.mve.vldr.gather.base.wb.v4f32.v4i32(<4 x i32>, i32)
1394 declare <4 x float> @llvm.arm.mve.vldr.gather.base.v4f32.v4i32(<4 x i32>, i32)
1395 declare void @llvm.arm.mve.vstr.scatter.base.v4i32.v4f32(<4 x i32>, i32, <4 x float>)