# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -mtriple=thumbv8.1m.main -mattr=+mve -run-pass=arm-low-overhead-loops %s -o - | FileCheck %s
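# The CHECK lines below expect the predicated vector.body loop (t2DoLoopStart /
# t2LoopDec / t2LoopEnd with VCTP/VPST-predicated loads and stores) to be rewritten
# as a tail-predicated MVE_DLSTP_8 / MVE_LETP loop, while the unrolled scalar
# for.body loop keeps its scalar form and uses the t2LEUpdate low-overhead branch.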
define dso_local arm_aapcs_vfpcc void @unrolled_and_vector(i8* nocapture %res, i8* nocapture readonly %a, i8* nocapture readonly %b, i32 %N) {
entry:
  %cmp10 = icmp eq i32 %N, 0
  br i1 %cmp10, label %for.cond.cleanup, label %vector.memcheck

vector.memcheck:                                  ; preds = %entry
  %scevgep = getelementptr i8, i8* %res, i32 %N
  %scevgep12 = getelementptr i8, i8* %a, i32 %N
  %scevgep13 = getelementptr i8, i8* %b, i32 %N
  %bound0 = icmp ugt i8* %scevgep12, %res
  %bound1 = icmp ugt i8* %scevgep, %a
  %found.conflict = and i1 %bound0, %bound1
  %bound014 = icmp ugt i8* %scevgep13, %res
  %bound115 = icmp ugt i8* %scevgep, %b
  %found.conflict16 = and i1 %bound014, %bound115
  %conflict.rdx = or i1 %found.conflict, %found.conflict16
  %2 = shl nuw i32 %1, 4
  %5 = add nuw nsw i32 %4, 1
  br i1 %conflict.rdx, label %for.body.preheader, label %vector.ph

for.body.preheader:                               ; preds = %vector.memcheck
  %xtraiter = and i32 %N, 3
  %7 = icmp ult i32 %6, 3
  %9 = sub i32 %8, %xtraiter
  %11 = add nuw nsw i32 %10, 1
  br i1 %7, label %for.cond.cleanup.loopexit.unr-lcssa, label %for.body.preheader.new

for.body.preheader.new:                           ; preds = %for.body.preheader
  %start1 = call i32 @llvm.start.loop.iterations.i32(i32 %11)
  br label %for.body

vector.ph:                                        ; preds = %vector.memcheck
  %start2 = call i32 @llvm.start.loop.iterations.i32(i32 %5)
  br label %vector.body

vector.body:                                      ; preds = %vector.body, %vector.ph
  %lsr.iv50 = phi i8* [ %scevgep51, %vector.body ], [ %res, %vector.ph ]
  %lsr.iv47 = phi i8* [ %scevgep48, %vector.body ], [ %b, %vector.ph ]
  %lsr.iv = phi i8* [ %scevgep45, %vector.body ], [ %a, %vector.ph ]
  %12 = phi i32 [ %start2, %vector.ph ], [ %17, %vector.body ]
  %13 = phi i32 [ %N, %vector.ph ], [ %15, %vector.body ]
  %lsr.iv5052 = bitcast i8* %lsr.iv50 to <16 x i8>*
  %lsr.iv4749 = bitcast i8* %lsr.iv47 to <16 x i8>*
  %lsr.iv46 = bitcast i8* %lsr.iv to <16 x i8>*
  %14 = call <16 x i1> @llvm.arm.mve.vctp8(i32 %13)
  %15 = sub i32 %13, 16
  %wide.masked.load = call <16 x i8> @llvm.masked.load.v16i8.p0v16i8(<16 x i8>* %lsr.iv46, i32 1, <16 x i1> %14, <16 x i8> undef)
  %wide.masked.load19 = call <16 x i8> @llvm.masked.load.v16i8.p0v16i8(<16 x i8>* %lsr.iv4749, i32 1, <16 x i1> %14, <16 x i8> undef)
  %16 = add <16 x i8> %wide.masked.load19, %wide.masked.load
  call void @llvm.masked.store.v16i8.p0v16i8(<16 x i8> %16, <16 x i8>* %lsr.iv5052, i32 1, <16 x i1> %14)
  %scevgep45 = getelementptr i8, i8* %lsr.iv, i32 16
  %scevgep48 = getelementptr i8, i8* %lsr.iv47, i32 16
  %scevgep51 = getelementptr i8, i8* %lsr.iv50, i32 16
  %17 = call i32 @llvm.loop.decrement.reg.i32.i32.i32(i32 %12, i32 1)
  %18 = icmp ne i32 %17, 0
  br i1 %18, label %vector.body, label %for.cond.cleanup

for.cond.cleanup.loopexit.unr-lcssa:              ; preds = %for.body, %for.body.preheader
  %i.011.unr = phi i32 [ 0, %for.body.preheader ], [ %inc.3, %for.body ]
  %lcmp.mod = icmp eq i32 %xtraiter, 0
  br i1 %lcmp.mod, label %for.cond.cleanup, label %for.body.epil

for.body.epil:                                    ; preds = %for.cond.cleanup.loopexit.unr-lcssa
  %arrayidx.epil = getelementptr inbounds i8, i8* %a, i32 %i.011.unr
  %19 = load i8, i8* %arrayidx.epil, align 1
  %arrayidx1.epil = getelementptr inbounds i8, i8* %b, i32 %i.011.unr
  %20 = load i8, i8* %arrayidx1.epil, align 1
  %add.epil = add i8 %20, %19
  %arrayidx4.epil = getelementptr inbounds i8, i8* %res, i32 %i.011.unr
  store i8 %add.epil, i8* %arrayidx4.epil, align 1
  %inc.epil = add nuw i32 %i.011.unr, 1
  %epil.iter.cmp = icmp eq i32 %xtraiter, 1
  br i1 %epil.iter.cmp, label %for.cond.cleanup, label %for.body.epil.1

for.cond.cleanup:                                 ; preds = %vector.body, %for.cond.cleanup.loopexit.unr-lcssa, %for.body.epil.1, %for.body.epil, %for.body.epil.2, %entry
  ret void

for.body:                                         ; preds = %for.body, %for.body.preheader.new
  %i.011 = phi i32 [ 0, %for.body.preheader.new ], [ %inc.3, %for.body ]
  %21 = phi i32 [ %start1, %for.body.preheader.new ], [ %30, %for.body ]
  %scevgep23 = getelementptr i8, i8* %a, i32 %i.011
  %scevgep2453 = bitcast i8* %scevgep23 to i8*
  %22 = load i8, i8* %scevgep2453, align 1
  %scevgep27 = getelementptr i8, i8* %b, i32 %i.011
  %scevgep2854 = bitcast i8* %scevgep27 to i8*
  %23 = load i8, i8* %scevgep2854, align 1
  %add = add i8 %23, %22
  %scevgep31 = getelementptr i8, i8* %res, i32 %i.011
  %scevgep3255 = bitcast i8* %scevgep31 to i8*
  store i8 %add, i8* %scevgep3255, align 1
  %scevgep39 = getelementptr i8, i8* %a, i32 %i.011
  %scevgep40 = getelementptr i8, i8* %scevgep39, i32 1
  %24 = load i8, i8* %scevgep40, align 1
  %scevgep41 = getelementptr i8, i8* %b, i32 %i.011
  %scevgep42 = getelementptr i8, i8* %scevgep41, i32 1
  %25 = load i8, i8* %scevgep42, align 1
  %add.1 = add i8 %25, %24
  %scevgep43 = getelementptr i8, i8* %res, i32 %i.011
  %scevgep44 = getelementptr i8, i8* %scevgep43, i32 1
  store i8 %add.1, i8* %scevgep44, align 1
  %scevgep33 = getelementptr i8, i8* %a, i32 %i.011
  %scevgep34 = getelementptr i8, i8* %scevgep33, i32 2
  %26 = load i8, i8* %scevgep34, align 1
  %scevgep35 = getelementptr i8, i8* %b, i32 %i.011
  %scevgep36 = getelementptr i8, i8* %scevgep35, i32 2
  %27 = load i8, i8* %scevgep36, align 1
  %add.2 = add i8 %27, %26
  %scevgep37 = getelementptr i8, i8* %res, i32 %i.011
  %scevgep38 = getelementptr i8, i8* %scevgep37, i32 2
  store i8 %add.2, i8* %scevgep38, align 1
  %scevgep21 = getelementptr i8, i8* %a, i32 %i.011
  %scevgep22 = getelementptr i8, i8* %scevgep21, i32 3
  %28 = load i8, i8* %scevgep22, align 1
  %scevgep25 = getelementptr i8, i8* %b, i32 %i.011
  %scevgep26 = getelementptr i8, i8* %scevgep25, i32 3
  %29 = load i8, i8* %scevgep26, align 1
  %add.3 = add i8 %29, %28
  %scevgep29 = getelementptr i8, i8* %res, i32 %i.011
  %scevgep30 = getelementptr i8, i8* %scevgep29, i32 3
  store i8 %add.3, i8* %scevgep30, align 1
  %inc.3 = add nuw i32 %i.011, 4
  %30 = call i32 @llvm.loop.decrement.reg.i32.i32.i32(i32 %21, i32 1)
  %31 = icmp ne i32 %30, 0
  br i1 %31, label %for.body, label %for.cond.cleanup.loopexit.unr-lcssa

for.body.epil.1:                                  ; preds = %for.body.epil
  %arrayidx.epil.1 = getelementptr inbounds i8, i8* %a, i32 %inc.epil
  %32 = load i8, i8* %arrayidx.epil.1, align 1
  %arrayidx1.epil.1 = getelementptr inbounds i8, i8* %b, i32 %inc.epil
  %33 = load i8, i8* %arrayidx1.epil.1, align 1
  %add.epil.1 = add i8 %33, %32
  %arrayidx4.epil.1 = getelementptr inbounds i8, i8* %res, i32 %inc.epil
  store i8 %add.epil.1, i8* %arrayidx4.epil.1, align 1
  %inc.epil.1 = add nuw i32 %i.011.unr, 2
  %epil.iter.cmp.1 = icmp eq i32 %xtraiter, 2
  br i1 %epil.iter.cmp.1, label %for.cond.cleanup, label %for.body.epil.2

for.body.epil.2:                                  ; preds = %for.body.epil.1
  %arrayidx.epil.2 = getelementptr inbounds i8, i8* %a, i32 %inc.epil.1
  %34 = load i8, i8* %arrayidx.epil.2, align 1
  %arrayidx1.epil.2 = getelementptr inbounds i8, i8* %b, i32 %inc.epil.1
  %35 = load i8, i8* %arrayidx1.epil.2, align 1
  %add.epil.2 = add i8 %35, %34
  %arrayidx4.epil.2 = getelementptr inbounds i8, i8* %res, i32 %inc.epil.1
  store i8 %add.epil.2, i8* %arrayidx4.epil.2, align 1
  br label %for.cond.cleanup
}

declare <16 x i8> @llvm.masked.load.v16i8.p0v16i8(<16 x i8>*, i32 immarg, <16 x i1>, <16 x i8>) #1
declare void @llvm.masked.store.v16i8.p0v16i8(<16 x i8>, <16 x i8>*, i32 immarg, <16 x i1>) #2
declare i32 @llvm.start.loop.iterations.i32(i32) #3
declare i32 @llvm.loop.decrement.reg.i32.i32.i32(i32, i32) #3
declare <16 x i1> @llvm.arm.mve.vctp8(i32) #4

name: unrolled_and_vector
exposesReturnsTwice: false
regBankSelected: false
tracksRegLiveness: true
  - { reg: '$r0', virtual-reg: '' }
  - { reg: '$r1', virtual-reg: '' }
  - { reg: '$r2', virtual-reg: '' }
  - { reg: '$r3', virtual-reg: '' }
isFrameAddressTaken: false
isReturnAddressTaken: false
offsetAdjustment: -24
cvBytesOfCalleeSavedRegisters: 0
hasOpaqueSPAdjustment: false
hasMustTailInVarArgFunc: false
  - { id: 0, name: '', type: spill-slot, offset: -4, size: 4, alignment: 4,
      stack-id: default, callee-saved-register: '$lr', callee-saved-restored: false,
      debug-info-variable: '', debug-info-expression: '', debug-info-location: '' }
  - { id: 1, name: '', type: spill-slot, offset: -8, size: 4, alignment: 4,
      stack-id: default, callee-saved-register: '$r7', callee-saved-restored: true,
      debug-info-variable: '', debug-info-expression: '', debug-info-location: '' }
  - { id: 2, name: '', type: spill-slot, offset: -12, size: 4, alignment: 4,
      stack-id: default, callee-saved-register: '$r6', callee-saved-restored: true,
      debug-info-variable: '', debug-info-expression: '', debug-info-location: '' }
  - { id: 3, name: '', type: spill-slot, offset: -16, size: 4, alignment: 4,
      stack-id: default, callee-saved-register: '$r5', callee-saved-restored: true,
      debug-info-variable: '', debug-info-expression: '', debug-info-location: '' }
  - { id: 4, name: '', type: spill-slot, offset: -20, size: 4, alignment: 4,
      stack-id: default, callee-saved-register: '$r4', callee-saved-restored: true,
      debug-info-variable: '', debug-info-expression: '', debug-info-location: '' }
  - { id: 5, name: '', type: spill-slot, offset: -24, size: 4, alignment: 4,
      stack-id: default, callee-saved-register: '$r11', callee-saved-restored: true,
      debug-info-variable: '', debug-info-expression: '', debug-info-location: '' }
  - { id: 6, name: '', type: spill-slot, offset: -28, size: 4, alignment: 4,
      stack-id: default, callee-saved-register: '$r9', callee-saved-restored: true,
      debug-info-variable: '', debug-info-expression: '', debug-info-location: '' }
  - { id: 7, name: '', type: spill-slot, offset: -32, size: 4, alignment: 4,
      stack-id: default, callee-saved-register: '$r8', callee-saved-restored: true,
      debug-info-variable: '', debug-info-expression: '', debug-info-location: '' }
machineFunctionInfo: {}
; CHECK-LABEL: name: unrolled_and_vector
; CHECK: successors: %bb.11(0x30000000), %bb.1(0x50000000)
; CHECK: liveins: $lr, $r0, $r1, $r2, $r3, $r4, $r5, $r6, $r8, $r9, $r11
; CHECK: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r4, killed $r5, killed $r6, killed $lr, implicit-def $sp, implicit $sp
; CHECK: frame-setup CFI_INSTRUCTION def_cfa_offset 20
; CHECK: frame-setup CFI_INSTRUCTION offset $lr, -4
; CHECK: frame-setup CFI_INSTRUCTION offset $r7, -8
; CHECK: frame-setup CFI_INSTRUCTION offset $r6, -12
; CHECK: frame-setup CFI_INSTRUCTION offset $r5, -16
; CHECK: frame-setup CFI_INSTRUCTION offset $r4, -20
; CHECK: dead $r7 = frame-setup tADDrSPi $sp, 3, 14 /* CC::al */, $noreg
; CHECK: frame-setup CFI_INSTRUCTION def_cfa $r7, 8
; CHECK: $sp = frame-setup t2STMDB_UPD $sp, 14 /* CC::al */, $noreg, killed $r8, killed $r9, killed $r11
; CHECK: frame-setup CFI_INSTRUCTION offset $r11, -24
; CHECK: frame-setup CFI_INSTRUCTION offset $r9, -28
; CHECK: frame-setup CFI_INSTRUCTION offset $r8, -32
; CHECK: tCMPi8 renamable $r3, 0, 14 /* CC::al */, $noreg, implicit-def $cpsr
; CHECK: tBcc %bb.11, 0 /* CC::eq */, killed $cpsr
; CHECK: bb.1.vector.memcheck:
; CHECK: successors: %bb.2(0x40000000), %bb.4(0x40000000)
; CHECK: liveins: $r0, $r1, $r2, $r3
; CHECK: renamable $r4, dead $cpsr = tADDrr renamable $r0, renamable $r3, 14 /* CC::al */, $noreg
; CHECK: renamable $r5, dead $cpsr = tADDrr renamable $r2, renamable $r3, 14 /* CC::al */, $noreg
; CHECK: tCMPr renamable $r4, renamable $r2, 14 /* CC::al */, $noreg, implicit-def $cpsr
; CHECK: renamable $lr = t2MOVi 1, 14 /* CC::al */, $noreg, $noreg
; CHECK: renamable $r12 = t2CSINC $zr, $zr, 9, implicit killed $cpsr
; CHECK: tCMPr killed renamable $r5, renamable $r0, 14 /* CC::al */, $noreg, implicit-def $cpsr
; CHECK: renamable $r6 = t2CSINC $zr, $zr, 9, implicit killed $cpsr
; CHECK: tCMPr killed renamable $r4, renamable $r1, 14 /* CC::al */, $noreg, implicit-def $cpsr
; CHECK: renamable $r5 = t2ADDrr renamable $r1, renamable $r3, 14 /* CC::al */, $noreg, $noreg
; CHECK: renamable $r4 = t2CSINC $zr, $zr, 9, implicit killed $cpsr
; CHECK: tCMPr killed renamable $r5, renamable $r0, 14 /* CC::al */, $noreg, implicit-def $cpsr
; CHECK: renamable $r5 = t2CSINC $zr, $zr, 9, implicit killed $cpsr
; CHECK: renamable $r5, dead $cpsr = tAND killed renamable $r5, killed renamable $r4, 14 /* CC::al */, $noreg
; CHECK: dead renamable $r5, $cpsr = tLSLri killed renamable $r5, 31, 14 /* CC::al */, $noreg
; CHECK: t2IT 0, 4, implicit-def $itstate
; CHECK: renamable $r6 = t2ANDrr killed renamable $r6, killed renamable $r12, 0 /* CC::eq */, $cpsr, $noreg, implicit killed $r6, implicit $itstate
; CHECK: dead renamable $r6 = t2LSLri killed renamable $r6, 31, 0 /* CC::eq */, killed $cpsr, def $cpsr, implicit killed $r6, implicit killed $itstate
; CHECK: tBcc %bb.4, 0 /* CC::eq */, killed $cpsr
; CHECK: bb.2.for.body.preheader:
; CHECK: successors: %bb.3(0x40000000), %bb.6(0x40000000)
; CHECK: liveins: $lr, $r0, $r1, $r2, $r3
; CHECK: renamable $r4, dead $cpsr = tSUBi3 renamable $r3, 1, 14 /* CC::al */, $noreg
; CHECK: renamable $r12 = t2ANDri renamable $r3, 3, 14 /* CC::al */, $noreg, $noreg
; CHECK: tCMPi8 killed renamable $r4, 3, 14 /* CC::al */, $noreg, implicit-def $cpsr
; CHECK: tBcc %bb.6, 2 /* CC::hs */, killed $cpsr
; CHECK: successors: %bb.8(0x80000000)
; CHECK: liveins: $r0, $r1, $r2, $r12
; CHECK: renamable $r3, dead $cpsr = tMOVi8 0, 14 /* CC::al */, $noreg
; CHECK: tB %bb.8, 14 /* CC::al */, $noreg
; CHECK: bb.4.vector.ph:
; CHECK: successors: %bb.5(0x80000000)
; CHECK: liveins: $r0, $r1, $r2, $r3
; CHECK: $lr = MVE_DLSTP_8 killed renamable $r3
; CHECK: bb.5.vector.body:
; CHECK: successors: %bb.5(0x7c000000), %bb.11(0x04000000)
; CHECK: liveins: $lr, $r0, $r1, $r2
; CHECK: renamable $r1, renamable $q0 = MVE_VLDRBU8_post killed renamable $r1, 16, 0, $noreg :: (load (s128) from %ir.lsr.iv46, align 1)
; CHECK: renamable $r2, renamable $q1 = MVE_VLDRBU8_post killed renamable $r2, 16, 0, $noreg :: (load (s128) from %ir.lsr.iv4749, align 1)
; CHECK: renamable $q0 = MVE_VADDi8 killed renamable $q1, killed renamable $q0, 0, $noreg, undef renamable $q0
; CHECK: renamable $r0 = MVE_VSTRBU8_post killed renamable $q0, killed renamable $r0, 16, 0, killed $noreg :: (store (s128) into %ir.lsr.iv5052, align 1)
; CHECK: $lr = MVE_LETP killed renamable $lr, %bb.5
; CHECK: tB %bb.11, 14 /* CC::al */, $noreg
; CHECK: bb.6.for.body.preheader.new:
; CHECK: successors: %bb.7(0x80000000)
; CHECK: liveins: $lr, $r0, $r1, $r2, $r3, $r12
; CHECK: renamable $r3 = t2BICri killed renamable $r3, 3, 14 /* CC::al */, $noreg, $noreg
; CHECK: renamable $r3, dead $cpsr = tSUBi8 killed renamable $r3, 4, 14 /* CC::al */, $noreg
; CHECK: renamable $lr = nuw nsw t2ADDrs killed renamable $lr, killed renamable $r3, 19, 14 /* CC::al */, $noreg, $noreg
; CHECK: renamable $r3, dead $cpsr = tMOVi8 0, 14 /* CC::al */, $noreg
; CHECK: bb.7.for.body:
; CHECK: successors: %bb.7(0x7c000000), %bb.8(0x04000000)
; CHECK: liveins: $lr, $r0, $r1, $r2, $r3, $r12
; CHECK: renamable $r4 = tLDRBr renamable $r1, $r3, 14 /* CC::al */, $noreg :: (load (s8) from %ir.scevgep2453)
; CHECK: renamable $r9 = t2ADDrr renamable $r1, renamable $r3, 14 /* CC::al */, $noreg, $noreg
; CHECK: renamable $r5 = tLDRBr renamable $r2, $r3, 14 /* CC::al */, $noreg :: (load (s8) from %ir.scevgep2854)
; CHECK: renamable $r6, dead $cpsr = tADDrr renamable $r2, renamable $r3, 14 /* CC::al */, $noreg
; CHECK: renamable $r4 = tADDhirr killed renamable $r4, killed renamable $r5, 14 /* CC::al */, $noreg
; CHECK: tSTRBr killed renamable $r4, renamable $r0, $r3, 14 /* CC::al */, $noreg :: (store (s8) into %ir.scevgep3255)
; CHECK: renamable $r8 = t2LDRBi12 renamable $r9, 1, 14 /* CC::al */, $noreg :: (load (s8) from %ir.scevgep40)
; CHECK: renamable $r5 = tLDRBi renamable $r6, 1, 14 /* CC::al */, $noreg :: (load (s8) from %ir.scevgep42)
; CHECK: renamable $r8 = tADDhirr killed renamable $r8, killed renamable $r5, 14 /* CC::al */, $noreg
; CHECK: renamable $r5, dead $cpsr = tADDrr renamable $r0, renamable $r3, 14 /* CC::al */, $noreg
; CHECK: renamable $r3, dead $cpsr = nuw tADDi8 killed renamable $r3, 4, 14 /* CC::al */, $noreg
; CHECK: t2STRBi12 killed renamable $r8, renamable $r5, 1, 14 /* CC::al */, $noreg :: (store (s8) into %ir.scevgep44)
; CHECK: renamable $r8 = t2LDRBi12 renamable $r9, 2, 14 /* CC::al */, $noreg :: (load (s8) from %ir.scevgep34)
; CHECK: renamable $r4 = tLDRBi renamable $r6, 2, 14 /* CC::al */, $noreg :: (load (s8) from %ir.scevgep36)
; CHECK: renamable $r4 = tADDhirr killed renamable $r4, killed renamable $r8, 14 /* CC::al */, $noreg
; CHECK: tSTRBi killed renamable $r4, renamable $r5, 2, 14 /* CC::al */, $noreg :: (store (s8) into %ir.scevgep38)
; CHECK: renamable $r4 = t2LDRBi12 killed renamable $r9, 3, 14 /* CC::al */, $noreg :: (load (s8) from %ir.scevgep22)
; CHECK: renamable $r6 = tLDRBi killed renamable $r6, 3, 14 /* CC::al */, $noreg :: (load (s8) from %ir.scevgep26)
; CHECK: renamable $r4 = tADDhirr killed renamable $r4, killed renamable $r6, 14 /* CC::al */, $noreg
; CHECK: tSTRBi killed renamable $r4, killed renamable $r5, 3, 14 /* CC::al */, $noreg :: (store (s8) into %ir.scevgep30)
; CHECK: $lr = t2LEUpdate killed renamable $lr, %bb.7
; CHECK: bb.8.for.cond.cleanup.loopexit.unr-lcssa:
; CHECK: successors: %bb.11(0x30000000), %bb.9(0x50000000)
; CHECK: liveins: $r0, $r1, $r2, $r3, $r12
; CHECK: t2CMPri renamable $r12, 0, 14 /* CC::al */, $noreg, implicit-def $cpsr
; CHECK: tBcc %bb.11, 0 /* CC::eq */, killed $cpsr
; CHECK: bb.9.for.body.epil:
; CHECK: successors: %bb.11(0x40000000), %bb.10(0x40000000)
; CHECK: liveins: $r0, $r1, $r2, $r3, $r12
; CHECK: renamable $r6 = tLDRBr renamable $r1, $r3, 14 /* CC::al */, $noreg :: (load (s8) from %ir.arrayidx.epil)
; CHECK: t2CMPri renamable $r12, 1, 14 /* CC::al */, $noreg, implicit-def $cpsr
; CHECK: renamable $r5 = tLDRBr renamable $r2, $r3, 14 /* CC::al */, $noreg :: (load (s8) from %ir.arrayidx1.epil)
; CHECK: renamable $r6 = tADDhirr killed renamable $r6, killed renamable $r5, 14 /* CC::al */, $noreg
; CHECK: tSTRBr killed renamable $r6, renamable $r0, $r3, 14 /* CC::al */, $noreg :: (store (s8) into %ir.arrayidx4.epil)
; CHECK: tBcc %bb.11, 0 /* CC::eq */, killed $cpsr
; CHECK: bb.10.for.body.epil.1:
; CHECK: successors: %bb.11(0x40000000), %bb.12(0x40000000)
; CHECK: liveins: $r0, $r1, $r2, $r3, $r12
; CHECK: renamable $r6, dead $cpsr = nuw tADDi3 renamable $r3, 1, 14 /* CC::al */, $noreg
; CHECK: t2CMPri killed renamable $r12, 2, 14 /* CC::al */, $noreg, implicit-def $cpsr
; CHECK: renamable $r5 = tLDRBr renamable $r1, $r6, 14 /* CC::al */, $noreg :: (load (s8) from %ir.arrayidx.epil.1)
; CHECK: renamable $r4 = tLDRBr renamable $r2, $r6, 14 /* CC::al */, $noreg :: (load (s8) from %ir.arrayidx1.epil.1)
; CHECK: renamable $r5 = tADDhirr killed renamable $r5, killed renamable $r4, 14 /* CC::al */, $noreg
; CHECK: tSTRBr killed renamable $r5, renamable $r0, killed $r6, 14 /* CC::al */, $noreg :: (store (s8) into %ir.arrayidx4.epil.1)
; CHECK: tBcc %bb.12, 1 /* CC::ne */, killed $cpsr
; CHECK: bb.11.for.cond.cleanup:
; CHECK: $sp = t2LDMIA_UPD $sp, 14 /* CC::al */, $noreg, def $r8, def $r9, def $r11
; CHECK: tPOP_RET 14 /* CC::al */, $noreg, def $r4, def $r5, def $r6, def $r7, def $pc
; CHECK: bb.12.for.body.epil.2:
; CHECK: liveins: $r0, $r1, $r2, $r3
; CHECK: renamable $r3, dead $cpsr = nuw tADDi8 killed renamable $r3, 2, 14 /* CC::al */, $noreg
; CHECK: renamable $r1 = tLDRBr killed renamable $r1, $r3, 14 /* CC::al */, $noreg :: (load (s8) from %ir.arrayidx.epil.2)
; CHECK: renamable $r2 = tLDRBr killed renamable $r2, $r3, 14 /* CC::al */, $noreg :: (load (s8) from %ir.arrayidx1.epil.2)
; CHECK: renamable $r1 = tADDhirr killed renamable $r1, killed renamable $r2, 14 /* CC::al */, $noreg
; CHECK: tSTRBr killed renamable $r1, killed renamable $r0, killed $r3, 14 /* CC::al */, $noreg :: (store (s8) into %ir.arrayidx4.epil.2)
; CHECK: $sp = t2LDMIA_UPD $sp, 14 /* CC::al */, $noreg, def $r8, def $r9, def $r11
; CHECK: tPOP_RET 14 /* CC::al */, $noreg, def $r4, def $r5, def $r6, def $r7, def $pc
successors: %bb.11(0x30000000), %bb.1(0x50000000)
liveins: $r0, $r1, $r2, $r3, $r4, $r5, $r6, $lr, $r8, $r9, $r11

frame-setup tPUSH 14, $noreg, killed $r4, killed $r5, killed $r6, killed $lr, implicit-def $sp, implicit $sp
frame-setup CFI_INSTRUCTION def_cfa_offset 20
frame-setup CFI_INSTRUCTION offset $lr, -4
frame-setup CFI_INSTRUCTION offset $r7, -8
frame-setup CFI_INSTRUCTION offset $r6, -12
frame-setup CFI_INSTRUCTION offset $r5, -16
frame-setup CFI_INSTRUCTION offset $r4, -20
$r7 = frame-setup tADDrSPi $sp, 3, 14, $noreg
frame-setup CFI_INSTRUCTION def_cfa $r7, 8
$sp = frame-setup t2STMDB_UPD $sp, 14, $noreg, killed $r8, killed $r9, killed $r11
frame-setup CFI_INSTRUCTION offset $r11, -24
frame-setup CFI_INSTRUCTION offset $r9, -28
frame-setup CFI_INSTRUCTION offset $r8, -32
tCMPi8 renamable $r3, 0, 14, $noreg, implicit-def $cpsr
tBcc %bb.11, 0, killed $cpsr

bb.1.vector.memcheck:
successors: %bb.2(0x40000000), %bb.4(0x40000000)
liveins: $r0, $r1, $r2, $r3

renamable $r4, dead $cpsr = tADDrr renamable $r0, renamable $r3, 14, $noreg
renamable $r5, dead $cpsr = tADDrr renamable $r2, renamable $r3, 14, $noreg
tCMPr renamable $r4, renamable $r2, 14, $noreg, implicit-def $cpsr
renamable $lr = t2MOVi 1, 14, $noreg, $noreg
renamable $r12 = t2CSINC $zr, $zr, 9, implicit killed $cpsr
tCMPr killed renamable $r5, renamable $r0, 14, $noreg, implicit-def $cpsr
renamable $r6 = t2CSINC $zr, $zr, 9, implicit killed $cpsr
tCMPr killed renamable $r4, renamable $r1, 14, $noreg, implicit-def $cpsr
renamable $r5 = t2ADDrr renamable $r1, renamable $r3, 14, $noreg, $noreg
renamable $r4 = t2CSINC $zr, $zr, 9, implicit killed $cpsr
tCMPr killed renamable $r5, renamable $r0, 14, $noreg, implicit-def $cpsr
renamable $r5 = t2CSINC $zr, $zr, 9, implicit killed $cpsr
renamable $r5, dead $cpsr = tAND killed renamable $r5, killed renamable $r4, 14, $noreg
dead renamable $r5, $cpsr = tLSLri killed renamable $r5, 31, 14, $noreg
t2IT 0, 4, implicit-def $itstate
renamable $r6 = t2ANDrr killed renamable $r6, killed renamable $r12, 0, $cpsr, $noreg, implicit $r6, implicit $itstate
dead renamable $r6 = t2LSLri killed renamable $r6, 31, 0, killed $cpsr, def $cpsr, implicit killed $r6, implicit killed $itstate
tBcc %bb.4, 0, killed $cpsr

bb.2.for.body.preheader:
successors: %bb.3(0x40000000), %bb.6(0x40000000)
liveins: $lr, $r0, $r1, $r2, $r3

renamable $r4, dead $cpsr = tSUBi3 renamable $r3, 1, 14, $noreg
renamable $r12 = t2ANDri renamable $r3, 3, 14, $noreg, $noreg
tCMPi8 killed renamable $r4, 3, 14, $noreg, implicit-def $cpsr
tBcc %bb.6, 2, killed $cpsr

successors: %bb.8(0x80000000)
liveins: $r0, $r1, $r2, $r12

renamable $r3, dead $cpsr = tMOVi8 0, 14, $noreg

successors: %bb.5(0x80000000)
liveins: $lr, $r0, $r1, $r2, $r3

renamable $r6 = t2ADDri renamable $r3, 15, 14, $noreg, $noreg
renamable $r6 = t2BICri killed renamable $r6, 15, 14, $noreg, $noreg
renamable $r6, dead $cpsr = tSUBi8 killed renamable $r6, 16, 14, $noreg
renamable $lr = nuw nsw t2ADDrs killed renamable $lr, killed renamable $r6, 35, 14, $noreg, $noreg
$lr = t2DoLoopStart renamable $lr

successors: %bb.5(0x7c000000), %bb.11(0x04000000)
liveins: $lr, $r0, $r1, $r2, $r3

renamable $vpr = MVE_VCTP8 renamable $r3, 0, $noreg
renamable $r3, dead $cpsr = tSUBi8 killed renamable $r3, 16, 14, $noreg
MVE_VPST 4, implicit $vpr
renamable $r1, renamable $q0 = MVE_VLDRBU8_post killed renamable $r1, 16, 1, renamable $vpr :: (load (s128) from %ir.lsr.iv46, align 1)
renamable $r2, renamable $q1 = MVE_VLDRBU8_post killed renamable $r2, 16, 1, renamable $vpr :: (load (s128) from %ir.lsr.iv4749, align 1)
renamable $lr = t2LoopDec killed renamable $lr, 1
renamable $q0 = MVE_VADDi8 killed renamable $q1, killed renamable $q0, 0, $noreg, undef renamable $q0
MVE_VPST 8, implicit $vpr
renamable $r0 = MVE_VSTRBU8_post killed renamable $q0, killed renamable $r0, 16, 1, killed renamable $vpr :: (store (s128) into %ir.lsr.iv5052, align 1)
t2LoopEnd renamable $lr, %bb.5, implicit-def dead $cpsr
tB %bb.11, 14, $noreg

bb.6.for.body.preheader.new:
successors: %bb.7(0x80000000)
liveins: $lr, $r0, $r1, $r2, $r3, $r12

renamable $r3 = t2BICri killed renamable $r3, 3, 14, $noreg, $noreg
renamable $r3, dead $cpsr = tSUBi8 killed renamable $r3, 4, 14, $noreg
renamable $lr = nuw nsw t2ADDrs killed renamable $lr, killed renamable $r3, 19, 14, $noreg, $noreg
renamable $r3, dead $cpsr = tMOVi8 0, 14, $noreg
$lr = t2DoLoopStart renamable $lr

successors: %bb.7(0x7c000000), %bb.8(0x04000000)
liveins: $lr, $r0, $r1, $r2, $r3, $r12

renamable $r4 = tLDRBr renamable $r1, $r3, 14, $noreg :: (load (s8) from %ir.scevgep2453)
renamable $r9 = t2ADDrr renamable $r1, renamable $r3, 14, $noreg, $noreg
renamable $r5 = tLDRBr renamable $r2, $r3, 14, $noreg :: (load (s8) from %ir.scevgep2854)
renamable $r6, dead $cpsr = tADDrr renamable $r2, renamable $r3, 14, $noreg
renamable $lr = t2LoopDec killed renamable $lr, 1
renamable $r4 = tADDhirr killed renamable $r4, killed renamable $r5, 14, $noreg
tSTRBr killed renamable $r4, renamable $r0, $r3, 14, $noreg :: (store (s8) into %ir.scevgep3255)
renamable $r8 = t2LDRBi12 renamable $r9, 1, 14, $noreg :: (load (s8) from %ir.scevgep40)
renamable $r5 = tLDRBi renamable $r6, 1, 14, $noreg :: (load (s8) from %ir.scevgep42)
renamable $r8 = tADDhirr killed renamable $r8, killed renamable $r5, 14, $noreg
renamable $r5, dead $cpsr = tADDrr renamable $r0, renamable $r3, 14, $noreg
renamable $r3, dead $cpsr = nuw tADDi8 killed renamable $r3, 4, 14, $noreg
t2STRBi12 killed renamable $r8, renamable $r5, 1, 14, $noreg :: (store (s8) into %ir.scevgep44)
renamable $r8 = t2LDRBi12 renamable $r9, 2, 14, $noreg :: (load (s8) from %ir.scevgep34)
renamable $r4 = tLDRBi renamable $r6, 2, 14, $noreg :: (load (s8) from %ir.scevgep36)
renamable $r4 = tADDhirr killed renamable $r4, killed renamable $r8, 14, $noreg
tSTRBi killed renamable $r4, renamable $r5, 2, 14, $noreg :: (store (s8) into %ir.scevgep38)
renamable $r4 = t2LDRBi12 killed renamable $r9, 3, 14, $noreg :: (load (s8) from %ir.scevgep22)
renamable $r6 = tLDRBi killed renamable $r6, 3, 14, $noreg :: (load (s8) from %ir.scevgep26)
renamable $r4 = tADDhirr killed renamable $r4, killed renamable $r6, 14, $noreg
tSTRBi killed renamable $r4, killed renamable $r5, 3, 14, $noreg :: (store (s8) into %ir.scevgep30)
t2LoopEnd renamable $lr, %bb.7, implicit-def dead $cpsr

bb.8.for.cond.cleanup.loopexit.unr-lcssa:
successors: %bb.11(0x30000000), %bb.9(0x50000000)
liveins: $r0, $r1, $r2, $r3, $r12

t2CMPri renamable $r12, 0, 14, $noreg, implicit-def $cpsr
tBcc %bb.11, 0, killed $cpsr

successors: %bb.11(0x40000000), %bb.10(0x40000000)
liveins: $r0, $r1, $r2, $r3, $r12

renamable $r6 = tLDRBr renamable $r1, $r3, 14, $noreg :: (load (s8) from %ir.arrayidx.epil)
t2CMPri renamable $r12, 1, 14, $noreg, implicit-def $cpsr
renamable $r5 = tLDRBr renamable $r2, $r3, 14, $noreg :: (load (s8) from %ir.arrayidx1.epil)
renamable $r6 = tADDhirr killed renamable $r6, killed renamable $r5, 14, $noreg
tSTRBr killed renamable $r6, renamable $r0, $r3, 14, $noreg :: (store (s8) into %ir.arrayidx4.epil)
tBcc %bb.11, 0, killed $cpsr

bb.10.for.body.epil.1:
successors: %bb.11(0x40000000), %bb.12(0x40000000)
liveins: $r0, $r1, $r2, $r3, $r12

renamable $r6, dead $cpsr = nuw tADDi3 renamable $r3, 1, 14, $noreg
t2CMPri killed renamable $r12, 2, 14, $noreg, implicit-def $cpsr
renamable $r5 = tLDRBr renamable $r1, $r6, 14, $noreg :: (load (s8) from %ir.arrayidx.epil.1)
renamable $r4 = tLDRBr renamable $r2, $r6, 14, $noreg :: (load (s8) from %ir.arrayidx1.epil.1)
renamable $r5 = tADDhirr killed renamable $r5, killed renamable $r4, 14, $noreg
tSTRBr killed renamable $r5, renamable $r0, killed $r6, 14, $noreg :: (store (s8) into %ir.arrayidx4.epil.1)
tBcc %bb.12, 1, killed $cpsr

bb.11.for.cond.cleanup:
$sp = t2LDMIA_UPD $sp, 14, $noreg, def $r8, def $r9, def $r11
tPOP_RET 14, $noreg, def $r4, def $r5, def $r6, def $r7, def $pc

bb.12.for.body.epil.2:
liveins: $r0, $r1, $r2, $r3

renamable $r3, dead $cpsr = nuw tADDi8 killed renamable $r3, 2, 14, $noreg
renamable $r1 = tLDRBr killed renamable $r1, $r3, 14, $noreg :: (load (s8) from %ir.arrayidx.epil.2)
renamable $r2 = tLDRBr killed renamable $r2, $r3, 14, $noreg :: (load (s8) from %ir.arrayidx1.epil.2)
renamable $r1 = tADDhirr killed renamable $r1, killed renamable $r2, 14, $noreg
tSTRBr killed renamable $r1, killed renamable $r0, killed $r3, 14, $noreg :: (store (s8) into %ir.arrayidx4.epil.2)
$sp = t2LDMIA_UPD $sp, 14, $noreg, def $r8, def $r9, def $r11
tPOP_RET 14, $noreg, def $r4, def $r5, def $r6, def $r7, def $pc