# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -mtriple=thumbv8.1m.main -mattr=+mve -run-pass=arm-low-overhead-loops %s -o - | FileCheck %s
define dso_local <4 x i32> @invariant_use_store(i16* nocapture readonly %a, i32* %c, i32 %N, <4 x i32> %pass) {
  %cmp9 = icmp eq i32 %N, 0
  %tmp1 = lshr i32 %tmp, 2
  %tmp2 = shl nuw i32 %tmp1, 2
  %tmp3 = add i32 %tmp2, -4
  %tmp4 = lshr i32 %tmp3, 2
  %tmp5 = add nuw nsw i32 %tmp4, 1
  br i1 %cmp9, label %exit, label %vector.ph

vector.ph:                                        ; preds = %entry
  %start = call i32 @llvm.start.loop.iterations.i32(i32 %tmp5)

vector.body:                                      ; preds = %vector.body, %vector.ph
  %lsr.iv1 = phi i32 [ %lsr.iv.next, %vector.body ], [ %start, %vector.ph ]
  %lsr.iv20 = phi i32* [ %scevgep20, %vector.body ], [ %c, %vector.ph ]
  %lsr.iv = phi i16* [ %scevgep, %vector.body ], [ %a, %vector.ph ]
  %vec.phi = phi <4 x i32> [ zeroinitializer, %vector.ph ], [ %tmp13, %vector.body ]
  %tmp7 = phi i32 [ %N, %vector.ph ], [ %tmp9, %vector.body ]
  %lsr.iv17 = bitcast i16* %lsr.iv to <4 x i16>*
  %lsr.store = bitcast i32* %lsr.iv20 to <4 x i32>*
  %tmp8 = call <4 x i1> @llvm.arm.mve.vctp32(i32 %tmp7)
  %tmp9 = sub i32 %tmp7, 4
  %wide.masked.load = call <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>* %lsr.iv17, i32 2, <4 x i1> %tmp8, <4 x i16> undef)
  %tmp10 = sext <4 x i16> %wide.masked.load to <4 x i32>
  %tmp12 = mul nsw <4 x i32> %pass, %tmp10
  %tmp13 = add <4 x i32> %tmp12, %vec.phi
  call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %tmp13, <4 x i32>* %lsr.store, i32 4, <4 x i1> %tmp8)
  %scevgep = getelementptr i16, i16* %lsr.iv, i32 4
  %scevgep20 = getelementptr i32, i32* %lsr.iv20, i32 4
  %tmp14 = call i32 @llvm.loop.decrement.reg.i32.i32.i32(i32 %lsr.iv1, i32 1)
  %tmp15 = icmp ne i32 %tmp14, 0
  %lsr.iv.next = add nsw i32 %lsr.iv1, -1
  br i1 %tmp15, label %vector.body, label %exit

exit:                                             ; preds = %vector.body, %entry
define dso_local i32 @invariant_mul_use_reduce(i16* nocapture readonly %a, i32* %c, i32 %N, <4 x i32> %pass) {
  %cmp9 = icmp eq i32 %N, 0
  %tmp1 = lshr i32 %tmp, 2
  %tmp2 = shl nuw i32 %tmp1, 2
  %tmp3 = add i32 %tmp2, -4
  %tmp4 = lshr i32 %tmp3, 2
  %tmp5 = add nuw nsw i32 %tmp4, 1
  br i1 %cmp9, label %exit, label %vector.ph

vector.ph:                                        ; preds = %entry
  %start = call i32 @llvm.start.loop.iterations.i32(i32 %tmp5)

vector.body:                                      ; preds = %vector.body, %vector.ph
  %lsr.iv1 = phi i32 [ %lsr.iv.next, %vector.body ], [ %start, %vector.ph ]
  %lsr.iv = phi i16* [ %scevgep, %vector.body ], [ %a, %vector.ph ]
  %tmp7 = phi i32 [ %N, %vector.ph ], [ %tmp9, %vector.body ]
  %lsr.iv17 = bitcast i16* %lsr.iv to <4 x i16>*
  %tmp8 = call <4 x i1> @llvm.arm.mve.vctp32(i32 %tmp7)
  %tmp9 = sub i32 %tmp7, 4
  %wide.masked.load = call <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>* %lsr.iv17, i32 2, <4 x i1> %tmp8, <4 x i16> undef)
  %tmp10 = sext <4 x i16> %wide.masked.load to <4 x i32>
  %tmp12 = mul nsw <4 x i32> %pass, %tmp10
  %tmp13 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %tmp12)
  %scevgep = getelementptr i16, i16* %lsr.iv, i32 4
  %tmp15 = call i32 @llvm.loop.decrement.reg.i32.i32.i32(i32 %lsr.iv1, i32 1)
  %tmp16 = icmp ne i32 %tmp15, 0
  %lsr.iv.next = add nsw i32 %lsr.iv1, -1
  br i1 %tmp16, label %vector.body, label %exit

exit:                                             ; preds = %vector.body, %entry
  %res = phi i32 [ 0, %entry ], [ %tmp13, %vector.body ]
define dso_local i32 @invariant_add_use_reduce(i16* nocapture readonly %a, i32* %c, i32 %N, <4 x i32> %pass) {
  %cmp9 = icmp eq i32 %N, 0
  %tmp1 = lshr i32 %tmp, 2
  %tmp2 = shl nuw i32 %tmp1, 2
  %tmp3 = add i32 %tmp2, -4
  %tmp4 = lshr i32 %tmp3, 2
  %tmp5 = add nuw nsw i32 %tmp4, 1
  br i1 %cmp9, label %exit, label %vector.ph

vector.ph:                                        ; preds = %entry
  %start = call i32 @llvm.start.loop.iterations.i32(i32 %tmp5)

vector.body:                                      ; preds = %vector.body, %vector.ph
  %lsr.iv1 = phi i32 [ %lsr.iv.next, %vector.body ], [ %start, %vector.ph ]
  %lsr.iv = phi i16* [ %scevgep, %vector.body ], [ %a, %vector.ph ]
  %tmp7 = phi i32 [ %N, %vector.ph ], [ %tmp9, %vector.body ]
  %lsr.iv17 = bitcast i16* %lsr.iv to <4 x i16>*
  %tmp8 = call <4 x i1> @llvm.arm.mve.vctp32(i32 %tmp7)
  %tmp9 = sub i32 %tmp7, 4
  %wide.masked.load = call <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>* %lsr.iv17, i32 2, <4 x i1> %tmp8, <4 x i16> undef)
  %tmp10 = sext <4 x i16> %wide.masked.load to <4 x i32>
  %tmp12 = add nsw <4 x i32> %pass, %tmp10
  %tmp13 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %tmp12)
  %scevgep = getelementptr i16, i16* %lsr.iv, i32 4
  %tmp15 = call i32 @llvm.loop.decrement.reg.i32.i32.i32(i32 %lsr.iv1, i32 1)
  %tmp16 = icmp ne i32 %tmp15, 0
  %lsr.iv.next = add nsw i32 %lsr.iv1, -1
  br i1 %tmp16, label %vector.body, label %exit

exit:                                             ; preds = %vector.body, %entry
  %res = phi i32 [ 0, %entry ], [ %tmp13, %vector.body ]
declare i32 @llvm.vector.reduce.add.v4i32(<4 x i32>)
declare <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>*, i32 immarg, <4 x i1>, <4 x i16>)
declare void @llvm.masked.store.v4i32.p0v4i32(<4 x i32>, <4 x i32>*, i32 immarg, <4 x i1>)
declare i32 @llvm.start.loop.iterations.i32(i32)
declare i32 @llvm.loop.decrement.reg.i32.i32.i32(i32, i32)
declare <4 x i1> @llvm.arm.mve.vctp32(i32)
name:            invariant_use_store
tracksRegLiveness: true
  - { reg: '$r0', virtual-reg: '' }
  - { reg: '$r1', virtual-reg: '' }
  - { reg: '$r2', virtual-reg: '' }
  - { id: 0, type: default, offset: 0, size: 16, alignment: 8, stack-id: default,
      isImmutable: true, isAliased: false, callee-saved-register: '', callee-saved-restored: true,
      debug-info-variable: '', debug-info-expression: '', debug-info-location: '' }
  - { id: 0, name: '', type: spill-slot, offset: -4, size: 4, alignment: 4,
      stack-id: default, callee-saved-register: '$lr', callee-saved-restored: false,
      debug-info-variable: '', debug-info-expression: '', debug-info-location: '' }
  - { id: 1, name: '', type: spill-slot, offset: -8, size: 4, alignment: 4,
      stack-id: default, callee-saved-register: '$r7', callee-saved-restored: true,
      debug-info-variable: '', debug-info-expression: '', debug-info-location: '' }
machineFunctionInfo: {}
  ; CHECK-LABEL: name: invariant_use_store
  ; CHECK: successors: %bb.3(0x30000000), %bb.1(0x50000000)
  ; CHECK: liveins: $lr, $r0, $r1, $r2, $r7
  ; CHECK: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r7, killed $lr, implicit-def $sp, implicit $sp
  ; CHECK: frame-setup CFI_INSTRUCTION def_cfa_offset 8
  ; CHECK: frame-setup CFI_INSTRUCTION offset $lr, -4
  ; CHECK: frame-setup CFI_INSTRUCTION offset $r7, -8
  ; CHECK: renamable $r3 = tADDrSPi $sp, 2, 14 /* CC::al */, $noreg
  ; CHECK: renamable $q0 = MVE_VLDRWU32 killed renamable $r3, 0, 0, $noreg :: (load (s128) from %fixed-stack.0, align 8)
  ; CHECK: tCBZ $r2, %bb.3
  ; CHECK: bb.1.vector.ph:
  ; CHECK: successors: %bb.2(0x80000000)
  ; CHECK: liveins: $q0, $r0, $r1, $r2
  ; CHECK: renamable $q1 = MVE_VMOVimmi32 0, 0, $noreg, undef renamable $q1
  ; CHECK: $lr = MVE_DLSTP_32 killed renamable $r2
  ; CHECK: bb.2.vector.body:
  ; CHECK: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
  ; CHECK: liveins: $lr, $q0, $q1, $r0, $r1
  ; CHECK: renamable $r0, renamable $q2 = MVE_VLDRHS32_post killed renamable $r0, 8, 0, $noreg :: (load (s64) from %ir.lsr.iv17, align 2)
  ; CHECK: renamable $q2 = nsw MVE_VMULi32 renamable $q0, killed renamable $q2, 0, $noreg, undef renamable $q2
  ; CHECK: renamable $q1 = MVE_VADDi32 killed renamable $q2, killed renamable $q1, 0, $noreg, undef renamable $q1
  ; CHECK: renamable $r1 = MVE_VSTRWU32_post renamable $q1, killed renamable $r1, 16, 0, killed $noreg :: (store (s128) into %ir.lsr.store, align 4)
  ; CHECK: $lr = MVE_LETP killed renamable $lr, %bb.2
  ; CHECK: liveins: $q0
  ; CHECK: renamable $r0, renamable $r1 = VMOVRRD renamable $d0, 14 /* CC::al */, $noreg
  ; CHECK: renamable $r2, renamable $r3 = VMOVRRD killed renamable $d1, 14 /* CC::al */, $noreg, implicit killed $q0
  ; CHECK: tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc, implicit killed $r0, implicit killed $r1, implicit killed $r2, implicit killed $r3
    successors: %bb.3(0x30000000), %bb.1(0x50000000)
    liveins: $r0, $r1, $r2, $r7, $lr

    frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r7, killed $lr, implicit-def $sp, implicit $sp
    frame-setup CFI_INSTRUCTION def_cfa_offset 8
    frame-setup CFI_INSTRUCTION offset $lr, -4
    frame-setup CFI_INSTRUCTION offset $r7, -8
    renamable $r3 = tADDrSPi $sp, 2, 14 /* CC::al */, $noreg
    renamable $q0 = MVE_VLDRWU32 killed renamable $r3, 0, 0, $noreg :: (load (s128) from %fixed-stack.0, align 8)

    successors: %bb.2(0x80000000)
    liveins: $q0, $r0, $r1, $r2

    renamable $r3, dead $cpsr = tADDi3 renamable $r2, 3, 14 /* CC::al */, $noreg
    renamable $q1 = MVE_VMOVimmi32 0, 0, $noreg, undef renamable $q1
    renamable $r3 = t2BICri killed renamable $r3, 3, 14 /* CC::al */, $noreg, $noreg
    renamable $r12 = t2SUBri killed renamable $r3, 4, 14 /* CC::al */, $noreg, $noreg
    renamable $r3, dead $cpsr = tMOVi8 1, 14 /* CC::al */, $noreg
    renamable $r12 = nuw nsw t2ADDrs killed renamable $r3, killed renamable $r12, 19, 14 /* CC::al */, $noreg, $noreg
    $lr = t2DoLoopStart renamable $r12
    $r3 = tMOVr killed $r12, 14 /* CC::al */, $noreg

    successors: %bb.2(0x7c000000), %bb.3(0x04000000)
    liveins: $q0, $q1, $r0, $r1, $r2, $r3

    renamable $vpr = MVE_VCTP32 renamable $r2, 0, $noreg
    MVE_VPST 8, implicit $vpr
    renamable $r0, renamable $q2 = MVE_VLDRHS32_post killed renamable $r0, 8, 1, renamable $vpr :: (load (s64) from %ir.lsr.iv17, align 2)
    $lr = tMOVr $r3, 14 /* CC::al */, $noreg
    renamable $q2 = nsw MVE_VMULi32 renamable $q0, killed renamable $q2, 0, $noreg, undef renamable $q2
    renamable $r3, dead $cpsr = nsw tSUBi8 killed $r3, 1, 14 /* CC::al */, $noreg
    renamable $r2, dead $cpsr = tSUBi8 killed renamable $r2, 4, 14 /* CC::al */, $noreg
    renamable $q1 = MVE_VADDi32 killed renamable $q2, killed renamable $q1, 0, $noreg, undef renamable $q1
    renamable $lr = t2LoopDec killed renamable $lr, 1
    MVE_VPST 8, implicit $vpr
    renamable $r1 = MVE_VSTRWU32_post renamable $q1, killed renamable $r1, 16, 1, killed renamable $vpr :: (store (s128) into %ir.lsr.store, align 4)
    t2LoopEnd killed renamable $lr, %bb.2, implicit-def dead $cpsr
    tB %bb.3, 14 /* CC::al */, $noreg

    renamable $r0, renamable $r1 = VMOVRRD renamable $d0, 14 /* CC::al */, $noreg
    renamable $r2, renamable $r3 = VMOVRRD killed renamable $d1, 14 /* CC::al */, $noreg, implicit $q0
    tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc, implicit killed $r0, implicit killed $r1, implicit killed $r2, implicit killed $r3
name:            invariant_mul_use_reduce
tracksRegLiveness: true
  - { reg: '$r0', virtual-reg: '' }
  - { reg: '$r2', virtual-reg: '' }
  - { id: 0, type: default, offset: 0, size: 16, alignment: 8, stack-id: default,
      isImmutable: true, isAliased: false, callee-saved-register: '', callee-saved-restored: true,
      debug-info-variable: '', debug-info-expression: '', debug-info-location: '' }
  - { id: 0, name: '', type: spill-slot, offset: -4, size: 4, alignment: 4,
      stack-id: default, callee-saved-register: '$lr', callee-saved-restored: false,
      debug-info-variable: '', debug-info-expression: '', debug-info-location: '' }
  - { id: 1, name: '', type: spill-slot, offset: -8, size: 4, alignment: 4,
      stack-id: default, callee-saved-register: '$r7', callee-saved-restored: true,
      debug-info-variable: '', debug-info-expression: '', debug-info-location: '' }
machineFunctionInfo: {}
  ; CHECK-LABEL: name: invariant_mul_use_reduce
  ; CHECK: successors: %bb.4(0x30000000), %bb.1(0x50000000)
  ; CHECK: liveins: $lr, $r0, $r2, $r7
  ; CHECK: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r7, killed $lr, implicit-def $sp, implicit $sp
  ; CHECK: frame-setup CFI_INSTRUCTION def_cfa_offset 8
  ; CHECK: frame-setup CFI_INSTRUCTION offset $lr, -4
  ; CHECK: frame-setup CFI_INSTRUCTION offset $r7, -8
  ; CHECK: tCBZ $r2, %bb.4
  ; CHECK: bb.1.vector.ph:
  ; CHECK: successors: %bb.2(0x80000000)
  ; CHECK: liveins: $r0, $r2
  ; CHECK: renamable $r1, dead $cpsr = tADDi3 renamable $r2, 3, 14 /* CC::al */, $noreg
  ; CHECK: renamable $r3, dead $cpsr = tMOVi8 1, 14 /* CC::al */, $noreg
  ; CHECK: renamable $r1 = t2BICri killed renamable $r1, 3, 14 /* CC::al */, $noreg, $noreg
  ; CHECK: renamable $r1, dead $cpsr = tSUBi8 killed renamable $r1, 4, 14 /* CC::al */, $noreg
  ; CHECK: renamable $r3 = nuw nsw t2ADDrs killed renamable $r3, killed renamable $r1, 19, 14 /* CC::al */, $noreg, $noreg
  ; CHECK: renamable $r1 = tADDrSPi $sp, 2, 14 /* CC::al */, $noreg
  ; CHECK: renamable $q0 = MVE_VLDRWU32 killed renamable $r1, 0, 0, $noreg :: (load (s128) from %fixed-stack.0, align 8)
  ; CHECK: dead $lr = t2DLS renamable $r3
  ; CHECK: $r1 = tMOVr killed $r3, 14 /* CC::al */, $noreg
  ; CHECK: bb.2.vector.body:
  ; CHECK: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
  ; CHECK: liveins: $q0, $r0, $r1, $r2
  ; CHECK: renamable $vpr = MVE_VCTP32 renamable $r2, 0, $noreg
  ; CHECK: $lr = tMOVr $r1, 14 /* CC::al */, $noreg
  ; CHECK: renamable $r1, dead $cpsr = nsw tSUBi8 killed $r1, 1, 14 /* CC::al */, $noreg
  ; CHECK: renamable $r2, dead $cpsr = tSUBi8 killed renamable $r2, 4, 14 /* CC::al */, $noreg
  ; CHECK: MVE_VPST 8, implicit $vpr
  ; CHECK: renamable $r0, renamable $q1 = MVE_VLDRHS32_post killed renamable $r0, 8, 1, killed renamable $vpr :: (load (s64) from %ir.lsr.iv17, align 2)
  ; CHECK: renamable $r12 = MVE_VMLADAVu32 renamable $q0, killed renamable $q1, 0, $noreg
  ; CHECK: dead $lr = t2LEUpdate killed renamable $lr, %bb.2
  ; CHECK: liveins: $r12
  ; CHECK: $r0 = tMOVr killed $r12, 14 /* CC::al */, $noreg
  ; CHECK: tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc, implicit killed $r0
  ; CHECK: renamable $r12 = t2MOVi 0, 14 /* CC::al */, $noreg, $noreg
  ; CHECK: $r0 = tMOVr killed $r12, 14 /* CC::al */, $noreg
  ; CHECK: tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc, implicit killed $r0
    successors: %bb.4(0x30000000), %bb.1(0x50000000)
    liveins: $r0, $r2, $r7, $lr

    frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r7, killed $lr, implicit-def $sp, implicit $sp
    frame-setup CFI_INSTRUCTION def_cfa_offset 8
    frame-setup CFI_INSTRUCTION offset $lr, -4
    frame-setup CFI_INSTRUCTION offset $r7, -8

    successors: %bb.2(0x80000000)

    renamable $r1, dead $cpsr = tADDi3 renamable $r2, 3, 14 /* CC::al */, $noreg
    renamable $r3, dead $cpsr = tMOVi8 1, 14 /* CC::al */, $noreg
    renamable $r1 = t2BICri killed renamable $r1, 3, 14 /* CC::al */, $noreg, $noreg
    renamable $r1, dead $cpsr = tSUBi8 killed renamable $r1, 4, 14 /* CC::al */, $noreg
    renamable $r3 = nuw nsw t2ADDrs killed renamable $r3, killed renamable $r1, 19, 14 /* CC::al */, $noreg, $noreg
    renamable $r1 = tADDrSPi $sp, 2, 14 /* CC::al */, $noreg
    renamable $q0 = MVE_VLDRWU32 killed renamable $r1, 0, 0, $noreg :: (load (s128) from %fixed-stack.0, align 8)
    $lr = t2DoLoopStart renamable $r3
    $r1 = tMOVr killed $r3, 14 /* CC::al */, $noreg

    successors: %bb.2(0x7c000000), %bb.3(0x04000000)
    liveins: $q0, $r0, $r1, $r2

    renamable $vpr = MVE_VCTP32 renamable $r2, 0, $noreg
    $lr = tMOVr $r1, 14 /* CC::al */, $noreg
    renamable $r1, dead $cpsr = nsw tSUBi8 killed $r1, 1, 14 /* CC::al */, $noreg
    renamable $r2, dead $cpsr = tSUBi8 killed renamable $r2, 4, 14 /* CC::al */, $noreg
    MVE_VPST 8, implicit $vpr
    renamable $r0, renamable $q1 = MVE_VLDRHS32_post killed renamable $r0, 8, 1, killed renamable $vpr :: (load (s64) from %ir.lsr.iv17, align 2)
    renamable $r12 = MVE_VMLADAVu32 renamable $q0, killed renamable $q1, 0, $noreg
    renamable $lr = t2LoopDec killed renamable $lr, 1
    t2LoopEnd killed renamable $lr, %bb.2, implicit-def dead $cpsr
    tB %bb.3, 14 /* CC::al */, $noreg

    $r0 = tMOVr killed $r12, 14 /* CC::al */, $noreg
    tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc, implicit killed $r0

    renamable $r12 = t2MOVi 0, 14 /* CC::al */, $noreg, $noreg
    $r0 = tMOVr killed $r12, 14 /* CC::al */, $noreg
    tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc, implicit killed $r0
name:            invariant_add_use_reduce
tracksRegLiveness: true
  - { reg: '$r0', virtual-reg: '' }
  - { reg: '$r2', virtual-reg: '' }
  - { id: 0, type: default, offset: 0, size: 16, alignment: 8, stack-id: default,
      isImmutable: true, isAliased: false, callee-saved-register: '', callee-saved-restored: true,
      debug-info-variable: '', debug-info-expression: '', debug-info-location: '' }
  - { id: 0, name: '', type: spill-slot, offset: -4, size: 4, alignment: 4,
      stack-id: default, callee-saved-register: '$lr', callee-saved-restored: false,
      debug-info-variable: '', debug-info-expression: '', debug-info-location: '' }
  - { id: 1, name: '', type: spill-slot, offset: -8, size: 4, alignment: 4,
      stack-id: default, callee-saved-register: '$r7', callee-saved-restored: true,
      debug-info-variable: '', debug-info-expression: '', debug-info-location: '' }
machineFunctionInfo: {}
  ; CHECK-LABEL: name: invariant_add_use_reduce
  ; CHECK: successors: %bb.4(0x30000000), %bb.1(0x50000000)
  ; CHECK: liveins: $lr, $r0, $r2, $r7
  ; CHECK: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r7, killed $lr, implicit-def $sp, implicit $sp
  ; CHECK: frame-setup CFI_INSTRUCTION def_cfa_offset 8
  ; CHECK: frame-setup CFI_INSTRUCTION offset $lr, -4
  ; CHECK: frame-setup CFI_INSTRUCTION offset $r7, -8
  ; CHECK: tCBZ $r2, %bb.4
  ; CHECK: bb.1.vector.ph:
  ; CHECK: successors: %bb.2(0x80000000)
  ; CHECK: liveins: $r0, $r2
  ; CHECK: renamable $r1, dead $cpsr = tADDi3 renamable $r2, 3, 14 /* CC::al */, $noreg
  ; CHECK: renamable $r3, dead $cpsr = tMOVi8 1, 14 /* CC::al */, $noreg
  ; CHECK: renamable $r1 = t2BICri killed renamable $r1, 3, 14 /* CC::al */, $noreg, $noreg
  ; CHECK: renamable $r1, dead $cpsr = tSUBi8 killed renamable $r1, 4, 14 /* CC::al */, $noreg
  ; CHECK: renamable $r3 = nuw nsw t2ADDrs killed renamable $r3, killed renamable $r1, 19, 14 /* CC::al */, $noreg, $noreg
  ; CHECK: renamable $r1 = tADDrSPi $sp, 2, 14 /* CC::al */, $noreg
  ; CHECK: renamable $q0 = MVE_VLDRWU32 killed renamable $r1, 0, 0, $noreg :: (load (s128) from %fixed-stack.0, align 8)
  ; CHECK: dead $lr = t2DLS renamable $r3
  ; CHECK: $r1 = tMOVr killed $r3, 14 /* CC::al */, $noreg
  ; CHECK: bb.2.vector.body:
  ; CHECK: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
  ; CHECK: liveins: $q0, $r0, $r1, $r2
  ; CHECK: renamable $vpr = MVE_VCTP32 renamable $r2, 0, $noreg
  ; CHECK: MVE_VPST 8, implicit $vpr
  ; CHECK: renamable $r0, renamable $q1 = MVE_VLDRHS32_post killed renamable $r0, 8, 1, killed renamable $vpr :: (load (s64) from %ir.lsr.iv17, align 2)
  ; CHECK: $lr = tMOVr $r1, 14 /* CC::al */, $noreg
  ; CHECK: renamable $q1 = nsw MVE_VADDi32 renamable $q0, killed renamable $q1, 0, $noreg, undef renamable $q1
  ; CHECK: renamable $r1, dead $cpsr = nsw tSUBi8 killed $r1, 1, 14 /* CC::al */, $noreg
  ; CHECK: renamable $r2, dead $cpsr = tSUBi8 killed renamable $r2, 4, 14 /* CC::al */, $noreg
  ; CHECK: renamable $r12 = MVE_VADDVu32no_acc killed renamable $q1, 0, $noreg
  ; CHECK: dead $lr = t2LEUpdate killed renamable $lr, %bb.2
  ; CHECK: liveins: $r12
  ; CHECK: $r0 = tMOVr killed $r12, 14 /* CC::al */, $noreg
  ; CHECK: tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc, implicit killed $r0
  ; CHECK: renamable $r12 = t2MOVi 0, 14 /* CC::al */, $noreg, $noreg
  ; CHECK: $r0 = tMOVr killed $r12, 14 /* CC::al */, $noreg
  ; CHECK: tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc, implicit killed $r0
    successors: %bb.4(0x30000000), %bb.1(0x50000000)
    liveins: $r0, $r2, $r7, $lr

    frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r7, killed $lr, implicit-def $sp, implicit $sp
    frame-setup CFI_INSTRUCTION def_cfa_offset 8
    frame-setup CFI_INSTRUCTION offset $lr, -4
    frame-setup CFI_INSTRUCTION offset $r7, -8

    successors: %bb.2(0x80000000)

    renamable $r1, dead $cpsr = tADDi3 renamable $r2, 3, 14 /* CC::al */, $noreg
    renamable $r3, dead $cpsr = tMOVi8 1, 14 /* CC::al */, $noreg
    renamable $r1 = t2BICri killed renamable $r1, 3, 14 /* CC::al */, $noreg, $noreg
    renamable $r1, dead $cpsr = tSUBi8 killed renamable $r1, 4, 14 /* CC::al */, $noreg
    renamable $r3 = nuw nsw t2ADDrs killed renamable $r3, killed renamable $r1, 19, 14 /* CC::al */, $noreg, $noreg
    renamable $r1 = tADDrSPi $sp, 2, 14 /* CC::al */, $noreg
    renamable $q0 = MVE_VLDRWU32 killed renamable $r1, 0, 0, $noreg :: (load (s128) from %fixed-stack.0, align 8)
    $lr = t2DoLoopStart renamable $r3
    $r1 = tMOVr killed $r3, 14 /* CC::al */, $noreg

    successors: %bb.2(0x7c000000), %bb.3(0x04000000)
    liveins: $q0, $r0, $r1, $r2

    renamable $vpr = MVE_VCTP32 renamable $r2, 0, $noreg
    MVE_VPST 8, implicit $vpr
    renamable $r0, renamable $q1 = MVE_VLDRHS32_post killed renamable $r0, 8, 1, killed renamable $vpr :: (load (s64) from %ir.lsr.iv17, align 2)
    $lr = tMOVr $r1, 14 /* CC::al */, $noreg
    renamable $q1 = nsw MVE_VADDi32 renamable $q0, killed renamable $q1, 0, $noreg, undef renamable $q1
    renamable $r1, dead $cpsr = nsw tSUBi8 killed $r1, 1, 14 /* CC::al */, $noreg
    renamable $r2, dead $cpsr = tSUBi8 killed renamable $r2, 4, 14 /* CC::al */, $noreg
    renamable $r12 = MVE_VADDVu32no_acc killed renamable $q1, 0, $noreg
    renamable $lr = t2LoopDec killed renamable $lr, 1
    t2LoopEnd killed renamable $lr, %bb.2, implicit-def dead $cpsr
    tB %bb.3, 14 /* CC::al */, $noreg

    $r0 = tMOVr killed $r12, 14 /* CC::al */, $noreg
    tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc, implicit killed $r0

    renamable $r12 = t2MOVi 0, 14 /* CC::al */, $noreg, $noreg
    $r0 = tMOVr killed $r12, 14 /* CC::al */, $noreg
    tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc, implicit killed $r0