# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -mtriple=thumbv8.1m.main -mattr=+mve,+lob -run-pass=arm-low-overhead-loops --verify-machineinstrs %s -o - | FileCheck %s
#
# The vector body subtracts 5 from the element count each iteration:
#   renamable $r3, dead $cpsr = tSUBi8 killed renamable $r3, 5, 14, $noreg
# but the VCTP32 predicates only 4 elements per iteration, so the SUB cannot be
# optimised away and the loop must not be tail-predicated.
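#
# For reference only (not part of this test): a decrement that matches the four
# 32-bit lanes handled by MVE_VCTP32 would look like the line below; with that
# immediate the pass could fold the SUB away and emit a tail-predicated loop
# (DLSTP/LETP) instead of keeping the VCTP/VPST sequence:
#   renamable $r3, dead $cpsr = tSUBi8 killed renamable $r3, 4, 14, $noreg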
--- |
  define dso_local void @incorrect_sub_32(ptr noalias nocapture %A, ptr noalias nocapture readonly %B, ptr noalias nocapture readonly %C, i32 %N) local_unnamed_addr #0 {
  entry:
    %cmp8 = icmp sgt i32 %N, 0
    %0 = add i32 %N, 3
    %1 = lshr i32 %0, 2
    %2 = shl nuw i32 %1, 2
    %3 = add i32 %2, -4
    %4 = lshr i32 %3, 2
    %5 = add nuw nsw i32 %4, 1
    br i1 %cmp8, label %vector.ph, label %for.cond.cleanup

  vector.ph:                                        ; preds = %entry
    %start = call i32 @llvm.start.loop.iterations.i32(i32 %5)
    br label %vector.body

  vector.body:                                      ; preds = %vector.body, %vector.ph
    %lsr.iv17 = phi ptr [ %scevgep18, %vector.body ], [ %A, %vector.ph ]
    %lsr.iv14 = phi ptr [ %scevgep15, %vector.body ], [ %C, %vector.ph ]
    %lsr.iv = phi ptr [ %scevgep, %vector.body ], [ %B, %vector.ph ]
    %6 = phi i32 [ %start, %vector.ph ], [ %11, %vector.body ]
    %7 = phi i32 [ %N, %vector.ph ], [ %9, %vector.body ]
    %lsr.iv13 = bitcast ptr %lsr.iv to ptr
    %lsr.iv1416 = bitcast ptr %lsr.iv14 to ptr
    %lsr.iv1719 = bitcast ptr %lsr.iv17 to ptr
    %8 = call <4 x i1> @llvm.arm.mve.vctp32(i32 %7)
    %9 = sub i32 %7, 4
    %wide.masked.load = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %lsr.iv13, i32 4, <4 x i1> %8, <4 x i32> undef)
    %wide.masked.load12 = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %lsr.iv1416, i32 4, <4 x i1> %8, <4 x i32> undef)
    %10 = add nsw <4 x i32> %wide.masked.load12, %wide.masked.load
    call void @llvm.masked.store.v4i32.p0(<4 x i32> %10, ptr %lsr.iv1719, i32 4, <4 x i1> %8)
    %scevgep = getelementptr i32, ptr %lsr.iv, i32 4
    %scevgep15 = getelementptr i32, ptr %lsr.iv14, i32 4
    %scevgep18 = getelementptr i32, ptr %lsr.iv17, i32 4
    %11 = call i32 @llvm.loop.decrement.reg.i32.i32.i32(i32 %6, i32 1)
    %12 = icmp ne i32 %11, 0
    br i1 %12, label %vector.body, label %for.cond.cleanup

  for.cond.cleanup:                                 ; preds = %vector.body, %entry
    ret void
  }

  declare i32 @llvm.start.loop.iterations.i32(i32)
  declare <4 x i1> @llvm.arm.mve.vctp32(i32)
  declare i32 @llvm.loop.decrement.reg.i32.i32.i32(i32, i32)
  declare <4 x i32> @llvm.masked.load.v4i32.p0(ptr, i32 immarg, <4 x i1>, <4 x i32>)
  declare void @llvm.masked.store.v4i32.p0(<4 x i32>, ptr, i32 immarg, <4 x i1>)

...
---
name:            incorrect_sub_32
exposesReturnsTwice: false
regBankSelected: false
tracksRegLiveness: true
liveins:
  - { reg: '$r0', virtual-reg: '' }
  - { reg: '$r1', virtual-reg: '' }
  - { reg: '$r2', virtual-reg: '' }
  - { reg: '$r3', virtual-reg: '' }
frameInfo:
  isFrameAddressTaken: false
  isReturnAddressTaken: false
  cvBytesOfCalleeSavedRegisters: 0
  hasOpaqueSPAdjustment: false
  hasMustTailInVarArgFunc: false
stack:
  - { id: 0, name: '', type: spill-slot, offset: -4, size: 4, alignment: 4,
      stack-id: default, callee-saved-register: '$lr', callee-saved-restored: false,
      debug-info-variable: '', debug-info-expression: '', debug-info-location: '' }
  - { id: 1, name: '', type: spill-slot, offset: -8, size: 4, alignment: 4,
      stack-id: default, callee-saved-register: '$r7', callee-saved-restored: true,
      debug-info-variable: '', debug-info-expression: '', debug-info-location: '' }
machineFunctionInfo: {}
body:             |
  ; CHECK-LABEL: name: incorrect_sub_32
  ; CHECK: bb.0.entry:
  ; CHECK-NEXT:   successors: %bb.1(0x80000000)
  ; CHECK-NEXT:   liveins: $lr, $r0, $r1, $r2, $r3, $r7
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r7, killed $lr, implicit-def $sp, implicit $sp
  ; CHECK-NEXT:   frame-setup CFI_INSTRUCTION def_cfa_offset 8
  ; CHECK-NEXT:   frame-setup CFI_INSTRUCTION offset $lr, -4
  ; CHECK-NEXT:   frame-setup CFI_INSTRUCTION offset $r7, -8
  ; CHECK-NEXT:   tCMPi8 renamable $r3, 1, 14 /* CC::al */, $noreg, implicit-def $cpsr
  ; CHECK-NEXT:   t2IT 11, 8, implicit-def $itstate
  ; CHECK-NEXT:   tPOP_RET 11 /* CC::lt */, killed $cpsr, def $r7, def $pc, implicit killed $itstate
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT: bb.1.vector.ph:
  ; CHECK-NEXT:   successors: %bb.2(0x80000000)
  ; CHECK-NEXT:   liveins: $r0, $r1, $r2, $r3
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   renamable $r12 = t2ADDri renamable $r3, 3, 14 /* CC::al */, $noreg, $noreg
  ; CHECK-NEXT:   renamable $lr = t2MOVi 1, 14 /* CC::al */, $noreg, $noreg
  ; CHECK-NEXT:   renamable $r12 = t2BICri killed renamable $r12, 3, 14 /* CC::al */, $noreg, $noreg
  ; CHECK-NEXT:   renamable $r12 = t2SUBri killed renamable $r12, 4, 14 /* CC::al */, $noreg, $noreg
  ; CHECK-NEXT:   renamable $lr = nuw nsw t2ADDrs killed renamable $lr, killed renamable $r12, 19, 14 /* CC::al */, $noreg, $noreg
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT: bb.2.vector.body:
  ; CHECK-NEXT:   successors: %bb.2(0x7c000000), %bb.3(0x04000000)
  ; CHECK-NEXT:   liveins: $lr, $r0, $r1, $r2, $r3
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   renamable $vpr = MVE_VCTP32 renamable $r3, 0, $noreg, $noreg
  ; CHECK-NEXT:   MVE_VPST 4, implicit $vpr
  ; CHECK-NEXT:   renamable $r1, renamable $q0 = MVE_VLDRWU32_post killed renamable $r1, 16, 1, renamable $vpr, $noreg :: (load (s128) from %ir.lsr.iv13, align 4)
  ; CHECK-NEXT:   renamable $r2, renamable $q1 = MVE_VLDRWU32_post killed renamable $r2, 16, 1, renamable $vpr, $noreg :: (load (s128) from %ir.lsr.iv1416, align 4)
  ; CHECK-NEXT:   renamable $r3, dead $cpsr = tSUBi8 killed renamable $r3, 5, 14 /* CC::al */, $noreg
  ; CHECK-NEXT:   renamable $q0 = nsw MVE_VADDi32 killed renamable $q1, killed renamable $q0, 0, $noreg, $noreg, undef renamable $q0
  ; CHECK-NEXT:   MVE_VPST 8, implicit $vpr
  ; CHECK-NEXT:   renamable $r0 = MVE_VSTRWU32_post killed renamable $q0, killed renamable $r0, 16, 1, killed renamable $vpr, $noreg :: (store (s128) into %ir.lsr.iv1719, align 4)
  ; CHECK-NEXT:   $lr = t2LEUpdate killed renamable $lr, %bb.2
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT: bb.3.for.cond.cleanup:
  ; CHECK-NEXT:   tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc
  bb.0.entry:
    successors: %bb.1(0x80000000)
    liveins: $r0, $r1, $r2, $r3, $r7, $lr

    frame-setup tPUSH 14, $noreg, killed $r7, killed $lr, implicit-def $sp, implicit $sp
    frame-setup CFI_INSTRUCTION def_cfa_offset 8
    frame-setup CFI_INSTRUCTION offset $lr, -4
    frame-setup CFI_INSTRUCTION offset $r7, -8
    tCMPi8 renamable $r3, 1, 14, $noreg, implicit-def $cpsr
    t2IT 11, 8, implicit-def $itstate
    tPOP_RET 11, killed $cpsr, def $r7, def $pc, implicit killed $itstate

  bb.1.vector.ph:
    successors: %bb.2(0x80000000)
    liveins: $r0, $r1, $r2, $r3, $r7, $lr

    renamable $r12 = t2ADDri renamable $r3, 3, 14, $noreg, $noreg
    renamable $lr = t2MOVi 1, 14, $noreg, $noreg
    renamable $r12 = t2BICri killed renamable $r12, 3, 14, $noreg, $noreg
    renamable $r12 = t2SUBri killed renamable $r12, 4, 14, $noreg, $noreg
    renamable $lr = nuw nsw t2ADDrs killed renamable $lr, killed renamable $r12, 19, 14, $noreg, $noreg
    $lr = t2DoLoopStart renamable $lr

  bb.2.vector.body:
    successors: %bb.2(0x7c000000), %bb.3(0x04000000)
    liveins: $lr, $r0, $r1, $r2, $r3

    renamable $vpr = MVE_VCTP32 renamable $r3, 0, $noreg, $noreg
    MVE_VPST 4, implicit $vpr
    renamable $r1, renamable $q0 = MVE_VLDRWU32_post killed renamable $r1, 16, 1, renamable $vpr, $noreg :: (load (s128) from %ir.lsr.iv13, align 4)
    renamable $r2, renamable $q1 = MVE_VLDRWU32_post killed renamable $r2, 16, 1, renamable $vpr, $noreg :: (load (s128) from %ir.lsr.iv1416, align 4)
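    ; The decrement below is deliberately 5 rather than 4 (the number of
    ; elements an MVE_VCTP32 predicates per iteration), so the pass has to
    ; keep this SUB and must not convert the loop to a tail-predicated form.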
    renamable $r3, dead $cpsr = tSUBi8 killed renamable $r3, 5, 14, $noreg
    renamable $q0 = nsw MVE_VADDi32 killed renamable $q1, killed renamable $q0, 0, $noreg, $noreg, undef renamable $q0
    MVE_VPST 8, implicit $vpr
    renamable $r0 = MVE_VSTRWU32_post killed renamable $q0, killed renamable $r0, 16, 1, killed renamable $vpr, $noreg :: (store (s128) into %ir.lsr.iv1719, align 4)
    renamable $lr = t2LoopDec killed renamable $lr, 1
    t2LoopEnd renamable $lr, %bb.2, implicit-def dead $cpsr
    tB %bb.3, 14, $noreg

  bb.3.for.cond.cleanup:
    tPOP_RET 14, $noreg, def $r7, def $pc