# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -mtriple=thumbv8.1m.main -mattr=+mve -run-pass=arm-low-overhead-loops --verify-machineinstrs %s -o - | FileCheck %s

# Test that, even though the vctp operand is defined at the end of the block,
# the correct value is used for the dlstp.
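#
# arm-low-overhead-loops rewrites the t2DoLoopStart / t2LoopDec / t2LoopEnd
# pseudos into the hardware loop instructions (DLS/LE, or the tail-predicated
# DLSTP/LETP when the MVE_VCTP can be folded away). In the input MIR below the
# element count feeding MVE_VCTP32 ($r12) is only defined after the
# t2DoLoopStart in the preheader, which is the ordering this test exercises.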

--- |
  define dso_local arm_aapcs_vfpcc void @start_before_elems(ptr noalias nocapture %a, ptr nocapture readonly %b, ptr nocapture readonly %c, i32 %N) local_unnamed_addr #0 {
  entry:
    %div = lshr i32 %N, 1
    %cmp9 = icmp eq i32 %div, 0
    %0 = add nuw i32 %div, 3
    %1 = lshr i32 %0, 2
    %2 = shl nuw i32 %1, 2
    %3 = add i32 %2, -4
    %4 = lshr i32 %3, 2
    %5 = add nuw nsw i32 %4, 1
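    ; %5 is the trip count for the hardware loop: ceil(%div / 4) iterations,
    ; each handling four i32 elements.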
    br i1 %cmp9, label %for.cond.cleanup, label %vector.ph

  vector.ph: ; preds = %entry
    %start = call i32 @llvm.start.loop.iterations.i32(i32 %5)
    br label %vector.body

  vector.body: ; preds = %vector.body, %vector.ph
    %lsr.iv = phi ptr [ %scevgep, %vector.body ], [ %a, %vector.ph ]
    %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
    %6 = phi i32 [ %start, %vector.ph ], [ %13, %vector.body ]
    %7 = phi i32 [ %div, %vector.ph ], [ %9, %vector.body ]
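    ; %7 carries the number of elements still to be processed: it starts at
    ; %div, drops by 4 each iteration, and is the value the vctp below turns
    ; into the active-lane predicate.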
    %lsr.iv1 = bitcast ptr %lsr.iv to ptr
    %8 = call <4 x i1> @llvm.arm.mve.vctp32(i32 %7)
    %9 = sub i32 %7, 4
    %scevgep4 = getelementptr i8, ptr %b, i32 %index
    %scevgep45 = bitcast ptr %scevgep4 to ptr
    %wide.masked.load = call <4 x i8> @llvm.masked.load.v4i8.p0(ptr %scevgep45, i32 1, <4 x i1> %8, <4 x i8> undef)
    %10 = zext <4 x i8> %wide.masked.load to <4 x i32>
    %scevgep2 = getelementptr i8, ptr %c, i32 %index
    %scevgep23 = bitcast ptr %scevgep2 to ptr
    %wide.masked.load13 = call <4 x i8> @llvm.masked.load.v4i8.p0(ptr %scevgep23, i32 1, <4 x i1> %8, <4 x i8> undef)
    %11 = zext <4 x i8> %wide.masked.load13 to <4 x i32>
    %12 = mul nuw nsw <4 x i32> %11, %10
    call void @llvm.masked.store.v4i32.p0(<4 x i32> %12, ptr %lsr.iv1, i32 4, <4 x i1> %8)
    %index.next = add i32 %index, 4
    %scevgep = getelementptr i32, ptr %lsr.iv, i32 4
    %13 = call i32 @llvm.loop.decrement.reg.i32.i32.i32(i32 %6, i32 1)
    %14 = icmp ne i32 %13, 0
    br i1 %14, label %vector.body, label %for.cond.cleanup

  for.cond.cleanup: ; preds = %vector.body, %entry
    ret void
  }

  declare <4 x i8> @llvm.masked.load.v4i8.p0(ptr, i32 immarg, <4 x i1>, <4 x i8>) #1
  declare void @llvm.masked.store.v4i32.p0(<4 x i32>, ptr, i32 immarg, <4 x i1>) #2
  declare i32 @llvm.start.loop.iterations.i32(i32) #3
  declare i32 @llvm.loop.decrement.reg.i32.i32.i32(i32, i32) #3
  declare <4 x i1> @llvm.arm.mve.vctp32(i32) #4
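
  ; Intrinsics used above: llvm.start.loop.iterations marks the preheader with
  ; the hardware-loop trip count, llvm.loop.decrement.reg decrements it each
  ; iteration, and llvm.arm.mve.vctp32 produces the <4 x i1> lane predicate
  ; from the number of elements still to be processed.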

...
---
name: start_before_elems
exposesReturnsTwice: false
regBankSelected: false
tracksRegLiveness: true
liveins:
  - { reg: '$r0', virtual-reg: '' }
  - { reg: '$r1', virtual-reg: '' }
  - { reg: '$r2', virtual-reg: '' }
  - { reg: '$r3', virtual-reg: '' }
frameInfo:
  isFrameAddressTaken: false
  isReturnAddressTaken: false
  cvBytesOfCalleeSavedRegisters: 0
  hasOpaqueSPAdjustment: false
  hasMustTailInVarArgFunc: false
fixedStack:
  - { id: 0, name: '', type: spill-slot, offset: -4, size: 4, alignment: 4,
      stack-id: default, callee-saved-register: '$lr', callee-saved-restored: false,
      debug-info-variable: '', debug-info-expression: '', debug-info-location: '' }
  - { id: 1, name: '', type: spill-slot, offset: -8, size: 4, alignment: 4,
      stack-id: default, callee-saved-register: '$r4', callee-saved-restored: true,
      debug-info-variable: '', debug-info-expression: '', debug-info-location: '' }
machineFunctionInfo: {}
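# The CHECK lines in the body below were produced by the update script named
# at the top of the file; the unprefixed MIR after them is the pass input,
# which still uses the t2DoLoopStart / t2LoopDec / t2LoopEnd pseudos.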
body: |
  ; CHECK-LABEL: name: start_before_elems
  ; CHECK: bb.0.entry:
  ; CHECK-NEXT: successors: %bb.1(0x80000000)
  ; CHECK-NEXT: liveins: $lr, $r0, $r1, $r2, $r3, $r4
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r4, killed $lr, implicit-def $sp, implicit $sp
  ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 8
  ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $lr, -4
  ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r4, -8
  ; CHECK-NEXT: renamable $r12 = t2MOVi 0, 14 /* CC::al */, $noreg, $noreg
  ; CHECK-NEXT: t2CMPrs killed renamable $r12, renamable $r3, 11, 14 /* CC::al */, $noreg, implicit-def $cpsr
  ; CHECK-NEXT: t2IT 0, 8, implicit-def $itstate
  ; CHECK-NEXT: tPOP_RET 0 /* CC::eq */, killed $cpsr, def $r4, def $pc, implicit killed $itstate
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT: bb.1.vector.ph:
  ; CHECK-NEXT: successors: %bb.2(0x80000000)
  ; CHECK-NEXT: liveins: $r0, $r1, $r2, $r3
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT: renamable $r12 = t2MOVi 3, 14 /* CC::al */, $noreg, $noreg
  ; CHECK-NEXT: renamable $lr = t2MOVi 1, 14 /* CC::al */, $noreg, $noreg
  ; CHECK-NEXT: renamable $r12 = nuw t2ADDrs killed renamable $r12, renamable $r3, 11, 14 /* CC::al */, $noreg, $noreg
  ; CHECK-NEXT: renamable $r12 = t2BICri killed renamable $r12, 3, 14 /* CC::al */, $noreg, $noreg
  ; CHECK-NEXT: renamable $r12 = t2SUBri killed renamable $r12, 4, 14 /* CC::al */, $noreg, $noreg
  ; CHECK-NEXT: renamable $lr = nuw nsw t2ADDrs killed renamable $lr, killed renamable $r12, 19, 14 /* CC::al */, $noreg, $noreg
  ; CHECK-NEXT: $r12 = t2MOVr killed $r3, 14 /* CC::al */, $noreg, $noreg
  ; CHECK-NEXT: renamable $r3, dead $cpsr = tMOVi8 0, 14 /* CC::al */, $noreg
  ; CHECK-NEXT: renamable $r12 = t2LSRri killed renamable $r12, 1, 14 /* CC::al */, $noreg, $noreg
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT: bb.2.vector.body:
  ; CHECK-NEXT: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
  ; CHECK-NEXT: liveins: $lr, $r0, $r1, $r2, $r3, $r12
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT: renamable $r4, dead $cpsr = tADDrr renamable $r1, renamable $r3, 14 /* CC::al */, $noreg
  ; CHECK-NEXT: renamable $vpr = MVE_VCTP32 renamable $r12, 0, $noreg, $noreg
  ; CHECK-NEXT: MVE_VPST 8, implicit $vpr
  ; CHECK-NEXT: renamable $q0 = MVE_VLDRBU32 killed renamable $r4, 0, 1, renamable $vpr, $noreg :: (load (s32) from %ir.scevgep45, align 1)
  ; CHECK-NEXT: renamable $r4, dead $cpsr = tADDrr renamable $r2, renamable $r3, 14 /* CC::al */, $noreg
  ; CHECK-NEXT: renamable $r3, dead $cpsr = tADDi8 killed renamable $r3, 4, 14 /* CC::al */, $noreg
  ; CHECK-NEXT: renamable $r12 = t2SUBri killed renamable $r12, 4, 14 /* CC::al */, $noreg, $noreg
  ; CHECK-NEXT: MVE_VPST 8, implicit $vpr
  ; CHECK-NEXT: renamable $q1 = MVE_VLDRBU32 killed renamable $r4, 0, 1, renamable $vpr, $noreg :: (load (s32) from %ir.scevgep23, align 1)
  ; CHECK-NEXT: renamable $q0 = nuw nsw MVE_VMULi32 killed renamable $q1, killed renamable $q0, 0, $noreg, $noreg, undef renamable $q0
  ; CHECK-NEXT: MVE_VPST 8, implicit $vpr
  ; CHECK-NEXT: renamable $r0 = MVE_VSTRWU32_post killed renamable $q0, killed renamable $r0, 16, 1, killed renamable $vpr, $noreg :: (store (s128) into %ir.lsr.iv1, align 4)
  ; CHECK-NEXT: $lr = t2LEUpdate killed renamable $lr, %bb.2
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT: bb.3.for.cond.cleanup:
  ; CHECK-NEXT: tPOP_RET 14 /* CC::al */, $noreg, def $r4, def $pc
  bb.0.entry:
    successors: %bb.1(0x80000000)
    liveins: $r0, $r1, $r2, $r3, $r4, $lr

    frame-setup tPUSH 14, $noreg, killed $r4, killed $lr, implicit-def $sp, implicit $sp
    frame-setup CFI_INSTRUCTION def_cfa_offset 8
    frame-setup CFI_INSTRUCTION offset $lr, -4
    frame-setup CFI_INSTRUCTION offset $r4, -8
    renamable $r12 = t2MOVi 0, 14, $noreg, $noreg
    t2CMPrs killed renamable $r12, renamable $r3, 11, 14, $noreg, implicit-def $cpsr
    t2IT 0, 8, implicit-def $itstate
    tPOP_RET 0, killed $cpsr, def $r4, def $pc, implicit killed $itstate

  bb.1.vector.ph:
    successors: %bb.2(0x80000000)
    liveins: $r0, $r1, $r2, $r3, $r4, $lr

    renamable $r12 = t2MOVi 3, 14, $noreg, $noreg
    renamable $lr = t2MOVi 1, 14, $noreg, $noreg
    renamable $r12 = nuw t2ADDrs killed renamable $r12, renamable $r3, 11, 14, $noreg, $noreg
    renamable $r12 = t2BICri killed renamable $r12, 3, 14, $noreg, $noreg
    renamable $r12 = t2SUBri killed renamable $r12, 4, 14, $noreg, $noreg
    renamable $lr = nuw nsw t2ADDrs killed renamable $lr, killed renamable $r12, 19, 14, $noreg, $noreg
    $lr = t2DoLoopStart renamable $lr
    $r12 = t2MOVr killed $r3, 14, $noreg, $noreg
    renamable $r3, dead $cpsr = tMOVi8 0, 14, $noreg
    renamable $r12 = t2LSRri killed renamable $r12, 1, 14, $noreg, $noreg

  bb.2.vector.body:
    successors: %bb.2(0x7c000000), %bb.3(0x04000000)
    liveins: $lr, $r0, $r1, $r2, $r3, $r12

    renamable $r4, dead $cpsr = tADDrr renamable $r1, renamable $r3, 14, $noreg
    renamable $vpr = MVE_VCTP32 renamable $r12, 0, $noreg, $noreg
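    ; Each MVE_VPST 8 below opens a one-instruction VPT block, so the load or
    ; store that follows it executes under this $vpr lane predicate.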
    MVE_VPST 8, implicit $vpr
    renamable $q0 = MVE_VLDRBU32 killed renamable $r4, 0, 1, renamable $vpr, $noreg :: (load (s32) from %ir.scevgep45, align 1)
    renamable $r4, dead $cpsr = tADDrr renamable $r2, renamable $r3, 14, $noreg
    renamable $r3, dead $cpsr = tADDi8 killed renamable $r3, 4, 14, $noreg
    renamable $r12 = t2SUBri killed renamable $r12, 4, 14, $noreg, $noreg
    MVE_VPST 8, implicit $vpr
    renamable $q1 = MVE_VLDRBU32 killed renamable $r4, 0, 1, renamable $vpr, $noreg :: (load (s32) from %ir.scevgep23, align 1)
    renamable $q0 = nuw nsw MVE_VMULi32 killed renamable $q1, killed renamable $q0, 0, $noreg, $noreg, undef renamable $q0
    MVE_VPST 8, implicit $vpr
    renamable $r0 = MVE_VSTRWU32_post killed renamable $q0, killed renamable $r0, 16, 1, killed renamable $vpr, $noreg :: (store (s128) into %ir.lsr.iv1, align 4)
    renamable $lr = t2LoopDec killed renamable $lr, 1
    t2LoopEnd renamable $lr, %bb.2, implicit-def dead $cpsr
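    ; t2LoopDec and t2LoopEnd together form the backwards branch that the
    ; pass collapses into the single t2LEUpdate seen in the CHECK lines above.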

  bb.3.for.cond.cleanup:
    tPOP_RET 14, $noreg, def $r4, def $pc