# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -mtriple=thumbv8.1m.main -mattr=+mve -run-pass=arm-low-overhead-loops %s -o - | FileCheck %s
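#
# Test that a loop-invariant vector operand (the incoming %pass value, loaded
# from the stack into $q0) feeding a VPT-predicated VADD does not block
# tail-predication: the checks below expect the explicit MVE_VCTP32 /
# t2DoLoopStart / t2LoopDec / t2LoopEnd sequence to be rewritten into
# MVE_DLSTP_32 / MVE_LETP, with the predicated VLDRH and VADD becoming
# unpredicated.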
--- |
  define dso_local <4 x i32> @invariant_predicated_add_use(ptr nocapture readonly %a, ptr %c, i32 %N, <4 x i32> %pass) #0 {
  entry:
    %cmp9 = icmp eq i32 %N, 0
    %tmp = add i32 %N, 3
    %tmp1 = lshr i32 %tmp, 2
    %tmp2 = shl nuw i32 %tmp1, 2
    %tmp3 = add i32 %tmp2, -4
    %tmp4 = lshr i32 %tmp3, 2
    %tmp5 = add nuw nsw i32 %tmp4, 1
    br i1 %cmp9, label %exit, label %vector.ph

  vector.ph:                                        ; preds = %entry
    %start = call i32 @llvm.start.loop.iterations.i32(i32 %tmp5)
    br label %vector.body

  vector.body:                                      ; preds = %vector.body, %vector.ph
    %lsr.iv1 = phi i32 [ %lsr.iv.next, %vector.body ], [ %start, %vector.ph ]
    %lsr.iv = phi ptr [ %scevgep, %vector.body ], [ %a, %vector.ph ]
    %tmp7 = phi i32 [ %N, %vector.ph ], [ %tmp9, %vector.body ]
    %lsr.iv17 = bitcast ptr %lsr.iv to ptr
    %tmp8 = call <4 x i1> @llvm.arm.mve.vctp32(i32 %tmp7)
    %tmp9 = sub i32 %tmp7, 4
    %wide.masked.load = call <4 x i16> @llvm.masked.load.v4i16.p0(ptr %lsr.iv17, i32 2, <4 x i1> %tmp8, <4 x i16> undef)
    %tmp10 = sext <4 x i16> %wide.masked.load to <4 x i32>
    %acc.next = tail call <4 x i32> @llvm.arm.mve.add.predicated.v4i32.v4i1(<4 x i32> %pass, <4 x i32> %tmp10, <4 x i1> %tmp8, <4 x i32> undef)
    %scevgep = getelementptr i16, ptr %lsr.iv, i32 4
    %tmp11 = call i32 @llvm.loop.decrement.reg.i32.i32.i32(i32 %lsr.iv1, i32 1)
    %tmp12 = icmp ne i32 %tmp11, 0
    %lsr.iv.next = add nsw i32 %lsr.iv1, -1
    br i1 %tmp12, label %vector.body, label %exit

  exit:                                             ; preds = %vector.body, %entry
    %res = phi <4 x i32> [ zeroinitializer, %entry ], [ %acc.next, %vector.body ]
    ret <4 x i32> %res
  }

  declare <4 x i16> @llvm.masked.load.v4i16.p0(ptr, i32 immarg, <4 x i1>, <4 x i16>)
  declare i32 @llvm.start.loop.iterations.i32(i32)
  declare i32 @llvm.loop.decrement.reg.i32.i32.i32(i32, i32)
  declare <4 x i1> @llvm.arm.mve.vctp32(i32)
  declare <4 x i32> @llvm.arm.mve.add.predicated.v4i32.v4i1(<4 x i32>, <4 x i32>, <4 x i1>, <4 x i32>)
...
---
name:            invariant_predicated_add_use
tracksRegLiveness: true
liveins:
  - { reg: '$r0', virtual-reg: '' }
  - { reg: '$r2', virtual-reg: '' }
fixedStack:
  - { id: 0, type: default, offset: 0, size: 16, alignment: 8, stack-id: default,
      isImmutable: true, isAliased: false, callee-saved-register: '', callee-saved-restored: true,
      debug-info-variable: '', debug-info-expression: '', debug-info-location: '' }
stack:
  - { id: 0, name: '', type: spill-slot, offset: -4, size: 4, alignment: 4,
      stack-id: default, callee-saved-register: '$lr', callee-saved-restored: false,
      debug-info-variable: '', debug-info-expression: '', debug-info-location: '' }
  - { id: 1, name: '', type: spill-slot, offset: -8, size: 4, alignment: 4,
      stack-id: default, callee-saved-register: '$r7', callee-saved-restored: true,
      debug-info-variable: '', debug-info-expression: '', debug-info-location: '' }
machineFunctionInfo: {}
body:             |
  ; CHECK-LABEL: name: invariant_predicated_add_use
  ; CHECK: bb.0.entry:
  ; CHECK-NEXT:   successors: %bb.3(0x30000000), %bb.1(0x50000000)
  ; CHECK-NEXT:   liveins: $lr, $r0, $r2, $r7
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r7, killed $lr, implicit-def $sp, implicit $sp
  ; CHECK-NEXT:   frame-setup CFI_INSTRUCTION def_cfa_offset 8
  ; CHECK-NEXT:   frame-setup CFI_INSTRUCTION offset $lr, -4
  ; CHECK-NEXT:   frame-setup CFI_INSTRUCTION offset $r7, -8
  ; CHECK-NEXT:   tCBZ $r2, %bb.3
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT: bb.1.vector.ph:
  ; CHECK-NEXT:   successors: %bb.2(0x80000000)
  ; CHECK-NEXT:   liveins: $r0, $r2
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   renamable $r1 = tADDrSPi $sp, 2, 14 /* CC::al */, $noreg
  ; CHECK-NEXT:   renamable $q0 = MVE_VLDRWU32 killed renamable $r1, 0, 0, $noreg, $noreg :: (load (s128) from %fixed-stack.0, align 8)
  ; CHECK-NEXT:   $lr = MVE_DLSTP_32 killed renamable $r2
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT: bb.2.vector.body:
  ; CHECK-NEXT:   successors: %bb.2(0x7c000000), %bb.4(0x04000000)
  ; CHECK-NEXT:   liveins: $lr, $q0, $r0
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   renamable $r0, renamable $q1 = MVE_VLDRHS32_post killed renamable $r0, 8, 0, $noreg, $noreg :: (load (s64) from %ir.lsr.iv17, align 2)
  ; CHECK-NEXT:   renamable $q1 = MVE_VADDi32 renamable $q0, killed renamable $q1, 0, killed $noreg, $noreg, undef renamable $q1
  ; CHECK-NEXT:   $lr = MVE_LETP killed renamable $lr, %bb.2
  ; CHECK-NEXT:   tB %bb.4, 14 /* CC::al */, $noreg
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT: bb.3:
  ; CHECK-NEXT:   successors: %bb.4(0x80000000)
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   renamable $q1 = MVE_VMOVimmi32 0, 0, $noreg, $noreg, undef renamable $q1
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT: bb.4.exit:
  ; CHECK-NEXT:   liveins: $q1
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   renamable $r0, renamable $r1 = VMOVRRD renamable $d2, 14 /* CC::al */, $noreg
  ; CHECK-NEXT:   renamable $r2, renamable $r3 = VMOVRRD killed renamable $d3, 14 /* CC::al */, $noreg, implicit killed $q1
  ; CHECK-NEXT:   tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc, implicit killed $r0, implicit killed $r1, implicit killed $r2, implicit killed $r3
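  ; The unprocessed input below still uses the generic hardware-loop pseudos
  ; (t2DoLoopStart / t2LoopDec / t2LoopEnd) plus an explicit MVE_VCTP32 and
  ; VPST-predicated load/add; the CHECK lines above describe the expected
  ; DLSTP/LETP form after the arm-low-overhead-loops pass runs.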
  bb.0.entry:
    successors: %bb.3(0x30000000), %bb.1(0x50000000)
    liveins: $r0, $r2, $r7, $lr

    frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r7, killed $lr, implicit-def $sp, implicit $sp
    frame-setup CFI_INSTRUCTION def_cfa_offset 8
    frame-setup CFI_INSTRUCTION offset $lr, -4
    frame-setup CFI_INSTRUCTION offset $r7, -8
    tCBZ $r2, %bb.3

  bb.1.vector.ph:
    successors: %bb.2(0x80000000)
    liveins: $r0, $r2

    renamable $r1, dead $cpsr = tADDi3 renamable $r2, 3, 14 /* CC::al */, $noreg
    renamable $r3, dead $cpsr = tMOVi8 1, 14 /* CC::al */, $noreg
    renamable $r1 = t2BICri killed renamable $r1, 3, 14 /* CC::al */, $noreg, $noreg
    renamable $r1, dead $cpsr = tSUBi8 killed renamable $r1, 4, 14 /* CC::al */, $noreg
    renamable $r3 = nuw nsw t2ADDrs killed renamable $r3, killed renamable $r1, 19, 14 /* CC::al */, $noreg, $noreg
    renamable $r1 = tADDrSPi $sp, 2, 14 /* CC::al */, $noreg
    renamable $q0 = MVE_VLDRWU32 killed renamable $r1, 0, 0, $noreg, $noreg :: (load (s128) from %fixed-stack.0, align 8)
    $lr = t2DoLoopStart renamable $r3
    $r1 = tMOVr killed $r3, 14 /* CC::al */, $noreg

  bb.2.vector.body:
    successors: %bb.2(0x7c000000), %bb.4(0x04000000)
    liveins: $q0, $r0, $r1, $r2

    renamable $vpr = MVE_VCTP32 renamable $r2, 0, $noreg, $noreg
    $lr = tMOVr $r1, 14 /* CC::al */, $noreg
    renamable $r1, dead $cpsr = nsw tSUBi8 killed $r1, 1, 14 /* CC::al */, $noreg
    renamable $r2, dead $cpsr = tSUBi8 killed renamable $r2, 4, 14 /* CC::al */, $noreg
    renamable $lr = t2LoopDec killed renamable $lr, 1
    MVE_VPST 4, implicit $vpr
    renamable $r0, renamable $q1 = MVE_VLDRHS32_post killed renamable $r0, 8, 1, renamable $vpr, $noreg :: (load (s64) from %ir.lsr.iv17, align 2)
    renamable $q1 = MVE_VADDi32 renamable $q0, killed renamable $q1, 1, killed renamable $vpr, $noreg, undef renamable $q1
    t2LoopEnd killed renamable $lr, %bb.2, implicit-def dead $cpsr
    tB %bb.4, 14 /* CC::al */, $noreg

  bb.3:
    successors: %bb.4(0x80000000)

    renamable $q1 = MVE_VMOVimmi32 0, 0, $noreg, $noreg, undef renamable $q1

  bb.4.exit:
    liveins: $q1

    renamable $r0, renamable $r1 = VMOVRRD renamable $d2, 14 /* CC::al */, $noreg
    renamable $r2, renamable $r3 = VMOVRRD killed renamable $d3, 14 /* CC::al */, $noreg, implicit $q1
    tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc, implicit killed $r0, implicit killed $r1, implicit killed $r2, implicit killed $r3