; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=thumbv8.1m.main -mattr=+mve -tail-predication=enabled %s -o - | FileCheck %s
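
; The IR below is the kind of tail-folded loop the vectorizer produces for
; extending loads: a masked load of the narrow type, a sext/zext to the wide
; type, and a masked store, all predicated on @llvm.get.active.lane.mask.
; With -tail-predication=enabled these should compile to dlstp/letp
; low-overhead loops using MVE widening loads (vldrb.s16/.u16 and
; vldrh.s32/.u32), with no explicit mask computation left in the loop body.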
define dso_local arm_aapcs_vfpcc void @sext_i8(i16* noalias nocapture %a, i8* nocapture readonly %b, i32 %N) {
; CHECK-LABEL: sext_i8:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    push {r7, lr}
; CHECK-NEXT:    cmp r2, #0
; CHECK-NEXT:    it eq
; CHECK-NEXT:    popeq {r7, pc}
; CHECK-NEXT:  .LBB0_1: @ %vector.ph
; CHECK-NEXT:    dlstp.16 lr, r2
; CHECK-NEXT:  .LBB0_2: @ %vector.body
; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    vldrb.s16 q0, [r1], #8
; CHECK-NEXT:    vldrh.u16 q1, [r0]
; CHECK-NEXT:    vadd.i16 q0, q1, q0
; CHECK-NEXT:    vstrh.16 q0, [r0], #16
; CHECK-NEXT:    letp lr, .LBB0_2
; CHECK-NEXT:  @ %bb.3: @ %for.cond.cleanup
; CHECK-NEXT:    pop {r7, pc}
entry:
  %cmp8 = icmp eq i32 %N, 0
  br i1 %cmp8, label %for.cond.cleanup, label %vector.ph

vector.ph:                                        ; preds = %entry
  %n.rnd.up = add i32 %N, 7
  %n.vec = and i32 %n.rnd.up, -8
  br label %vector.body

vector.body:                                      ; preds = %vector.body, %vector.ph
  %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
  %0 = getelementptr inbounds i8, i8* %b, i32 %index
  %1 = call <8 x i1> @llvm.get.active.lane.mask.v8i1.i32(i32 %index, i32 %N)
  %2 = bitcast i8* %0 to <8 x i8>*
  %wide.masked.load = call <8 x i8> @llvm.masked.load.v8i8.p0v8i8(<8 x i8>* %2, i32 1, <8 x i1> %1, <8 x i8> undef)
  %3 = sext <8 x i8> %wide.masked.load to <8 x i16>
  %4 = getelementptr inbounds i16, i16* %a, i32 %index
  %5 = bitcast i16* %4 to <8 x i16>*
  %wide.masked.load12 = call <8 x i16> @llvm.masked.load.v8i16.p0v8i16(<8 x i16>* %5, i32 2, <8 x i1> %1, <8 x i16> undef)
  %6 = add <8 x i16> %wide.masked.load12, %3
  %7 = bitcast i16* %4 to <8 x i16>*
  call void @llvm.masked.store.v8i16.p0v8i16(<8 x i16> %6, <8 x i16>* %7, i32 2, <8 x i1> %1)
  %index.next = add i32 %index, 8
  %8 = icmp eq i32 %index.next, %n.vec
  br i1 %8, label %for.cond.cleanup, label %vector.body

for.cond.cleanup:                                 ; preds = %vector.body, %entry
  ret void
}
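
; Same pattern as @sext_i8, but with a zero-extended load, so the expected
; widening load is the unsigned vldrb.u16.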
; Function Attrs: nofree norecurse nounwind
define dso_local arm_aapcs_vfpcc void @zext_i8(i16* noalias nocapture %a, i8* nocapture readonly %b, i32 %N) local_unnamed_addr #0 {
; CHECK-LABEL: zext_i8:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    push {r7, lr}
; CHECK-NEXT:    cmp r2, #0
; CHECK-NEXT:    it eq
; CHECK-NEXT:    popeq {r7, pc}
; CHECK-NEXT:  .LBB1_1: @ %vector.ph
; CHECK-NEXT:    dlstp.16 lr, r2
; CHECK-NEXT:  .LBB1_2: @ %vector.body
; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    vldrb.u16 q0, [r1], #8
; CHECK-NEXT:    vldrh.u16 q1, [r0]
; CHECK-NEXT:    vadd.i16 q0, q1, q0
; CHECK-NEXT:    vstrh.16 q0, [r0], #16
; CHECK-NEXT:    letp lr, .LBB1_2
; CHECK-NEXT:  @ %bb.3: @ %for.cond.cleanup
; CHECK-NEXT:    pop {r7, pc}
entry:
  %cmp8 = icmp eq i32 %N, 0
  br i1 %cmp8, label %for.cond.cleanup, label %vector.ph

vector.ph:                                        ; preds = %entry
  %n.rnd.up = add i32 %N, 7
  %n.vec = and i32 %n.rnd.up, -8
  br label %vector.body

vector.body:                                      ; preds = %vector.body, %vector.ph
  %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
  %0 = getelementptr inbounds i8, i8* %b, i32 %index
  %1 = call <8 x i1> @llvm.get.active.lane.mask.v8i1.i32(i32 %index, i32 %N)
  %2 = bitcast i8* %0 to <8 x i8>*
  %wide.masked.load = call <8 x i8> @llvm.masked.load.v8i8.p0v8i8(<8 x i8>* %2, i32 1, <8 x i1> %1, <8 x i8> undef)
  %3 = zext <8 x i8> %wide.masked.load to <8 x i16>
  %4 = getelementptr inbounds i16, i16* %a, i32 %index
  %5 = bitcast i16* %4 to <8 x i16>*
  %wide.masked.load12 = call <8 x i16> @llvm.masked.load.v8i16.p0v8i16(<8 x i16>* %5, i32 2, <8 x i1> %1, <8 x i16> undef)
  %6 = add <8 x i16> %wide.masked.load12, %3
  %7 = bitcast i16* %4 to <8 x i16>*
  call void @llvm.masked.store.v8i16.p0v8i16(<8 x i16> %6, <8 x i16>* %7, i32 2, <8 x i1> %1)
  %index.next = add i32 %index, 8
  %8 = icmp eq i32 %index.next, %n.vec
  br i1 %8, label %for.cond.cleanup, label %vector.body

for.cond.cleanup:                                 ; preds = %vector.body, %entry
  ret void
}
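
; Widening from i16 to i32 instead: four lanes per iteration, so the loop
; should use dlstp.32/letp with a signed widening load (vldrh.s32).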
; Function Attrs: nofree norecurse nounwind
define dso_local arm_aapcs_vfpcc void @sext_i16(i32* noalias nocapture %a, i16* nocapture readonly %b, i32 %N) local_unnamed_addr #0 {
; CHECK-LABEL: sext_i16:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    push {r7, lr}
; CHECK-NEXT:    cmp r2, #0
; CHECK-NEXT:    it eq
; CHECK-NEXT:    popeq {r7, pc}
; CHECK-NEXT:  .LBB2_1: @ %vector.ph
; CHECK-NEXT:    dlstp.32 lr, r2
; CHECK-NEXT:  .LBB2_2: @ %vector.body
; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    vldrh.s32 q0, [r1], #8
; CHECK-NEXT:    vldrw.u32 q1, [r0]
; CHECK-NEXT:    vadd.i32 q0, q1, q0
; CHECK-NEXT:    vstrw.32 q0, [r0], #16
; CHECK-NEXT:    letp lr, .LBB2_2
; CHECK-NEXT:  @ %bb.3: @ %for.cond.cleanup
; CHECK-NEXT:    pop {r7, pc}
entry:
  %cmp6 = icmp eq i32 %N, 0
  br i1 %cmp6, label %for.cond.cleanup, label %vector.ph

vector.ph:                                        ; preds = %entry
  %n.rnd.up = add i32 %N, 3
  %n.vec = and i32 %n.rnd.up, -4
  br label %vector.body

vector.body:                                      ; preds = %vector.body, %vector.ph
  %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
  %0 = getelementptr inbounds i16, i16* %b, i32 %index
  %1 = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 %index, i32 %N)
  %2 = bitcast i16* %0 to <4 x i16>*
  %wide.masked.load = call <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>* %2, i32 2, <4 x i1> %1, <4 x i16> undef)
  %3 = sext <4 x i16> %wide.masked.load to <4 x i32>
  %4 = getelementptr inbounds i32, i32* %a, i32 %index
  %5 = bitcast i32* %4 to <4 x i32>*
  %wide.masked.load10 = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %5, i32 4, <4 x i1> %1, <4 x i32> undef)
  %6 = add nsw <4 x i32> %wide.masked.load10, %3
  %7 = bitcast i32* %4 to <4 x i32>*
  call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %6, <4 x i32>* %7, i32 4, <4 x i1> %1)
  %index.next = add i32 %index, 4
  %8 = icmp eq i32 %index.next, %n.vec
  br i1 %8, label %for.cond.cleanup, label %vector.body

for.cond.cleanup:                                 ; preds = %vector.body, %entry
  ret void
}
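
; As @sext_i16, but zero-extending, so the expected load is vldrh.u32.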
; Function Attrs: nofree norecurse nounwind
define dso_local arm_aapcs_vfpcc void @zext_i16(i32* noalias nocapture %a, i16* nocapture readonly %b, i32 %N) local_unnamed_addr #0 {
; CHECK-LABEL: zext_i16:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    push {r7, lr}
; CHECK-NEXT:    cmp r2, #0
; CHECK-NEXT:    it eq
; CHECK-NEXT:    popeq {r7, pc}
; CHECK-NEXT:  .LBB3_1: @ %vector.ph
; CHECK-NEXT:    dlstp.32 lr, r2
; CHECK-NEXT:  .LBB3_2: @ %vector.body
; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    vldrh.u32 q0, [r1], #8
; CHECK-NEXT:    vldrw.u32 q1, [r0]
; CHECK-NEXT:    vadd.i32 q0, q1, q0
; CHECK-NEXT:    vstrw.32 q0, [r0], #16
; CHECK-NEXT:    letp lr, .LBB3_2
; CHECK-NEXT:  @ %bb.3: @ %for.cond.cleanup
; CHECK-NEXT:    pop {r7, pc}
entry:
  %cmp6 = icmp eq i32 %N, 0
  br i1 %cmp6, label %for.cond.cleanup, label %vector.ph

vector.ph:                                        ; preds = %entry
  %n.rnd.up = add i32 %N, 3
  %n.vec = and i32 %n.rnd.up, -4
  br label %vector.body

vector.body:                                      ; preds = %vector.body, %vector.ph
  %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
  %0 = getelementptr inbounds i16, i16* %b, i32 %index
  %1 = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 %index, i32 %N)
  %2 = bitcast i16* %0 to <4 x i16>*
  %wide.masked.load = call <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>* %2, i32 2, <4 x i1> %1, <4 x i16> undef)
  %3 = zext <4 x i16> %wide.masked.load to <4 x i32>
  %4 = getelementptr inbounds i32, i32* %a, i32 %index
  %5 = bitcast i32* %4 to <4 x i32>*
  %wide.masked.load10 = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %5, i32 4, <4 x i1> %1, <4 x i32> undef)
  %6 = add <4 x i32> %wide.masked.load10, %3
  %7 = bitcast i32* %4 to <4 x i32>*
  call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %6, <4 x i32>* %7, i32 4, <4 x i1> %1)
  %index.next = add i32 %index, 4
  %8 = icmp eq i32 %index.next, %n.vec
  br i1 %8, label %for.cond.cleanup, label %vector.body

for.cond.cleanup:                                 ; preds = %vector.body, %entry
  ret void
}

declare <8 x i8> @llvm.masked.load.v8i8.p0v8i8(<8 x i8>*, i32 immarg, <8 x i1>, <8 x i8>)
declare <8 x i16> @llvm.masked.load.v8i16.p0v8i16(<8 x i16>*, i32 immarg, <8 x i1>, <8 x i16>)
declare void @llvm.masked.store.v8i16.p0v8i16(<8 x i16>, <8 x i16>*, i32 immarg, <8 x i1>)
declare <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>*, i32 immarg, <4 x i1>, <4 x i16>)
declare <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>*, i32 immarg, <4 x i1>, <4 x i32>)
declare void @llvm.masked.store.v4i32.p0v4i32(<4 x i32>, <4 x i32>*, i32 immarg, <4 x i1>)

declare <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32, i32)
declare <8 x i1> @llvm.get.active.lane.mask.v8i1.i32(i32, i32)

attributes #0 = { nofree norecurse nounwind }