; RUN: opt -mtriple=thumbv8.1m.main -mve-tail-predication -tail-predication=enabled -mattr=+mve %s -S -o - | FileCheck %s
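
; The -mve-tail-predication pass is expected to replace the
; @llvm.get.active.lane.mask calls below with @llvm.arm.mve.vctp16/vctp32,
; feeding the resulting predicate to the masked loads and decrementing the
; count of remaining elements by the vector width on each iteration.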

; CHECK-LABEL: reduction_i32
; CHECK: phi i32 [ 0, %vector.ph ]
; CHECK: phi <8 x i16> [ zeroinitializer, %vector.ph ]
; CHECK: [[PHI:%[^ ]+]] = phi i32 [ %N, %vector.ph ], [ [[ELEMS:%[^ ]+]], %vector.body ]
; CHECK: [[VCTP:%[^ ]+]] = call <8 x i1> @llvm.arm.mve.vctp16(i32 [[PHI]])
; CHECK: [[ELEMS]] = sub i32 [[PHI]], 8
; CHECK: call <8 x i16> @llvm.masked.load.v8i16.p0v8i16(<8 x i16>* %tmp4, i32 4, <8 x i1> [[VCTP]], <8 x i16> undef)
; CHECK: call <8 x i16> @llvm.masked.load.v8i16.p0v8i16(<8 x i16>* %tmp6, i32 4, <8 x i1> [[VCTP]], <8 x i16> undef)
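;
; Scalar equivalent of the loop below (illustration only, not part of the test):
;   int16_t acc = 0;
;   for (uint32_t i = 0; i < N; i++)
;     acc += A[i] + B[i];
;   return acc;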

define i16 @reduction_i32(i16* nocapture readonly %A, i16* nocapture readonly %B, i32 %N) {
entry:
  %cmp8 = icmp eq i32 %N, 0
  br i1 %cmp8, label %for.cond.cleanup, label %vector.ph
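
; vector.ph rounds N up to a multiple of 8 (n.vec) and derives the
; hardware-loop trip count: ((n.vec - 8) >> 3) + 1, i.e. n.vec / 8 iterations.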

vector.ph:                                        ; preds = %entry
  %tmp = add i32 %N, -1
  %n.rnd.up = add i32 %tmp, 8
  %n.vec = and i32 %n.rnd.up, -8
  %0 = add i32 %n.vec, -8
  %1 = lshr i32 %0, 3
  %2 = add nuw nsw i32 %1, 1
  %start = call i32 @llvm.start.loop.iterations.i32(i32 %2)
  br label %vector.body

vector.body:                                      ; preds = %vector.body, %vector.ph
  %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
  %vec.phi = phi <8 x i16> [ zeroinitializer, %vector.ph ], [ %tmp8, %vector.body ]
  %3 = phi i32 [ %start, %vector.ph ], [ %4, %vector.body ]
  %tmp2 = getelementptr inbounds i16, i16* %A, i32 %index
  %tmp3 = call <8 x i1> @llvm.get.active.lane.mask.v8i1.i32(i32 %index, i32 %N)
  %tmp4 = bitcast i16* %tmp2 to <8 x i16>*
  %wide.masked.load = call <8 x i16> @llvm.masked.load.v8i16.p0v8i16(<8 x i16>* %tmp4, i32 4, <8 x i1> %tmp3, <8 x i16> undef)
  %tmp5 = getelementptr inbounds i16, i16* %B, i32 %index
  %tmp6 = bitcast i16* %tmp5 to <8 x i16>*
  %wide.masked.load3 = call <8 x i16> @llvm.masked.load.v8i16.p0v8i16(<8 x i16>* %tmp6, i32 4, <8 x i1> %tmp3, <8 x i16> undef)
  %tmp7 = add <8 x i16> %wide.masked.load, %vec.phi
  %tmp8 = add <8 x i16> %tmp7, %wide.masked.load3
  %index.next = add i32 %index, 8
  %4 = call i32 @llvm.loop.decrement.reg.i32.i32.i32(i32 %3, i32 1)
  %5 = icmp ne i32 %4, 0
  br i1 %5, label %vector.body, label %middle.block

middle.block:                                     ; preds = %vector.body
  %vec.phi.lcssa = phi <8 x i16> [ %vec.phi, %vector.body ]
  %.lcssa3 = phi <8 x i1> [ %tmp3, %vector.body ]
  %.lcssa = phi <8 x i16> [ %tmp8, %vector.body ]
  %tmp10 = select <8 x i1> %.lcssa3, <8 x i16> %.lcssa, <8 x i16> %vec.phi.lcssa
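  ; Horizontal tree reduction: halve the live lanes with a shuffle and add,
  ; three times for 8 lanes (log2(8) = 3), then extract lane 0.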
  %rdx.shuf = shufflevector <8 x i16> %tmp10, <8 x i16> undef, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef>
  %bin.rdx = add <8 x i16> %rdx.shuf, %tmp10
  %rdx.shuf4 = shufflevector <8 x i16> %bin.rdx, <8 x i16> undef, <8 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
  %bin.rdx5 = add <8 x i16> %rdx.shuf4, %bin.rdx
  %rdx.shuf6 = shufflevector <8 x i16> %bin.rdx5, <8 x i16> undef, <8 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
  %bin.rdx7 = add <8 x i16> %rdx.shuf6, %bin.rdx5
  %tmp11 = extractelement <8 x i16> %bin.rdx7, i32 0
  br label %for.cond.cleanup

for.cond.cleanup:                                 ; preds = %middle.block, %entry
  %res.0 = phi i16 [ 0, %entry ], [ %tmp11, %middle.block ]
  ret i16 %res.0
}

; CHECK-LABEL: reduction_i32_with_scalar
; CHECK: %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
; CHECK: %vec.phi = phi <8 x i16> [ zeroinitializer, %vector.ph ], [ %{{.*}}, %vector.body ]
; CHECK: %{{.*}} = phi i32 [ %{{.*}}, %vector.ph ], [ %{{.*}}, %vector.body ]
; CHECK: [[PHI:%[^ ]+]] = phi i32 [ %N, %vector.ph ], [ [[ELEMS:%[^ ]+]], %vector.body ]
; CHECK: [[VCTP:%[^ ]+]] = call <8 x i1> @llvm.arm.mve.vctp16(i32 [[PHI]])
; CHECK: [[ELEMS]] = sub i32 [[PHI]], 8
; CHECK: call <8 x i16> @llvm.masked.load.v8i16.p0v8i16(<8 x i16>* %tmp4, i32 4, <8 x i1> [[VCTP]], <8 x i16> undef)
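;
; Scalar equivalent (illustration only, not part of the test); the invariant
; scalar %B is splat into a vector and added on every iteration:
;   int16_t acc = 0;
;   for (uint32_t i = 0; i < N; i++)
;     acc += A[i] + B;
;   return acc;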

define i16 @reduction_i32_with_scalar(i16* nocapture readonly %A, i16 %B, i32 %N) local_unnamed_addr {
entry:
  %cmp8 = icmp eq i32 %N, 0
  br i1 %cmp8, label %for.cond.cleanup, label %vector.ph

vector.ph:                                        ; preds = %entry
  %tmp = add i32 %N, -1
  %n.rnd.up = add nuw nsw i32 %tmp, 8
  %n.vec = and i32 %n.rnd.up, -8
  %broadcast.splatinsert3 = insertelement <8 x i16> undef, i16 %B, i32 0
  %broadcast.splat4 = shufflevector <8 x i16> %broadcast.splatinsert3, <8 x i16> undef, <8 x i32> zeroinitializer
  %0 = add i32 %n.vec, -8
  %1 = lshr i32 %0, 3
  %2 = add nuw nsw i32 %1, 1
  %start = call i32 @llvm.start.loop.iterations.i32(i32 %2)
  br label %vector.body

vector.body:                                      ; preds = %vector.body, %vector.ph
  %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
  %vec.phi = phi <8 x i16> [ zeroinitializer, %vector.ph ], [ %tmp6, %vector.body ]
  %3 = phi i32 [ %start, %vector.ph ], [ %4, %vector.body ]
  %tmp2 = getelementptr inbounds i16, i16* %A, i32 %index
  %tmp3 = call <8 x i1> @llvm.get.active.lane.mask.v8i1.i32(i32 %index, i32 %N)
  %tmp4 = bitcast i16* %tmp2 to <8 x i16>*
  %wide.masked.load = call <8 x i16> @llvm.masked.load.v8i16.p0v8i16(<8 x i16>* %tmp4, i32 4, <8 x i1> %tmp3, <8 x i16> undef)
  %tmp5 = add <8 x i16> %vec.phi, %broadcast.splat4
  %tmp6 = add <8 x i16> %tmp5, %wide.masked.load
  %index.next = add nuw nsw i32 %index, 8
  %4 = call i32 @llvm.loop.decrement.reg.i32.i32.i32(i32 %3, i32 1)
  %5 = icmp ne i32 %4, 0
  br i1 %5, label %vector.body, label %middle.block

middle.block:                                     ; preds = %vector.body
  %tmp8 = select <8 x i1> %tmp3, <8 x i16> %tmp6, <8 x i16> %vec.phi
  %rdx.shuf = shufflevector <8 x i16> %tmp8, <8 x i16> undef, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef>
  %bin.rdx = add <8 x i16> %rdx.shuf, %tmp8
  %rdx.shuf5 = shufflevector <8 x i16> %bin.rdx, <8 x i16> undef, <8 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
  %bin.rdx6 = add <8 x i16> %rdx.shuf5, %bin.rdx
  %rdx.shuf7 = shufflevector <8 x i16> %bin.rdx6, <8 x i16> undef, <8 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
  %bin.rdx8 = add <8 x i16> %rdx.shuf7, %bin.rdx6
  %tmp9 = extractelement <8 x i16> %bin.rdx8, i32 0
  br label %for.cond.cleanup

for.cond.cleanup:                                 ; preds = %middle.block, %entry
  %res.0 = phi i16 [ 0, %entry ], [ %tmp9, %middle.block ]
  ret i16 %res.0
}

; The vector loop is not guarded with an entry check (N == 0). Check that
; despite this we can still calculate a precise enough range so that the
; overflow checks for get.active.lane.mask don't reject tail-predication.
;
; CHECK-LABEL: @reduction_not_guarded
; CHECK: vector.body:
; CHECK: @llvm.arm.mve.vctp
; CHECK-NOT: @llvm.get.active.lane.mask.v8i1.i32
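;
; Same reduction as reduction_i32_with_scalar, but entry falls through
; directly into vector.body; there is no separate vector.ph block guarding
; the loop.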

define i16 @reduction_not_guarded(i16* nocapture readonly %A, i16 %B, i32 %N) local_unnamed_addr {
entry:
  %tmp = add i32 %N, -1
  %n.rnd.up = add nuw nsw i32 %tmp, 8
  %n.vec = and i32 %n.rnd.up, -8
  %broadcast.splatinsert3 = insertelement <8 x i16> undef, i16 %B, i32 0
  %broadcast.splat4 = shufflevector <8 x i16> %broadcast.splatinsert3, <8 x i16> undef, <8 x i32> zeroinitializer
  %0 = add i32 %n.vec, -8
  %1 = lshr i32 %0, 3
  %2 = add nuw nsw i32 %1, 1
  %start = call i32 @llvm.start.loop.iterations.i32(i32 %2)
  br label %vector.body

vector.body:                                      ; preds = %vector.body, %entry
  %index = phi i32 [ 0, %entry ], [ %index.next, %vector.body ]
  %vec.phi = phi <8 x i16> [ zeroinitializer, %entry ], [ %tmp6, %vector.body ]
  %3 = phi i32 [ %start, %entry ], [ %4, %vector.body ]
  %tmp2 = getelementptr inbounds i16, i16* %A, i32 %index
  %tmp3 = call <8 x i1> @llvm.get.active.lane.mask.v8i1.i32(i32 %index, i32 %N)
  %tmp4 = bitcast i16* %tmp2 to <8 x i16>*
  %wide.masked.load = call <8 x i16> @llvm.masked.load.v8i16.p0v8i16(<8 x i16>* %tmp4, i32 4, <8 x i1> %tmp3, <8 x i16> undef)
  %tmp5 = add <8 x i16> %vec.phi, %broadcast.splat4
  %tmp6 = add <8 x i16> %tmp5, %wide.masked.load
  %index.next = add nuw nsw i32 %index, 8
  %4 = call i32 @llvm.loop.decrement.reg.i32.i32.i32(i32 %3, i32 1)
  %5 = icmp ne i32 %4, 0
  br i1 %5, label %vector.body, label %middle.block

middle.block:                                     ; preds = %vector.body
  %tmp8 = select <8 x i1> %tmp3, <8 x i16> %tmp6, <8 x i16> %vec.phi
  %rdx.shuf = shufflevector <8 x i16> %tmp8, <8 x i16> undef, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef>
  %bin.rdx = add <8 x i16> %rdx.shuf, %tmp8
  %rdx.shuf5 = shufflevector <8 x i16> %bin.rdx, <8 x i16> undef, <8 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
  %bin.rdx6 = add <8 x i16> %rdx.shuf5, %bin.rdx
  %rdx.shuf7 = shufflevector <8 x i16> %bin.rdx6, <8 x i16> undef, <8 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
  %bin.rdx8 = add <8 x i16> %rdx.shuf7, %bin.rdx6
  %tmp9 = extractelement <8 x i16> %bin.rdx8, i32 0
  ret i16 %tmp9
}

; CHECK-LABEL: @Correlation
; CHECK: vector.body:
; CHECK: @llvm.arm.mve.vctp
; CHECK-NOT: %active.lane.mask = call <4 x i1> @llvm.get.active.lane.mask
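;
; Rough C sketch of the kernel below (illustration only, not part of the test):
;   for (int i = 0; i < N; i++) {           // outer loop over output samples
;     int32_t Sum = 0;
;     for (int j = 0; j < Size - i; j++)    // tail-predicated inner loop
;       Sum += (Input[j] * Input[i + j]) >> Scale;
;     Output[i] = (int16_t)(Sum >> 16);
;   }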

define dso_local void @Correlation(i16* nocapture readonly %Input, i16* nocapture %Output, i16 signext %Size, i16 signext %N, i16 signext %Scale) local_unnamed_addr {
entry:
  %conv = sext i16 %N to i32
  %cmp36 = icmp sgt i16 %N, 0
  br i1 %cmp36, label %for.body.lr.ph, label %for.end17

for.body.lr.ph:                                   ; preds = %entry
  %conv2 = sext i16 %Size to i32
  %conv1032 = zext i16 %Scale to i32
  %0 = add i32 %conv2, 3
  br label %for.body

for.body:                                         ; preds = %for.end, %for.body.lr.ph
  %lsr.iv51 = phi i32 [ %lsr.iv.next, %for.end ], [ %0, %for.body.lr.ph ]
  %lsr.iv46 = phi i16* [ %scevgep47, %for.end ], [ %Input, %for.body.lr.ph ]
  %i.037 = phi i32 [ 0, %for.body.lr.ph ], [ %inc16, %for.end ]
  %1 = mul nsw i32 %i.037, -1
  %2 = add i32 %0, %1
  %3 = lshr i32 %2, 2
  %4 = shl nuw i32 %3, 2
  %5 = add i32 %4, -4
  %6 = lshr i32 %5, 2
  %7 = add nuw nsw i32 %6, 1
  %8 = sub i32 %conv2, %i.037
  %cmp433 = icmp slt i32 %i.037, %conv2
  br i1 %cmp433, label %vector.ph, label %for.end

vector.ph:                                        ; preds = %for.body
  %start = call i32 @llvm.start.loop.iterations.i32(i32 %7)
  br label %vector.body

vector.body:                                      ; preds = %vector.body, %vector.ph
  %lsr.iv48 = phi i16* [ %scevgep49, %vector.body ], [ %lsr.iv46, %vector.ph ]
  %lsr.iv = phi i16* [ %scevgep, %vector.body ], [ %Input, %vector.ph ]
  %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
  %vec.phi = phi <4 x i32> [ zeroinitializer, %vector.ph ], [ %16, %vector.body ]
  %9 = phi i32 [ %start, %vector.ph ], [ %17, %vector.body ]
  %lsr.iv4850 = bitcast i16* %lsr.iv48 to <4 x i16>*
  %lsr.iv45 = bitcast i16* %lsr.iv to <4 x i16>*
  %active.lane.mask = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 %index, i32 %8)
  %wide.masked.load = call <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>* %lsr.iv45, i32 2, <4 x i1> %active.lane.mask, <4 x i16> undef)
  %10 = sext <4 x i16> %wide.masked.load to <4 x i32>
  %wide.masked.load42 = call <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>* %lsr.iv4850, i32 2, <4 x i1> %active.lane.mask, <4 x i16> undef)
  %11 = sext <4 x i16> %wide.masked.load42 to <4 x i32>
  %12 = mul nsw <4 x i32> %11, %10
  %13 = insertelement <4 x i32> undef, i32 %conv1032, i32 0
  %14 = shufflevector <4 x i32> %13, <4 x i32> undef, <4 x i32> zeroinitializer
  %15 = ashr <4 x i32> %12, %14
  %16 = add <4 x i32> %15, %vec.phi
  %index.next = add i32 %index, 4
  %scevgep = getelementptr i16, i16* %lsr.iv, i32 4
  %scevgep49 = getelementptr i16, i16* %lsr.iv48, i32 4
  %17 = call i32 @llvm.loop.decrement.reg.i32(i32 %9, i32 1)
  %18 = icmp ne i32 %17, 0
  br i1 %18, label %vector.body, label %middle.block
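
; Note: this function reduces with the @llvm.vector.reduce.add intrinsic
; instead of the explicit shufflevector/add tree used in the i16 tests above.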

middle.block:                                     ; preds = %vector.body
  %19 = select <4 x i1> %active.lane.mask, <4 x i32> %16, <4 x i32> %vec.phi
  %20 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %19)
  br label %for.end

for.end:                                          ; preds = %middle.block, %for.body
  %Sum.0.lcssa = phi i32 [ 0, %for.body ], [ %20, %middle.block ]
  %21 = lshr i32 %Sum.0.lcssa, 16
  %conv13 = trunc i32 %21 to i16
  %arrayidx14 = getelementptr inbounds i16, i16* %Output, i32 %i.037
  store i16 %conv13, i16* %arrayidx14, align 2
  %inc16 = add nuw nsw i32 %i.037, 1
  %scevgep47 = getelementptr i16, i16* %lsr.iv46, i32 1
  %lsr.iv.next = add i32 %lsr.iv51, -1
  %exitcond39 = icmp eq i32 %inc16, %conv
  br i1 %exitcond39, label %for.end17, label %for.body

for.end17:                                        ; preds = %for.end, %entry
  ret void
}

declare <8 x i16> @llvm.masked.load.v8i16.p0v8i16(<8 x i16>*, i32 immarg, <8 x i1>, <8 x i16>)
declare i32 @llvm.start.loop.iterations.i32(i32)
declare i32 @llvm.loop.decrement.reg.i32.i32.i32(i32, i32)
declare <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32, i32)
declare <8 x i1> @llvm.get.active.lane.mask.v8i1.i32(i32, i32)
declare i32 @llvm.vector.reduce.add.v4i32(<4 x i32>)
declare i32 @llvm.loop.decrement.reg.i32(i32, i32)
declare <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>*, i32 immarg, <4 x i1>, <4 x i16>)