; RUN: opt -mtriple=thumbv8.1m.main -mattr=+mve.fp -loop-unroll -S < %s -o - | FileCheck %s
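
; The vector body of @loopfn is marked llvm.loop.isvectorized and the scalar
; remainder loop carries llvm.loop.unroll.runtime.disable, so the unroller
; should leave both loops alone.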
; CHECK-LABEL: @loopfn
; CHECK: br i1 %7, label %middle.block, label %vector.body, !llvm.loop !0
; CHECK: br i1 %cmp.n, label %for.cond.cleanup, label %for.body.preheader13
; CHECK: br i1 %exitcond, label %for.cond.cleanup.loopexit, label %for.body, !llvm.loop !2

define void @loopfn(float* %s1, float* %s2, float* %d, i32 %n) {
entry:
  %cmp10 = icmp sgt i32 %n, 0
  br i1 %cmp10, label %for.body.preheader, label %for.cond.cleanup

for.body.preheader:                               ; preds = %entry
  %min.iters.check = icmp ult i32 %n, 4
  br i1 %min.iters.check, label %for.body.preheader13, label %vector.ph

for.body.preheader13:                             ; preds = %middle.block, %for.body.preheader
  %i.011.ph = phi i32 [ 0, %for.body.preheader ], [ %n.vec, %middle.block ]
  br label %for.body

vector.ph:                                        ; preds = %for.body.preheader
  %n.vec = and i32 %n, -4
  br label %vector.body

vector.body:                                      ; preds = %vector.body, %vector.ph
  %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
  %0 = getelementptr inbounds float, float* %s1, i32 %index
  %1 = bitcast float* %0 to <4 x float>*
  %wide.load = load <4 x float>, <4 x float>* %1, align 4
  %2 = getelementptr inbounds float, float* %s2, i32 %index
  %3 = bitcast float* %2 to <4 x float>*
  %wide.load12 = load <4 x float>, <4 x float>* %3, align 4
  %4 = fadd fast <4 x float> %wide.load12, %wide.load
  %5 = getelementptr inbounds float, float* %d, i32 %index
  %6 = bitcast float* %5 to <4 x float>*
  store <4 x float> %4, <4 x float>* %6, align 4
  %index.next = add i32 %index, 4
  %7 = icmp eq i32 %index.next, %n.vec
  br i1 %7, label %middle.block, label %vector.body, !llvm.loop !0

middle.block:                                     ; preds = %vector.body
  %cmp.n = icmp eq i32 %n.vec, %n
  br i1 %cmp.n, label %for.cond.cleanup, label %for.body.preheader13

for.cond.cleanup.loopexit:                        ; preds = %for.body
  br label %for.cond.cleanup

for.cond.cleanup:                                 ; preds = %for.cond.cleanup.loopexit, %middle.block, %entry
  ret void

for.body:                                         ; preds = %for.body.preheader13, %for.body
  %i.011 = phi i32 [ %add3, %for.body ], [ %i.011.ph, %for.body.preheader13 ]
  %arrayidx = getelementptr inbounds float, float* %s1, i32 %i.011
  %8 = load float, float* %arrayidx, align 4
  %arrayidx1 = getelementptr inbounds float, float* %s2, i32 %i.011
  %9 = load float, float* %arrayidx1, align 4
  %add = fadd fast float %9, %8
  %arrayidx2 = getelementptr inbounds float, float* %d, i32 %i.011
  store float %add, float* %arrayidx2, align 4
  %add3 = add nuw nsw i32 %i.011, 1
  %exitcond = icmp eq i32 %add3, %n
  br i1 %exitcond, label %for.cond.cleanup.loopexit, label %for.body, !llvm.loop !2
}

; Same as above, but without the nounroll metadata on the remainder loop. Neither loop should be unrolled.

; CHECK-LABEL: @remainder
; CHECK: br i1 %7, label %middle.block, label %vector.body, !llvm.loop !0
; CHECK: middle.block:
; CHECK: br i1 %cmp.n, label %for.cond.cleanup, label %for.body.preheader13
; CHECK: br i1 %exitcond, label %for.cond.cleanup.loopexit, label %for.body, !llvm.loop !0

define void @remainder(float* %s1, float* %s2, float* %d, i32 %n) {
entry:
  %cmp10 = icmp sgt i32 %n, 0
  br i1 %cmp10, label %for.body.preheader, label %for.cond.cleanup

for.body.preheader:                               ; preds = %entry
  %min.iters.check = icmp ult i32 %n, 4
  br i1 %min.iters.check, label %for.body.preheader13, label %vector.ph

for.body.preheader13:                             ; preds = %middle.block, %for.body.preheader
  %i.011.ph = phi i32 [ 0, %for.body.preheader ], [ %n.vec, %middle.block ]
  br label %for.body

vector.ph:                                        ; preds = %for.body.preheader
  %n.vec = and i32 %n, -4
  br label %vector.body

vector.body:                                      ; preds = %vector.body, %vector.ph
  %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
  %0 = getelementptr inbounds float, float* %s1, i32 %index
  %1 = bitcast float* %0 to <4 x float>*
  %wide.load = load <4 x float>, <4 x float>* %1, align 4
  %2 = getelementptr inbounds float, float* %s2, i32 %index
  %3 = bitcast float* %2 to <4 x float>*
  %wide.load12 = load <4 x float>, <4 x float>* %3, align 4
  %4 = fadd fast <4 x float> %wide.load12, %wide.load
  %5 = getelementptr inbounds float, float* %d, i32 %index
  %6 = bitcast float* %5 to <4 x float>*
  store <4 x float> %4, <4 x float>* %6, align 4
  %index.next = add i32 %index, 4
  %7 = icmp eq i32 %index.next, %n.vec
  br i1 %7, label %middle.block, label %vector.body, !llvm.loop !0

middle.block:                                     ; preds = %vector.body
  %cmp.n = icmp eq i32 %n.vec, %n
  br i1 %cmp.n, label %for.cond.cleanup, label %for.body.preheader13

for.cond.cleanup.loopexit:                        ; preds = %for.body
  br label %for.cond.cleanup

for.cond.cleanup:                                 ; preds = %for.cond.cleanup.loopexit, %middle.block, %entry
  ret void

for.body:                                         ; preds = %for.body.preheader13, %for.body
  %i.011 = phi i32 [ %add3, %for.body ], [ %i.011.ph, %for.body.preheader13 ]
  %arrayidx = getelementptr inbounds float, float* %s1, i32 %i.011
  %8 = load float, float* %arrayidx, align 4
  %arrayidx1 = getelementptr inbounds float, float* %s2, i32 %i.011
  %9 = load float, float* %arrayidx1, align 4
  %add = fadd fast float %9, %8
  %arrayidx2 = getelementptr inbounds float, float* %d, i32 %i.011
  store float %add, float* %arrayidx2, align 4
  %add3 = add nuw nsw i32 %i.011, 1
  %exitcond = icmp eq i32 %add3, %n
  br i1 %exitcond, label %for.cond.cleanup.loopexit, label %for.body, !llvm.loop !0
}

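; A nested loop in which only the inner loop has been vectorized. The inner
; vector body is marked llvm.loop.isvectorized; neither the outer loop nor the
; inner loop should be unrolled.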
; CHECK-LABEL: @nested
; CHECK: br label %vector.body
; CHECK: vector.body:
; CHECK: br i1 %8, label %for.latch, label %vector.body, !llvm.loop !0
; CHECK: br i1 %exitcond34, label %for.cond.cleanup.loopexit, label %for.outer

define void @nested(float* %s1, float* %s2, float* %d, i32 %n) {
entry:
  %cmp31 = icmp eq i32 %n, 0
  br i1 %cmp31, label %for.cond.cleanup, label %for.outer.preheader

for.outer.preheader:                              ; preds = %entry
  %min.iters.check = icmp ult i32 %n, 4
  %n.vec = and i32 %n, -4
  %cmp.n = icmp eq i32 %n.vec, %n
  br label %for.outer

for.outer:                                        ; preds = %for.outer.preheader, %for.latch
  %j.032.us = phi i32 [ %inc.us, %for.latch ], [ 0, %for.outer.preheader ]
  %mul.us = mul i32 %j.032.us, %n
  br label %vector.body

vector.body:                                      ; preds = %for.outer, %vector.body
  %index = phi i32 [ %index.next, %vector.body ], [ 0, %for.outer ]
  %0 = add i32 %index, %mul.us
  %1 = getelementptr inbounds float, float* %s1, i32 %0
  %2 = bitcast float* %1 to <4 x float>*
  %wide.load = load <4 x float>, <4 x float>* %2, align 4
  %3 = getelementptr inbounds float, float* %s2, i32 %0
  %4 = bitcast float* %3 to <4 x float>*
  %wide.load35 = load <4 x float>, <4 x float>* %4, align 4
  %5 = fadd fast <4 x float> %wide.load35, %wide.load
  %6 = getelementptr inbounds float, float* %d, i32 %0
  %7 = bitcast float* %6 to <4 x float>*
  store <4 x float> %5, <4 x float>* %7, align 4
  %index.next = add i32 %index, 4
  %8 = icmp eq i32 %index.next, %n.vec
  br i1 %8, label %for.latch, label %vector.body, !llvm.loop !0

for.latch:                                        ; preds = %vector.body
  %i.030.us.ph = phi i32 [ %n.vec, %vector.body ]
  %inc.us = add nuw i32 %j.032.us, 1
  %exitcond34 = icmp eq i32 %inc.us, %n
  br i1 %exitcond34, label %for.cond.cleanup.loopexit, label %for.outer

for.cond.cleanup.loopexit:                        ; preds = %for.latch
  br label %for.cond.cleanup

for.cond.cleanup:                                 ; preds = %for.cond.cleanup.loopexit, %entry
  ret void
}

; Test that we don't unroll loops that only contain vector intrinsics.
; CHECK-LABEL: test_intrinsics
; CHECK: call <16 x i8> @llvm.arm.mve.sub
; CHECK-NOT: call <16 x i8> @llvm.arm.mve.sub
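; The loop below is tail-predicated: each iteration computes a VCTP mask and
; uses it for the masked loads, the predicated MVE sub, and the masked store.
; The CHECK-NOT above ensures the predicated sub is not duplicated by unrolling.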
define dso_local arm_aapcs_vfpcc void @test_intrinsics(i8* noalias nocapture readonly %a, i8* noalias nocapture readonly %b, i8* noalias nocapture %c, i32 %N) {
entry:
  %cmp8 = icmp eq i32 %N, 0
  %tmp8 = add i32 %N, 15
  %tmp9 = lshr i32 %tmp8, 4
  %tmp10 = shl nuw i32 %tmp9, 4
  %tmp11 = add i32 %tmp10, -16
  %tmp12 = lshr i32 %tmp11, 4
  %tmp13 = add nuw nsw i32 %tmp12, 1
  br i1 %cmp8, label %for.cond.cleanup, label %vector.ph

vector.ph:                                        ; preds = %entry
  br label %vector.body

vector.body:                                      ; preds = %vector.body, %vector.ph
  %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
  %tmp14 = phi i32 [ %tmp13, %vector.ph ], [ %tmp15, %vector.body ]
  %0 = phi i32 [ %N, %vector.ph ], [ %2, %vector.body ]
  %tmp = getelementptr inbounds i8, i8* %a, i32 %index
  %1 = call <16 x i1> @llvm.arm.mve.vctp8(i32 %0)
  %2 = add i32 %0, -16
  %tmp2 = bitcast i8* %tmp to <16 x i8>*
  %wide.masked.load = tail call <16 x i8> @llvm.masked.load.v16i8.p0v16i8(<16 x i8>* %tmp2, i32 4, <16 x i1> %1, <16 x i8> undef)
  %tmp3 = getelementptr inbounds i8, i8* %b, i32 %index
  %tmp4 = bitcast i8* %tmp3 to <16 x i8>*
  %wide.masked.load2 = tail call <16 x i8> @llvm.masked.load.v16i8.p0v16i8(<16 x i8>* %tmp4, i32 4, <16 x i1> %1, <16 x i8> undef)
  %sub = call <16 x i8> @llvm.arm.mve.sub.predicated.v16i8.v16i1(<16 x i8> %wide.masked.load2, <16 x i8> %wide.masked.load, <16 x i1> %1, <16 x i8> undef)
  %tmp6 = getelementptr inbounds i8, i8* %c, i32 %index
  %tmp7 = bitcast i8* %tmp6 to <16 x i8>*
  tail call void @llvm.masked.store.v16i8.p0v16i8(<16 x i8> %sub, <16 x i8>* %tmp7, i32 4, <16 x i1> %1)
  %index.next = add i32 %index, 16
  %tmp15 = sub i32 %tmp14, 1
  %tmp16 = icmp ne i32 %tmp15, 0
  br i1 %tmp16, label %vector.body, label %for.cond.cleanup

for.cond.cleanup:                                 ; preds = %vector.body, %entry
  ret void
}

declare <16 x i1> @llvm.arm.mve.vctp8(i32)
declare <16 x i8> @llvm.masked.load.v16i8.p0v16i8(<16 x i8>*, i32, <16 x i1>, <16 x i8>)
declare <16 x i8> @llvm.arm.mve.sub.predicated.v16i8.v16i1(<16 x i8>, <16 x i8>, <16 x i1>, <16 x i8>)
declare void @llvm.masked.store.v16i8.p0v16i8(<16 x i8>, <16 x i8>*, i32, <16 x i1>)

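; !0/!1 mark the vector loops as already vectorized; !2 additionally carries
; !3 (llvm.loop.unroll.runtime.disable) on the remainder loop of @loopfn.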
!0 = distinct !{!0, !1}
!1 = !{!"llvm.loop.isvectorized", i32 1}
!2 = distinct !{!2, !3, !1}
!3 = !{!"llvm.loop.unroll.runtime.disable"}