; RUN: opt < %s -loop-unroll -pragma-unroll-threshold=1024 -S | FileCheck -check-prefixes=CHECK,REM %s
; RUN: opt < %s -loop-unroll -loop-unroll -pragma-unroll-threshold=1024 -S | FileCheck -check-prefixes=CHECK,REM %s
; RUN: opt < %s -loop-unroll -unroll-allow-remainder=0 -pragma-unroll-threshold=1024 -S | FileCheck -check-prefixes=CHECK,NOREM %s
;
; Run loop unrolling twice to verify that loop unrolling metadata is properly
; removed and further unrolling is disabled after the pass is run once.

target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"

; loop4 contains a small loop which should be completely unrolled by
; the default unrolling heuristics. It serves as a control for the
; unroll(disable) pragma test loop4_with_disable.
;
; CHECK-LABEL: @loop4(
define void @loop4(i32* nocapture %a) {
entry:
  br label %for.body

for.body:                                         ; preds = %for.body, %entry
  %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
  %arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
  %0 = load i32, i32* %arrayidx, align 4
  %inc = add nsw i32 %0, 1
  store i32 %inc, i32* %arrayidx, align 4
  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
  %exitcond = icmp eq i64 %indvars.iv.next, 4
  br i1 %exitcond, label %for.end, label %for.body

for.end:                                          ; preds = %for.body
  ret void
}

; #pragma clang loop unroll(disable)
;
; CHECK-LABEL: @loop4_with_disable(
; CHECK: store i32
; CHECK-NOT: store i32
; CHECK: br i1
define void @loop4_with_disable(i32* nocapture %a) {
entry:
  br label %for.body

for.body:                                         ; preds = %for.body, %entry
  %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
  %arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
  %0 = load i32, i32* %arrayidx, align 4
  %inc = add nsw i32 %0, 1
  store i32 %inc, i32* %arrayidx, align 4
  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
  %exitcond = icmp eq i64 %indvars.iv.next, 4
  br i1 %exitcond, label %for.end, label %for.body, !llvm.loop !1

for.end:                                          ; preds = %for.body
  ret void
}
!1 = !{!1, !2}
!2 = !{!"llvm.loop.unroll.disable"}
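
; For reference, an illustrative C-level source sketch (not part of this test,
; names chosen to mirror the IR above) of the kind of loop Clang lowers to the
; llvm.loop.unroll.disable metadata attached above:
;
;   #pragma clang loop unroll(disable)
;   for (int i = 0; i < 4; ++i)
;     a[i] += 1;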

; loop64 has a high enough trip count that it should *not* be unrolled by
; the default unrolling heuristics. It serves as the control for the
; loop64_with_.* pragma tests below.
;
; CHECK-LABEL: @loop64(
; CHECK: store i32
; CHECK-NOT: store i32
; CHECK: br i1
define void @loop64(i32* nocapture %a) {
entry:
  br label %for.body

for.body:                                         ; preds = %for.body, %entry
  %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
  %arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
  %0 = load i32, i32* %arrayidx, align 4
  %inc = add nsw i32 %0, 1
  store i32 %inc, i32* %arrayidx, align 4
  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
  %exitcond = icmp eq i64 %indvars.iv.next, 64
  br i1 %exitcond, label %for.end, label %for.body

for.end:                                          ; preds = %for.body
  ret void
}

; #pragma clang loop unroll(full)
; Loop should be fully unrolled.
;
; CHECK-LABEL: @loop64_with_full(
define void @loop64_with_full(i32* nocapture %a) {
entry:
  br label %for.body

for.body:                                         ; preds = %for.body, %entry
  %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
  %arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
  %0 = load i32, i32* %arrayidx, align 4
  %inc = add nsw i32 %0, 1
  store i32 %inc, i32* %arrayidx, align 4
  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
  %exitcond = icmp eq i64 %indvars.iv.next, 64
  br i1 %exitcond, label %for.end, label %for.body, !llvm.loop !3

for.end:                                          ; preds = %for.body
  ret void
}
!3 = !{!3, !4}
!4 = !{!"llvm.loop.unroll.full"}
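
; Illustrative C-level sketch (not part of this test) of a loop Clang would
; lower to the llvm.loop.unroll.full metadata above; full unrolling is only
; possible here because the trip count (64) is a compile-time constant:
;
;   #pragma clang loop unroll(full)
;   for (int i = 0; i < 64; ++i)
;     a[i] += 1;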

; #pragma clang loop unroll_count(4)
; Loop should be unrolled 4 times.
;
; CHECK-LABEL: @loop64_with_count4(
; CHECK: store i32
; CHECK: store i32
; CHECK: store i32
; CHECK: store i32
; CHECK-NOT: store i32
; CHECK: br i1
define void @loop64_with_count4(i32* nocapture %a) {
entry:
  br label %for.body

for.body:                                         ; preds = %for.body, %entry
  %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
  %arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
  %0 = load i32, i32* %arrayidx, align 4
  %inc = add nsw i32 %0, 1
  store i32 %inc, i32* %arrayidx, align 4
  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
  %exitcond = icmp eq i64 %indvars.iv.next, 64
  br i1 %exitcond, label %for.end, label %for.body, !llvm.loop !5

for.end:                                          ; preds = %for.body
  ret void
}
!5 = !{!5, !6}
!6 = !{!"llvm.loop.unroll.count", i32 4}
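
; Illustrative C-level sketch (not part of this test): unroll_count(N) carries
; the requested count as an i32 operand of the property node, as in !6 above.
;
;   #pragma clang loop unroll_count(4)
;   for (int i = 0; i < 64; ++i)
;     a[i] += 1;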

; #pragma clang loop unroll(full)
; Full unrolling is requested, but the loop has a runtime trip count so
; no unrolling should occur.
;
; CHECK-LABEL: @runtime_loop_with_full(
; CHECK: store i32
; CHECK-NOT: store i32
define void @runtime_loop_with_full(i32* nocapture %a, i32 %b) {
entry:
  %cmp3 = icmp sgt i32 %b, 0
  br i1 %cmp3, label %for.body, label %for.end, !llvm.loop !8

for.body:                                         ; preds = %entry, %for.body
  %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
  %arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
  %0 = load i32, i32* %arrayidx, align 4
  %inc = add nsw i32 %0, 1
  store i32 %inc, i32* %arrayidx, align 4
  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
  %lftr.wideiv = trunc i64 %indvars.iv.next to i32
  %exitcond = icmp eq i32 %lftr.wideiv, %b
  br i1 %exitcond, label %for.end, label %for.body, !llvm.loop !8
for.end:                                          ; preds = %for.body, %entry
  ret void
}
!8 = !{!8, !4}

; #pragma clang loop unroll_count(4)
; Loop has a runtime trip count. Runtime unrolling should occur and the loop
; should be duplicated (original and 4x unrolled) if remainder is allowed,
; otherwise the loop should not be unrolled.
;
; CHECK-LABEL: @runtime_loop_with_count4(
; REM: for.body.epil:
; NOREM-NOT: for.body.epil:
define void @runtime_loop_with_count4(i32* nocapture %a, i32 %b) {
entry:
  %cmp3 = icmp sgt i32 %b, 0
  br i1 %cmp3, label %for.body, label %for.end, !llvm.loop !9

for.body:                                         ; preds = %entry, %for.body
  %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
  %arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
  %0 = load i32, i32* %arrayidx, align 4
  %inc = add nsw i32 %0, 1
  store i32 %inc, i32* %arrayidx, align 4
  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
  %lftr.wideiv = trunc i64 %indvars.iv.next to i32
  %exitcond = icmp eq i32 %lftr.wideiv, %b
  br i1 %exitcond, label %for.end, label %for.body, !llvm.loop !9
for.end:                                          ; preds = %for.body, %entry
  ret void
}
!9 = !{!9, !6}
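
; Conceptually (illustrative C only, not part of this test), 4x runtime
; unrolling with an epilogue remainder rewrites
;
;   #pragma clang loop unroll_count(4)
;   for (int i = 0; i < b; ++i)
;     a[i] += 1;
;
; into roughly:
;
;   int i = 0;
;   for (; i < (b & ~3); i += 4) {     // main loop: 4 copies of the body
;     a[i] += 1; a[i+1] += 1; a[i+2] += 1; a[i+3] += 1;
;   }
;   for (; i < b; ++i)                 // for.body.epil: 0-3 leftover iterations
;     a[i] += 1;
;
; With -unroll-allow-remainder=0 the epilogue cannot be created, so the loop is
; left alone (the NOREM checks above).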

; #pragma clang loop unroll_count(1)
; Loop should not be unrolled.
;
; CHECK-LABEL: @unroll_1(
; CHECK: store i32
; CHECK-NOT: store i32
; CHECK: br i1
define void @unroll_1(i32* nocapture %a, i32 %b) {
entry:
  br label %for.body

for.body:                                         ; preds = %for.body, %entry
  %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
  %arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
  %0 = load i32, i32* %arrayidx, align 4
  %inc = add nsw i32 %0, 1
  store i32 %inc, i32* %arrayidx, align 4
  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
  %exitcond = icmp eq i64 %indvars.iv.next, 4
  br i1 %exitcond, label %for.end, label %for.body, !llvm.loop !10

for.end:                                          ; preds = %for.body
  ret void
}
!10 = !{!10, !11}
!11 = !{!"llvm.loop.unroll.count", i32 1}

; #pragma clang loop unroll(enable)
; Loop should be fully unrolled.
;
; CHECK-LABEL: @loop64_with_enable(
define void @loop64_with_enable(i32* nocapture %a) {
entry:
  br label %for.body

for.body:                                         ; preds = %for.body, %entry
  %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
  %arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
  %0 = load i32, i32* %arrayidx, align 4
  %inc = add nsw i32 %0, 1
  store i32 %inc, i32* %arrayidx, align 4
  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
  %exitcond = icmp eq i64 %indvars.iv.next, 64
  br i1 %exitcond, label %for.end, label %for.body, !llvm.loop !12

for.end:                                          ; preds = %for.body
  ret void
}
!12 = !{!12, !13}
!13 = !{!"llvm.loop.unroll.enable"}

; #pragma clang loop unroll(enable)
; Loop has a runtime trip count and should be runtime unrolled and duplicated
; (original and 8x unrolled) if remainder is allowed, otherwise it should not
; be unrolled.
;
; CHECK-LABEL: @runtime_loop_with_enable(
; REM: store i32
; REM: store i32
; REM: store i32
; REM: store i32
; REM: store i32
; REM: store i32
; REM: store i32
; REM: store i32
; NOREM: store i32
; CHECK-NOT: store i32
; REM: for.body.epil:
; NOREM-NOT: for.body.epil:
define void @runtime_loop_with_enable(i32* nocapture %a, i32 %b) {
entry:
  %cmp3 = icmp sgt i32 %b, 0
  br i1 %cmp3, label %for.body, label %for.end, !llvm.loop !8

for.body:                                         ; preds = %entry, %for.body
  %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
  %arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
  %0 = load i32, i32* %arrayidx, align 4
  %inc = add nsw i32 %0, 1
  store i32 %inc, i32* %arrayidx, align 4
  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
  %lftr.wideiv = trunc i64 %indvars.iv.next to i32
  %exitcond = icmp eq i32 %lftr.wideiv, %b
  br i1 %exitcond, label %for.end, label %for.body, !llvm.loop !14
for.end:                                          ; preds = %for.body, %entry
  ret void
}
!14 = !{!14, !13}

; #pragma clang loop unroll_count(3)
; Loop has a runtime trip count. Runtime unrolling should occur and the loop
; should be duplicated (original and 3x unrolled) if remainder is allowed,
; otherwise it should not be unrolled.
;
; CHECK-LABEL: @runtime_loop_with_count3(
; REM: for.body.epil:
; NOREM-NOT: for.body.epil:
define void @runtime_loop_with_count3(i32* nocapture %a, i32 %b) {
entry:
  %cmp3 = icmp sgt i32 %b, 0
  br i1 %cmp3, label %for.body, label %for.end, !llvm.loop !16

for.body:                                         ; preds = %entry, %for.body
  %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
  %arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
  %0 = load i32, i32* %arrayidx, align 4
  %inc = add nsw i32 %0, 1
  store i32 %inc, i32* %arrayidx, align 4
  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
  %lftr.wideiv = trunc i64 %indvars.iv.next to i32
  %exitcond = icmp eq i32 %lftr.wideiv, %b
  br i1 %exitcond, label %for.end, label %for.body, !llvm.loop !15
for.end:                                          ; preds = %for.body, %entry
  ret void
}
!15 = !{!15, !16}
!16 = !{!"llvm.loop.unroll.count", i32 3}