; RUN: opt -data-layout=A5 -mtriple=amdgcn-unknown-amdhsa -loop-unroll -S -amdgpu-unroll-threshold-private=20000 %s | FileCheck %s
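
; The -amdgpu-unroll-threshold-private flag raises the unroll threshold the
; target applies to loops that access a private (addrspace(5)) alloca; 20000
; is large enough that cost alone never blocks the unrolls checked below.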

; Check that we fully unroll the loop to be able to eliminate the alloca
; CHECK-LABEL: @non_invariant_ind
; CHECK: for.body:
; CHECK-NOT: br
; CHECK: store i32 %tmp15, i32 addrspace(1)* %arrayidx7, align 4
; CHECK: ret void
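
; The store into %arr indexes with %rem, which varies with the induction
; variable, so the alloca can only be eliminated (e.g. by a later SROA run)
; once full unrolling has made every index a compile-time constant.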

define amdgpu_kernel void @non_invariant_ind(i32 addrspace(1)* nocapture %a, i32 %x) {
entry:
  %arr = alloca [64 x i32], align 4, addrspace(5)
  %tmp1 = tail call i32 @llvm.amdgcn.workitem.id.x() #1
  br label %for.body

for.cond.cleanup:                                 ; preds = %for.body
  %arrayidx5 = getelementptr inbounds [64 x i32], [64 x i32] addrspace(5)* %arr, i32 0, i32 %x
  %tmp15 = load i32, i32 addrspace(5)* %arrayidx5, align 4
  %arrayidx7 = getelementptr inbounds i32, i32 addrspace(1)* %a, i32 %tmp1
  store i32 %tmp15, i32 addrspace(1)* %arrayidx7, align 4
  ret void

for.body:                                         ; preds = %for.body, %entry
  %i.015 = phi i32 [ 0, %entry ], [ %inc, %for.body ]
  %idxprom = sext i32 %i.015 to i64
  %arrayidx = getelementptr inbounds i32, i32 addrspace(1)* %a, i64 %idxprom
  %tmp16 = load i32, i32 addrspace(1)* %arrayidx, align 4
  %add = add nsw i32 %i.015, %tmp1
  %rem = srem i32 %add, 64
  %arrayidx3 = getelementptr inbounds [64 x i32], [64 x i32] addrspace(5)* %arr, i32 0, i32 %rem
  store i32 %tmp16, i32 addrspace(5)* %arrayidx3, align 4
  %inc = add nuw nsw i32 %i.015, 1
  %exitcond = icmp eq i32 %inc, 100
  br i1 %exitcond, label %for.cond.cleanup, label %for.body
}

; Check that we unroll the inner loop but not the outer one
; CHECK-LABEL: @invariant_ind
; CHECK: %[[exitcond:[^ ]+]] = icmp eq i32 %{{.*}}, 32
; CHECK: br i1 %[[exitcond]]
; CHECK-NOT: icmp eq i32 %{{.*}}, 100
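
; Here the private-array index %rem depends only on the inner induction
; variable %j.025 (and %tmp1), never on the outer %i.026, so only the inner
; loop (trip count 100) is fully unrolled; the outer loop keeps its compare
; against 32.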

define amdgpu_kernel void @invariant_ind(i32 addrspace(1)* nocapture %a, i32 %x) {
entry:
  %arr = alloca [64 x i32], align 4, addrspace(5)
  %tmp1 = tail call i32 @llvm.amdgcn.workitem.id.x() #1
  br label %for.cond2.preheader

for.cond2.preheader:                              ; preds = %for.cond.cleanup5, %entry
  %i.026 = phi i32 [ 0, %entry ], [ %inc10, %for.cond.cleanup5 ]
  %idxprom = sext i32 %i.026 to i64
  %arrayidx = getelementptr inbounds i32, i32 addrspace(1)* %a, i64 %idxprom
  %tmp15 = load i32, i32 addrspace(1)* %arrayidx, align 4
  br label %for.body6

for.cond.cleanup:                                 ; preds = %for.cond.cleanup5
  %arrayidx13 = getelementptr inbounds [64 x i32], [64 x i32] addrspace(5)* %arr, i32 0, i32 %x
  %tmp16 = load i32, i32 addrspace(5)* %arrayidx13, align 4
  %arrayidx15 = getelementptr inbounds i32, i32 addrspace(1)* %a, i32 %tmp1
  store i32 %tmp16, i32 addrspace(1)* %arrayidx15, align 4
  ret void

for.cond.cleanup5:                                ; preds = %for.body6
  %inc10 = add nuw nsw i32 %i.026, 1
  %exitcond27 = icmp eq i32 %inc10, 32
  br i1 %exitcond27, label %for.cond.cleanup, label %for.cond2.preheader

for.body6:                                        ; preds = %for.body6, %for.cond2.preheader
  %j.025 = phi i32 [ 0, %for.cond2.preheader ], [ %inc, %for.body6 ]
  %add = add nsw i32 %j.025, %tmp1
  %rem = srem i32 %add, 64
  %arrayidx8 = getelementptr inbounds [64 x i32], [64 x i32] addrspace(5)* %arr, i32 0, i32 %rem
  store i32 %tmp15, i32 addrspace(5)* %arrayidx8, align 4
  %inc = add nuw nsw i32 %j.025, 1
  %exitcond = icmp eq i32 %inc, 100
  br i1 %exitcond, label %for.cond.cleanup5, label %for.body6
}

; Check that we do not force unrolling if the alloca is too big
; CHECK-LABEL: @too_big
; CHECK: for.body:
; CHECK: icmp eq i32 %{{.*}}, 100
; CHECK: br
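
; 256 x i32 is 1024 bytes of private stack, which is past what the heuristic
; will boost the threshold for, so the compare against the trip count of 100
; must survive.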

define amdgpu_kernel void @too_big(i32 addrspace(1)* nocapture %a, i32 %x) {
entry:
  %arr = alloca [256 x i32], align 4, addrspace(5)
  %tmp1 = tail call i32 @llvm.amdgcn.workitem.id.x() #1
  br label %for.body

for.cond.cleanup:                                 ; preds = %for.body
  %arrayidx5 = getelementptr inbounds [256 x i32], [256 x i32] addrspace(5)* %arr, i32 0, i32 %x
  %tmp15 = load i32, i32 addrspace(5)* %arrayidx5, align 4
  %arrayidx7 = getelementptr inbounds i32, i32 addrspace(1)* %a, i32 %tmp1
  store i32 %tmp15, i32 addrspace(1)* %arrayidx7, align 4
  ret void

for.body:                                         ; preds = %for.body, %entry
  %i.015 = phi i32 [ 0, %entry ], [ %inc, %for.body ]
  %idxprom = sext i32 %i.015 to i64
  %arrayidx = getelementptr inbounds i32, i32 addrspace(1)* %a, i64 %idxprom
  %tmp16 = load i32, i32 addrspace(1)* %arrayidx, align 4
  %add = add nsw i32 %i.015, %tmp1
  %rem = srem i32 %add, 64
  %arrayidx3 = getelementptr inbounds [256 x i32], [256 x i32] addrspace(5)* %arr, i32 0, i32 %rem
  store i32 %tmp16, i32 addrspace(5)* %arrayidx3, align 4
  %inc = add nuw nsw i32 %i.015, 1
  %exitcond = icmp eq i32 %inc, 100
  br i1 %exitcond, label %for.cond.cleanup, label %for.body
}

; Check that we do not force unrolling if the alloca is dynamically sized
; CHECK-LABEL: @dynamic_size_alloca(
; CHECK: alloca i32, i32 %n
; CHECK: for.body:
; CHECK: icmp eq i32 %{{.*}}, 100
; CHECK: br
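
; The alloca size here depends on the kernel argument %n, so the compiler
; cannot prove the alloca would go away after unrolling; the loop must stay
; rolled and the compare against 100 must survive.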

define amdgpu_kernel void @dynamic_size_alloca(i32 addrspace(1)* nocapture %a, i32 %n, i32 %x) {
entry:
  %arr = alloca i32, i32 %n, align 4, addrspace(5)
  %tmp1 = tail call i32 @llvm.amdgcn.workitem.id.x() #1
  br label %for.body

for.cond.cleanup:                                 ; preds = %for.body
  %arrayidx5 = getelementptr inbounds i32, i32 addrspace(5)* %arr, i32 %x
  %tmp15 = load i32, i32 addrspace(5)* %arrayidx5, align 4
  %arrayidx7 = getelementptr inbounds i32, i32 addrspace(1)* %a, i32 %tmp1
  store i32 %tmp15, i32 addrspace(1)* %arrayidx7, align 4
  ret void

for.body:                                         ; preds = %for.body, %entry
  %i.015 = phi i32 [ 0, %entry ], [ %inc, %for.body ]
  %idxprom = sext i32 %i.015 to i64
  %arrayidx = getelementptr inbounds i32, i32 addrspace(1)* %a, i64 %idxprom
  %tmp16 = load i32, i32 addrspace(1)* %arrayidx, align 4
  %add = add nsw i32 %i.015, %tmp1
  %rem = srem i32 %add, 64
  %arrayidx3 = getelementptr inbounds i32, i32 addrspace(5)* %arr, i32 %rem
  store i32 %tmp16, i32 addrspace(5)* %arrayidx3, align 4
  %inc = add nuw nsw i32 %i.015, 1
  %exitcond = icmp eq i32 %inc, 100
  br i1 %exitcond, label %for.cond.cleanup, label %for.body
}
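
; Of the intrinsics declared below, only @llvm.amdgcn.workitem.id.x is
; actually called in this file.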

declare i8 addrspace(4)* @llvm.amdgcn.dispatch.ptr() #1

declare i32 @llvm.amdgcn.workitem.id.x() #1

declare i32 @llvm.amdgcn.workgroup.id.x() #1

declare i8 addrspace(4)* @llvm.amdgcn.implicitarg.ptr() #1

attributes #1 = { nounwind readnone }