; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -mtriple=amdgcn-amd-amdhsa -S -passes=amdgpu-lower-kernel-attributes,instcombine,infer-alignment %s | FileCheck -enable-var-scope -check-prefix=GCN %s

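; The COV5 (code object v5) implicit kernel argument block used below stores
; the block counts as i32 at byte offsets 0/4/8, the group sizes as i16 at
; offsets 12/14/16, and the remainders as i16 at offsets 18/20/22. With
; "uniform-work-group-size"="true", amdgpu-lower-kernel-attributes can fold
; the remainder fields to 0 and fold the workgroup-id/block-count select
; down to the plain group-size load.
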
; Function Attrs: mustprogress nofree norecurse nosync nounwind readnone willreturn
define amdgpu_kernel void @get_local_size_x(ptr addrspace(1) %out) #0 {
; GCN-LABEL: @get_local_size_x(
; GCN-NEXT:    [[IMPLICITARG_PTR:%.*]] = tail call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
; GCN-NEXT:    [[GEP_LOCAL_SIZE:%.*]] = getelementptr inbounds i8, ptr addrspace(4) [[IMPLICITARG_PTR]], i64 12
; GCN-NEXT:    [[LOCAL_SIZE:%.*]] = load i16, ptr addrspace(4) [[GEP_LOCAL_SIZE]], align 4
; GCN-NEXT:    store i16 [[LOCAL_SIZE]], ptr addrspace(1) [[OUT:%.*]], align 2
; GCN-NEXT:    ret void
;
  %group.id = tail call i32 @llvm.amdgcn.workgroup.id.x()
  %implicitarg.ptr = tail call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
  %block.count.x = load i32, ptr addrspace(4) %implicitarg.ptr, align 4
  %cmp.id.count = icmp ult i32 %group.id, %block.count.x
  %local.size.offset = select i1 %cmp.id.count, i64 12, i64 18
  %gep.local.size = getelementptr inbounds i8, ptr addrspace(4) %implicitarg.ptr, i64 %local.size.offset
  %local.size = load i16, ptr addrspace(4) %gep.local.size, align 2
  store i16 %local.size, ptr addrspace(1) %out
  ret void
}

; Function Attrs: mustprogress nofree norecurse nosync nounwind readnone willreturn
define amdgpu_kernel void @get_local_size_y(ptr addrspace(1) %out) #0 {
; GCN-LABEL: @get_local_size_y(
; GCN-NEXT:    [[IMPLICITARG_PTR:%.*]] = tail call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
; GCN-NEXT:    [[GEP_LOCAL_SIZE:%.*]] = getelementptr inbounds i8, ptr addrspace(4) [[IMPLICITARG_PTR]], i64 14
; GCN-NEXT:    [[LOCAL_SIZE:%.*]] = load i16, ptr addrspace(4) [[GEP_LOCAL_SIZE]], align 2
; GCN-NEXT:    store i16 [[LOCAL_SIZE]], ptr addrspace(1) [[OUT:%.*]], align 2
; GCN-NEXT:    ret void
;
  %group.id = tail call i32 @llvm.amdgcn.workgroup.id.y()
  %implicitarg.ptr = tail call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
  %gep.block.count.y = getelementptr inbounds i8, ptr addrspace(4) %implicitarg.ptr, i64 4
  %block.count.y = load i32, ptr addrspace(4) %gep.block.count.y, align 4
  %cmp.id.count = icmp ult i32 %group.id, %block.count.y
  %local.size.offset = select i1 %cmp.id.count, i64 14, i64 20
  %gep.local.size = getelementptr inbounds i8, ptr addrspace(4) %implicitarg.ptr, i64 %local.size.offset
  %local.size = load i16, ptr addrspace(4) %gep.local.size, align 2
  store i16 %local.size, ptr addrspace(1) %out
  ret void
}

; Function Attrs: mustprogress nofree norecurse nosync nounwind readnone willreturn
define amdgpu_kernel void @get_local_size_z(ptr addrspace(1) %out) #0 {
; GCN-LABEL: @get_local_size_z(
; GCN-NEXT:    [[IMPLICITARG_PTR:%.*]] = tail call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
; GCN-NEXT:    [[GEP_LOCAL_SIZE:%.*]] = getelementptr inbounds i8, ptr addrspace(4) [[IMPLICITARG_PTR]], i64 16
; GCN-NEXT:    [[LOCAL_SIZE:%.*]] = load i16, ptr addrspace(4) [[GEP_LOCAL_SIZE]], align 4
; GCN-NEXT:    store i16 [[LOCAL_SIZE]], ptr addrspace(1) [[OUT:%.*]], align 2
; GCN-NEXT:    ret void
;
  %group.id = tail call i32 @llvm.amdgcn.workgroup.id.z()
  %implicitarg.ptr = tail call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
  %gep.block.count.z = getelementptr inbounds i8, ptr addrspace(4) %implicitarg.ptr, i64 8
  %block.count.z = load i32, ptr addrspace(4) %gep.block.count.z, align 4
  %cmp.id.count = icmp ult i32 %group.id, %block.count.z
  %local.size.offset = select i1 %cmp.id.count, i64 16, i64 22
  %gep.local.size = getelementptr inbounds i8, ptr addrspace(4) %implicitarg.ptr, i64 %local.size.offset
  %local.size = load i16, ptr addrspace(4) %gep.local.size, align 2
  store i16 %local.size, ptr addrspace(1) %out
  ret void
}

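; With a uniform work group size every group is full, so the remainder fields
; are known to be 0 and the loads below fold to storing a constant.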
; Function Attrs: mustprogress nofree norecurse nosync nounwind readnone willreturn
define amdgpu_kernel void @get_remainder_x(ptr addrspace(1) %out) #0 {
; GCN-LABEL: @get_remainder_x(
; GCN-NEXT:    store i16 0, ptr addrspace(1) [[OUT:%.*]], align 2
; GCN-NEXT:    ret void
;
  %implicitarg.ptr = tail call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
  %gep.x = getelementptr inbounds i8, ptr addrspace(4) %implicitarg.ptr, i64 18
  %remainder.x = load i16, ptr addrspace(4) %gep.x, align 2
  store i16 %remainder.x, ptr addrspace(1) %out
  ret void
}

; Function Attrs: mustprogress nofree norecurse nosync nounwind readnone willreturn
define amdgpu_kernel void @get_remainder_y(ptr addrspace(1) %out) #0 {
; GCN-LABEL: @get_remainder_y(
; GCN-NEXT:    store i16 0, ptr addrspace(1) [[OUT:%.*]], align 2
; GCN-NEXT:    ret void
;
  %implicitarg.ptr = tail call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
  %gep.y = getelementptr inbounds i8, ptr addrspace(4) %implicitarg.ptr, i64 20
  %remainder.y = load i16, ptr addrspace(4) %gep.y, align 2
  store i16 %remainder.y, ptr addrspace(1) %out
  ret void
}

; Function Attrs: mustprogress nofree norecurse nosync nounwind readnone willreturn
define amdgpu_kernel void @get_remainder_z(ptr addrspace(1) %out) #0 {
; GCN-LABEL: @get_remainder_z(
; GCN-NEXT:    store i16 0, ptr addrspace(1) [[OUT:%.*]], align 2
; GCN-NEXT:    ret void
;
  %implicitarg.ptr = tail call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
  %gep.z = getelementptr inbounds i8, ptr addrspace(4) %implicitarg.ptr, i64 22
  %remainder.z = load i16, ptr addrspace(4) %gep.z, align 2
  store i16 %remainder.z, ptr addrspace(1) %out
  ret void
}

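; Direct loads of the group-size fields are kept, but infer-alignment can
; raise the load alignment where the fixed offset from the implicit argument
; pointer guarantees it (offsets 12 and 16 give align 4 below).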
; Function Attrs: mustprogress nofree norecurse nosync nounwind readnone willreturn
define amdgpu_kernel void @get_work_group_size_x(ptr addrspace(1) %out) #0 {
; GCN-LABEL: @get_work_group_size_x(
; GCN-NEXT:    [[IMPLICITARG_PTR:%.*]] = tail call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
; GCN-NEXT:    [[GEP_X:%.*]] = getelementptr inbounds i8, ptr addrspace(4) [[IMPLICITARG_PTR]], i64 12
; GCN-NEXT:    [[GROUP_SIZE_X:%.*]] = load i16, ptr addrspace(4) [[GEP_X]], align 4
; GCN-NEXT:    store i16 [[GROUP_SIZE_X]], ptr addrspace(1) [[OUT:%.*]], align 2
; GCN-NEXT:    ret void
;
  %implicitarg.ptr = tail call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
  %gep.x = getelementptr inbounds i8, ptr addrspace(4) %implicitarg.ptr, i64 12
  %group.size.x = load i16, ptr addrspace(4) %gep.x, align 2
  store i16 %group.size.x, ptr addrspace(1) %out
  ret void
}

; Function Attrs: mustprogress nofree norecurse nosync nounwind readnone willreturn
define amdgpu_kernel void @get_work_group_size_y(ptr addrspace(1) %out) #0 {
; GCN-LABEL: @get_work_group_size_y(
; GCN-NEXT:    [[IMPLICITARG_PTR:%.*]] = tail call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
; GCN-NEXT:    [[GEP_Y:%.*]] = getelementptr inbounds i8, ptr addrspace(4) [[IMPLICITARG_PTR]], i64 14
; GCN-NEXT:    [[GROUP_SIZE_Y:%.*]] = load i16, ptr addrspace(4) [[GEP_Y]], align 2
; GCN-NEXT:    store i16 [[GROUP_SIZE_Y]], ptr addrspace(1) [[OUT:%.*]], align 2
; GCN-NEXT:    ret void
;
  %implicitarg.ptr = tail call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
  %gep.y = getelementptr inbounds i8, ptr addrspace(4) %implicitarg.ptr, i64 14
  %group.size.y = load i16, ptr addrspace(4) %gep.y, align 2
  store i16 %group.size.y, ptr addrspace(1) %out
  ret void
}

; Function Attrs: mustprogress nofree norecurse nosync nounwind readnone willreturn
define amdgpu_kernel void @get_work_group_size_z(ptr addrspace(1) %out) #0 {
; GCN-LABEL: @get_work_group_size_z(
; GCN-NEXT:    [[IMPLICITARG_PTR:%.*]] = tail call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
; GCN-NEXT:    [[GEP_Z:%.*]] = getelementptr inbounds i8, ptr addrspace(4) [[IMPLICITARG_PTR]], i64 16
; GCN-NEXT:    [[GROUP_SIZE_Z:%.*]] = load i16, ptr addrspace(4) [[GEP_Z]], align 4
; GCN-NEXT:    store i16 [[GROUP_SIZE_Z]], ptr addrspace(1) [[OUT:%.*]], align 2
; GCN-NEXT:    ret void
;
  %implicitarg.ptr = tail call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
  %gep.z = getelementptr inbounds i8, ptr addrspace(4) %implicitarg.ptr, i64 16
  %group.size.z = load i16, ptr addrspace(4) %gep.z, align 2
  store i16 %group.size.z, ptr addrspace(1) %out
  ret void
}

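; With !reqd_work_group_size the group sizes are compile-time constants, so
; the loads below fold to the values from the metadata (8, 16, 2).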
; Function Attrs: mustprogress nofree norecurse nosync nounwind readnone willreturn
define amdgpu_kernel void @get_work_group_size_x_reqd(ptr addrspace(1) %out) #0 !reqd_work_group_size !0 {
; GCN-LABEL: @get_work_group_size_x_reqd(
; GCN-NEXT:    store i16 8, ptr addrspace(1) [[OUT:%.*]], align 2
; GCN-NEXT:    ret void
;
  %implicitarg.ptr = tail call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
  %gep.x = getelementptr inbounds i8, ptr addrspace(4) %implicitarg.ptr, i64 12
  %group.size.x = load i16, ptr addrspace(4) %gep.x, align 2
  store i16 %group.size.x, ptr addrspace(1) %out
  ret void
}

; Function Attrs: mustprogress nofree norecurse nosync nounwind readnone willreturn
define amdgpu_kernel void @get_work_group_size_y_reqd(ptr addrspace(1) %out) #0 !reqd_work_group_size !0 {
; GCN-LABEL: @get_work_group_size_y_reqd(
; GCN-NEXT:    store i16 16, ptr addrspace(1) [[OUT:%.*]], align 2
; GCN-NEXT:    ret void
;
  %implicitarg.ptr = tail call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
  %gep.y = getelementptr inbounds i8, ptr addrspace(4) %implicitarg.ptr, i64 14
  %group.size.y = load i16, ptr addrspace(4) %gep.y, align 2
  store i16 %group.size.y, ptr addrspace(1) %out
  ret void
}

; Function Attrs: mustprogress nofree norecurse nosync nounwind readnone willreturn
define amdgpu_kernel void @get_work_group_size_z_reqd(ptr addrspace(1) %out) #0 !reqd_work_group_size !0 {
; GCN-LABEL: @get_work_group_size_z_reqd(
; GCN-NEXT:    store i16 2, ptr addrspace(1) [[OUT:%.*]], align 2
; GCN-NEXT:    ret void
;
  %implicitarg.ptr = tail call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
  %gep.z = getelementptr inbounds i8, ptr addrspace(4) %implicitarg.ptr, i64 16
  %group.size.z = load i16, ptr addrspace(4) %gep.z, align 2
  store i16 %group.size.z, ptr addrspace(1) %out
  ret void
}

declare ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr() #1
declare i32 @llvm.amdgcn.workgroup.id.x() #1
declare i32 @llvm.amdgcn.workgroup.id.y() #1
declare i32 @llvm.amdgcn.workgroup.id.z() #1

!llvm.module.flags = !{!1}

attributes #0 = { nounwind "uniform-work-group-size"="true" }
attributes #1 = { nounwind readnone speculatable }
!0 = !{i32 8, i32 16, i32 2}
!1 = !{i32 1, !"amdhsa_code_object_version", i32 500}