; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --function-signature --check-globals
; RUN: opt -S -mtriple=amdgcn-unknown-unknown -amdgpu-annotate-kernel-features < %s | FileCheck -check-prefixes=CHECK,AKF_CHECK %s
; RUN: opt -S -mtriple=amdgcn-unknown-unknown -amdgpu-attributor < %s | FileCheck -check-prefixes=CHECK,ATTRIBUTOR_CHECK %s

; Workgroup-id intrinsics (r600 legacy names used by the AMDGPU backend).
declare i32 @llvm.r600.read.tgid.x() #0
declare i32 @llvm.r600.read.tgid.y() #0
declare i32 @llvm.r600.read.tgid.z() #0

; Workitem-id intrinsics.
declare i32 @llvm.r600.read.tidig.x() #0
declare i32 @llvm.r600.read.tidig.y() #0
declare i32 @llvm.r600.read.tidig.z() #0

; Local (workgroup) size intrinsics.
declare i32 @llvm.r600.read.local.size.x() #0
declare i32 @llvm.r600.read.local.size.y() #0
declare i32 @llvm.r600.read.local.size.z() #0
; tgid.x needs no extra attribute (always enabled), so only the base #[[ATTR1]] set is expected.
define amdgpu_kernel void @use_tgid_x(i32 addrspace(1)* %ptr) #1 {
; CHECK-LABEL: define {{[^@]+}}@use_tgid_x
; CHECK-SAME: (i32 addrspace(1)* [[PTR:%.*]]) #[[ATTR1:[0-9]+]] {
; CHECK-NEXT: [[VAL:%.*]] = call i32 @llvm.r600.read.tgid.x()
; CHECK-NEXT: store i32 [[VAL]], i32 addrspace(1)* [[PTR]], align 4
; CHECK-NEXT: ret void
;
  %val = call i32 @llvm.r600.read.tgid.x()
  store i32 %val, i32 addrspace(1)* %ptr
  ret void
}
; tgid.y should add "amdgpu-work-group-id-y" (captured as #[[ATTR2]]).
define amdgpu_kernel void @use_tgid_y(i32 addrspace(1)* %ptr) #1 {
; CHECK-LABEL: define {{[^@]+}}@use_tgid_y
; CHECK-SAME: (i32 addrspace(1)* [[PTR:%.*]]) #[[ATTR2:[0-9]+]] {
; CHECK-NEXT: [[VAL:%.*]] = call i32 @llvm.r600.read.tgid.y()
; CHECK-NEXT: store i32 [[VAL]], i32 addrspace(1)* [[PTR]], align 4
; CHECK-NEXT: ret void
;
  %val = call i32 @llvm.r600.read.tgid.y()
  store i32 %val, i32 addrspace(1)* %ptr
  ret void
}
; Multiple uses of the same intrinsic must still produce a single attribute set (#[[ATTR2]]).
define amdgpu_kernel void @multi_use_tgid_y(i32 addrspace(1)* %ptr) #1 {
; CHECK-LABEL: define {{[^@]+}}@multi_use_tgid_y
; CHECK-SAME: (i32 addrspace(1)* [[PTR:%.*]]) #[[ATTR2]] {
; CHECK-NEXT: [[VAL0:%.*]] = call i32 @llvm.r600.read.tgid.y()
; CHECK-NEXT: store volatile i32 [[VAL0]], i32 addrspace(1)* [[PTR]], align 4
; CHECK-NEXT: [[VAL1:%.*]] = call i32 @llvm.r600.read.tgid.y()
; CHECK-NEXT: store volatile i32 [[VAL1]], i32 addrspace(1)* [[PTR]], align 4
; CHECK-NEXT: ret void
;
  %val0 = call i32 @llvm.r600.read.tgid.y()
  store volatile i32 %val0, i32 addrspace(1)* %ptr
  %val1 = call i32 @llvm.r600.read.tgid.y()
  store volatile i32 %val1, i32 addrspace(1)* %ptr
  ret void
}
; x + y together: only the y attribute is added (x is implicit), so #[[ATTR2]] again.
define amdgpu_kernel void @use_tgid_x_y(i32 addrspace(1)* %ptr) #1 {
; CHECK-LABEL: define {{[^@]+}}@use_tgid_x_y
; CHECK-SAME: (i32 addrspace(1)* [[PTR:%.*]]) #[[ATTR2]] {
; CHECK-NEXT: [[VAL0:%.*]] = call i32 @llvm.r600.read.tgid.x()
; CHECK-NEXT: [[VAL1:%.*]] = call i32 @llvm.r600.read.tgid.y()
; CHECK-NEXT: store volatile i32 [[VAL0]], i32 addrspace(1)* [[PTR]], align 4
; CHECK-NEXT: store volatile i32 [[VAL1]], i32 addrspace(1)* [[PTR]], align 4
; CHECK-NEXT: ret void
;
  %val0 = call i32 @llvm.r600.read.tgid.x()
  %val1 = call i32 @llvm.r600.read.tgid.y()
  store volatile i32 %val0, i32 addrspace(1)* %ptr
  store volatile i32 %val1, i32 addrspace(1)* %ptr
  ret void
}
; tgid.z should add "amdgpu-work-group-id-z" (captured as #[[ATTR3]]).
define amdgpu_kernel void @use_tgid_z(i32 addrspace(1)* %ptr) #1 {
; CHECK-LABEL: define {{[^@]+}}@use_tgid_z
; CHECK-SAME: (i32 addrspace(1)* [[PTR:%.*]]) #[[ATTR3:[0-9]+]] {
; CHECK-NEXT: [[VAL:%.*]] = call i32 @llvm.r600.read.tgid.z()
; CHECK-NEXT: store i32 [[VAL]], i32 addrspace(1)* [[PTR]], align 4
; CHECK-NEXT: ret void
;
  %val = call i32 @llvm.r600.read.tgid.z()
  store i32 %val, i32 addrspace(1)* %ptr
  ret void
}
; x + z: only the z attribute is needed, so the same set as @use_tgid_z (#[[ATTR3]]).
define amdgpu_kernel void @use_tgid_x_z(i32 addrspace(1)* %ptr) #1 {
; CHECK-LABEL: define {{[^@]+}}@use_tgid_x_z
; CHECK-SAME: (i32 addrspace(1)* [[PTR:%.*]]) #[[ATTR3]] {
; CHECK-NEXT: [[VAL0:%.*]] = call i32 @llvm.r600.read.tgid.x()
; CHECK-NEXT: [[VAL1:%.*]] = call i32 @llvm.r600.read.tgid.z()
; CHECK-NEXT: store volatile i32 [[VAL0]], i32 addrspace(1)* [[PTR]], align 4
; CHECK-NEXT: store volatile i32 [[VAL1]], i32 addrspace(1)* [[PTR]], align 4
; CHECK-NEXT: ret void
;
  %val0 = call i32 @llvm.r600.read.tgid.x()
  %val1 = call i32 @llvm.r600.read.tgid.z()
  store volatile i32 %val0, i32 addrspace(1)* %ptr
  store volatile i32 %val1, i32 addrspace(1)* %ptr
  ret void
}
; y + z: both workgroup-id attributes are added (captured as #[[ATTR4]]).
define amdgpu_kernel void @use_tgid_y_z(i32 addrspace(1)* %ptr) #1 {
; CHECK-LABEL: define {{[^@]+}}@use_tgid_y_z
; CHECK-SAME: (i32 addrspace(1)* [[PTR:%.*]]) #[[ATTR4:[0-9]+]] {
; CHECK-NEXT: [[VAL0:%.*]] = call i32 @llvm.r600.read.tgid.y()
; CHECK-NEXT: [[VAL1:%.*]] = call i32 @llvm.r600.read.tgid.z()
; CHECK-NEXT: store volatile i32 [[VAL0]], i32 addrspace(1)* [[PTR]], align 4
; CHECK-NEXT: store volatile i32 [[VAL1]], i32 addrspace(1)* [[PTR]], align 4
; CHECK-NEXT: ret void
;
  %val0 = call i32 @llvm.r600.read.tgid.y()
  %val1 = call i32 @llvm.r600.read.tgid.z()
  store volatile i32 %val0, i32 addrspace(1)* %ptr
  store volatile i32 %val1, i32 addrspace(1)* %ptr
  ret void
}
; x + y + z: same attribute set as y + z (#[[ATTR4]]) since x adds nothing.
define amdgpu_kernel void @use_tgid_x_y_z(i32 addrspace(1)* %ptr) #1 {
; CHECK-LABEL: define {{[^@]+}}@use_tgid_x_y_z
; CHECK-SAME: (i32 addrspace(1)* [[PTR:%.*]]) #[[ATTR4]] {
; CHECK-NEXT: [[VAL0:%.*]] = call i32 @llvm.r600.read.tgid.x()
; CHECK-NEXT: [[VAL1:%.*]] = call i32 @llvm.r600.read.tgid.y()
; CHECK-NEXT: [[VAL2:%.*]] = call i32 @llvm.r600.read.tgid.z()
; CHECK-NEXT: store volatile i32 [[VAL0]], i32 addrspace(1)* [[PTR]], align 4
; CHECK-NEXT: store volatile i32 [[VAL1]], i32 addrspace(1)* [[PTR]], align 4
; CHECK-NEXT: store volatile i32 [[VAL2]], i32 addrspace(1)* [[PTR]], align 4
; CHECK-NEXT: ret void
;
  %val0 = call i32 @llvm.r600.read.tgid.x()
  %val1 = call i32 @llvm.r600.read.tgid.y()
  %val2 = call i32 @llvm.r600.read.tgid.z()
  store volatile i32 %val0, i32 addrspace(1)* %ptr
  store volatile i32 %val1, i32 addrspace(1)* %ptr
  store volatile i32 %val2, i32 addrspace(1)* %ptr
  ret void
}
; tidig.x is implicit like tgid.x, so the base set #[[ATTR1]] is expected.
define amdgpu_kernel void @use_tidig_x(i32 addrspace(1)* %ptr) #1 {
; CHECK-LABEL: define {{[^@]+}}@use_tidig_x
; CHECK-SAME: (i32 addrspace(1)* [[PTR:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: [[VAL:%.*]] = call i32 @llvm.r600.read.tidig.x()
; CHECK-NEXT: store i32 [[VAL]], i32 addrspace(1)* [[PTR]], align 4
; CHECK-NEXT: ret void
;
  %val = call i32 @llvm.r600.read.tidig.x()
  store i32 %val, i32 addrspace(1)* %ptr
  ret void
}
; tidig.y should add "amdgpu-work-item-id-y" (captured as #[[ATTR5]]).
define amdgpu_kernel void @use_tidig_y(i32 addrspace(1)* %ptr) #1 {
; CHECK-LABEL: define {{[^@]+}}@use_tidig_y
; CHECK-SAME: (i32 addrspace(1)* [[PTR:%.*]]) #[[ATTR5:[0-9]+]] {
; CHECK-NEXT: [[VAL:%.*]] = call i32 @llvm.r600.read.tidig.y()
; CHECK-NEXT: store i32 [[VAL]], i32 addrspace(1)* [[PTR]], align 4
; CHECK-NEXT: ret void
;
  %val = call i32 @llvm.r600.read.tidig.y()
  store i32 %val, i32 addrspace(1)* %ptr
  ret void
}
; tidig.z should add "amdgpu-work-item-id-z" (captured as #[[ATTR6]]).
define amdgpu_kernel void @use_tidig_z(i32 addrspace(1)* %ptr) #1 {
; CHECK-LABEL: define {{[^@]+}}@use_tidig_z
; CHECK-SAME: (i32 addrspace(1)* [[PTR:%.*]]) #[[ATTR6:[0-9]+]] {
; CHECK-NEXT: [[VAL:%.*]] = call i32 @llvm.r600.read.tidig.z()
; CHECK-NEXT: store i32 [[VAL]], i32 addrspace(1)* [[PTR]], align 4
; CHECK-NEXT: ret void
;
  %val = call i32 @llvm.r600.read.tidig.z()
  store i32 %val, i32 addrspace(1)* %ptr
  ret void
}
; Both x-dimension IDs are implicit, so only the base set #[[ATTR1]].
define amdgpu_kernel void @use_tidig_x_tgid_x(i32 addrspace(1)* %ptr) #1 {
; CHECK-LABEL: define {{[^@]+}}@use_tidig_x_tgid_x
; CHECK-SAME: (i32 addrspace(1)* [[PTR:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: [[VAL0:%.*]] = call i32 @llvm.r600.read.tidig.x()
; CHECK-NEXT: [[VAL1:%.*]] = call i32 @llvm.r600.read.tgid.x()
; CHECK-NEXT: store volatile i32 [[VAL0]], i32 addrspace(1)* [[PTR]], align 4
; CHECK-NEXT: store volatile i32 [[VAL1]], i32 addrspace(1)* [[PTR]], align 4
; CHECK-NEXT: ret void
;
  %val0 = call i32 @llvm.r600.read.tidig.x()
  %val1 = call i32 @llvm.r600.read.tgid.x()
  store volatile i32 %val0, i32 addrspace(1)* %ptr
  store volatile i32 %val1, i32 addrspace(1)* %ptr
  ret void
}
; Mixed workitem-y + workgroup-y: both y attributes are added (captured as #[[ATTR7]]).
define amdgpu_kernel void @use_tidig_y_tgid_y(i32 addrspace(1)* %ptr) #1 {
; CHECK-LABEL: define {{[^@]+}}@use_tidig_y_tgid_y
; CHECK-SAME: (i32 addrspace(1)* [[PTR:%.*]]) #[[ATTR7:[0-9]+]] {
; CHECK-NEXT: [[VAL0:%.*]] = call i32 @llvm.r600.read.tidig.y()
; CHECK-NEXT: [[VAL1:%.*]] = call i32 @llvm.r600.read.tgid.y()
; CHECK-NEXT: store volatile i32 [[VAL0]], i32 addrspace(1)* [[PTR]], align 4
; CHECK-NEXT: store volatile i32 [[VAL1]], i32 addrspace(1)* [[PTR]], align 4
; CHECK-NEXT: ret void
;
  %val0 = call i32 @llvm.r600.read.tidig.y()
  %val1 = call i32 @llvm.r600.read.tgid.y()
  store volatile i32 %val0, i32 addrspace(1)* %ptr
  store volatile i32 %val1, i32 addrspace(1)* %ptr
  ret void
}
; All three workitem IDs: workitem-id-y and -z attributes (captured as #[[ATTR8]]).
define amdgpu_kernel void @use_tidig_x_y_z(i32 addrspace(1)* %ptr) #1 {
; CHECK-LABEL: define {{[^@]+}}@use_tidig_x_y_z
; CHECK-SAME: (i32 addrspace(1)* [[PTR:%.*]]) #[[ATTR8:[0-9]+]] {
; CHECK-NEXT: [[VAL0:%.*]] = call i32 @llvm.r600.read.tidig.x()
; CHECK-NEXT: [[VAL1:%.*]] = call i32 @llvm.r600.read.tidig.y()
; CHECK-NEXT: [[VAL2:%.*]] = call i32 @llvm.r600.read.tidig.z()
; CHECK-NEXT: store volatile i32 [[VAL0]], i32 addrspace(1)* [[PTR]], align 4
; CHECK-NEXT: store volatile i32 [[VAL1]], i32 addrspace(1)* [[PTR]], align 4
; CHECK-NEXT: store volatile i32 [[VAL2]], i32 addrspace(1)* [[PTR]], align 4
; CHECK-NEXT: ret void
;
  %val0 = call i32 @llvm.r600.read.tidig.x()
  %val1 = call i32 @llvm.r600.read.tidig.y()
  %val2 = call i32 @llvm.r600.read.tidig.z()
  store volatile i32 %val0, i32 addrspace(1)* %ptr
  store volatile i32 %val1, i32 addrspace(1)* %ptr
  store volatile i32 %val2, i32 addrspace(1)* %ptr
  ret void
}
; All six ID intrinsics: the union of the y/z workgroup and workitem attributes (#[[ATTR9]]).
define amdgpu_kernel void @use_all_workitems(i32 addrspace(1)* %ptr) #1 {
; CHECK-LABEL: define {{[^@]+}}@use_all_workitems
; CHECK-SAME: (i32 addrspace(1)* [[PTR:%.*]]) #[[ATTR9:[0-9]+]] {
; CHECK-NEXT: [[VAL0:%.*]] = call i32 @llvm.r600.read.tidig.x()
; CHECK-NEXT: [[VAL1:%.*]] = call i32 @llvm.r600.read.tidig.y()
; CHECK-NEXT: [[VAL2:%.*]] = call i32 @llvm.r600.read.tidig.z()
; CHECK-NEXT: [[VAL3:%.*]] = call i32 @llvm.r600.read.tgid.x()
; CHECK-NEXT: [[VAL4:%.*]] = call i32 @llvm.r600.read.tgid.y()
; CHECK-NEXT: [[VAL5:%.*]] = call i32 @llvm.r600.read.tgid.z()
; CHECK-NEXT: store volatile i32 [[VAL0]], i32 addrspace(1)* [[PTR]], align 4
; CHECK-NEXT: store volatile i32 [[VAL1]], i32 addrspace(1)* [[PTR]], align 4
; CHECK-NEXT: store volatile i32 [[VAL2]], i32 addrspace(1)* [[PTR]], align 4
; CHECK-NEXT: store volatile i32 [[VAL3]], i32 addrspace(1)* [[PTR]], align 4
; CHECK-NEXT: store volatile i32 [[VAL4]], i32 addrspace(1)* [[PTR]], align 4
; CHECK-NEXT: store volatile i32 [[VAL5]], i32 addrspace(1)* [[PTR]], align 4
; CHECK-NEXT: ret void
;
  %val0 = call i32 @llvm.r600.read.tidig.x()
  %val1 = call i32 @llvm.r600.read.tidig.y()
  %val2 = call i32 @llvm.r600.read.tidig.z()
  %val3 = call i32 @llvm.r600.read.tgid.x()
  %val4 = call i32 @llvm.r600.read.tgid.y()
  %val5 = call i32 @llvm.r600.read.tgid.z()
  store volatile i32 %val0, i32 addrspace(1)* %ptr
  store volatile i32 %val1, i32 addrspace(1)* %ptr
  store volatile i32 %val2, i32 addrspace(1)* %ptr
  store volatile i32 %val3, i32 addrspace(1)* %ptr
  store volatile i32 %val4, i32 addrspace(1)* %ptr
  store volatile i32 %val5, i32 addrspace(1)* %ptr
  ret void
}
; local.size intrinsics add no ID attributes; base set #[[ATTR1]] expected.
define amdgpu_kernel void @use_get_local_size_x(i32 addrspace(1)* %ptr) #1 {
; CHECK-LABEL: define {{[^@]+}}@use_get_local_size_x
; CHECK-SAME: (i32 addrspace(1)* [[PTR:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: [[VAL:%.*]] = call i32 @llvm.r600.read.local.size.x()
; CHECK-NEXT: store i32 [[VAL]], i32 addrspace(1)* [[PTR]], align 4
; CHECK-NEXT: ret void
;
  %val = call i32 @llvm.r600.read.local.size.x()
  store i32 %val, i32 addrspace(1)* %ptr
  ret void
}
; Same as local.size.x: no extra attribute, base set #[[ATTR1]].
define amdgpu_kernel void @use_get_local_size_y(i32 addrspace(1)* %ptr) #1 {
; CHECK-LABEL: define {{[^@]+}}@use_get_local_size_y
; CHECK-SAME: (i32 addrspace(1)* [[PTR:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: [[VAL:%.*]] = call i32 @llvm.r600.read.local.size.y()
; CHECK-NEXT: store i32 [[VAL]], i32 addrspace(1)* [[PTR]], align 4
; CHECK-NEXT: ret void
;
  %val = call i32 @llvm.r600.read.local.size.y()
  store i32 %val, i32 addrspace(1)* %ptr
  ret void
}
; Same as local.size.x: no extra attribute, base set #[[ATTR1]].
define amdgpu_kernel void @use_get_local_size_z(i32 addrspace(1)* %ptr) #1 {
; CHECK-LABEL: define {{[^@]+}}@use_get_local_size_z
; CHECK-SAME: (i32 addrspace(1)* [[PTR:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: [[VAL:%.*]] = call i32 @llvm.r600.read.local.size.z()
; CHECK-NEXT: store i32 [[VAL]], i32 addrspace(1)* [[PTR]], align 4
; CHECK-NEXT: ret void
;
  %val = call i32 @llvm.r600.read.local.size.z()
  store i32 %val, i32 addrspace(1)* %ptr
  ret void
}
attributes #0 = { nounwind readnone }
attributes #1 = { nounwind }

; NOTE(review): the ALL/NOHSA prefix blocks below are not referenced by any RUN
; line visible in this file — they look like stale output from an earlier set of
; check prefixes. FileCheck ignores unreferenced prefixes, so they are inert;
; confirm against the full file/history before removing.
; ALL: attributes #[[ATTR0:[0-9]+]] = { nounwind readnone speculatable willreturn "uniform-work-group-size"="false" }
; ALL: attributes #[[ATTR1]] = { nounwind "uniform-work-group-size"="false" }
; ALL: attributes #[[ATTR2]] = { nounwind "amdgpu-work-group-id-y" "uniform-work-group-size"="false" }
; ALL: attributes #[[ATTR3]] = { nounwind "amdgpu-work-group-id-z" "uniform-work-group-size"="false" }
; ALL: attributes #[[ATTR4]] = { nounwind "amdgpu-work-group-id-y" "amdgpu-work-group-id-z" "uniform-work-group-size"="false" }
; ALL: attributes #[[ATTR5]] = { nounwind "amdgpu-work-item-id-y" "uniform-work-group-size"="false" }
; ALL: attributes #[[ATTR6]] = { nounwind "amdgpu-work-item-id-z" "uniform-work-group-size"="false" }
; ALL: attributes #[[ATTR7]] = { nounwind "amdgpu-work-group-id-y" "amdgpu-work-item-id-y" "uniform-work-group-size"="false" }
; ALL: attributes #[[ATTR8]] = { nounwind "amdgpu-work-item-id-y" "amdgpu-work-item-id-z" "uniform-work-group-size"="false" }
; ALL: attributes #[[ATTR9]] = { nounwind "amdgpu-work-group-id-y" "amdgpu-work-group-id-z" "amdgpu-work-item-id-y" "amdgpu-work-item-id-z" "uniform-work-group-size"="false" }
; NOHSA: attributes #[[ATTR0:[0-9]+]] = { nounwind readnone speculatable willreturn "uniform-work-group-size"="false" }
; NOHSA: attributes #[[ATTR1]] = { nounwind "uniform-work-group-size"="false" }
; NOHSA: attributes #[[ATTR2]] = { nounwind "amdgpu-work-group-id-y" "uniform-work-group-size"="false" }
; NOHSA: attributes #[[ATTR3]] = { nounwind "amdgpu-work-group-id-z" "uniform-work-group-size"="false" }
; NOHSA: attributes #[[ATTR4]] = { nounwind "amdgpu-work-group-id-y" "amdgpu-work-group-id-z" "uniform-work-group-size"="false" }
; NOHSA: attributes #[[ATTR5]] = { nounwind "amdgpu-work-item-id-y" "uniform-work-group-size"="false" }
; NOHSA: attributes #[[ATTR6]] = { nounwind "amdgpu-work-item-id-z" "uniform-work-group-size"="false" }
; NOHSA: attributes #[[ATTR7]] = { nounwind "amdgpu-work-group-id-y" "amdgpu-work-item-id-y" "uniform-work-group-size"="false" }
; NOHSA: attributes #[[ATTR8]] = { nounwind "amdgpu-work-item-id-y" "amdgpu-work-item-id-z" "uniform-work-group-size"="false" }
; NOHSA: attributes #[[ATTR9]] = { nounwind "amdgpu-work-group-id-y" "amdgpu-work-group-id-z" "amdgpu-work-item-id-y" "amdgpu-work-item-id-z" "uniform-work-group-size"="false" }

; AKF_CHECK: attributes #[[ATTR0:[0-9]+]] = { nounwind readnone speculatable willreturn }
; AKF_CHECK: attributes #[[ATTR1]] = { nounwind }
; AKF_CHECK: attributes #[[ATTR2]] = { nounwind "amdgpu-work-group-id-y" }
; AKF_CHECK: attributes #[[ATTR3]] = { nounwind "amdgpu-work-group-id-z" }
; AKF_CHECK: attributes #[[ATTR4]] = { nounwind "amdgpu-work-group-id-y" "amdgpu-work-group-id-z" }
; AKF_CHECK: attributes #[[ATTR5]] = { nounwind "amdgpu-work-item-id-y" }
; AKF_CHECK: attributes #[[ATTR6]] = { nounwind "amdgpu-work-item-id-z" }
; AKF_CHECK: attributes #[[ATTR7]] = { nounwind "amdgpu-work-group-id-y" "amdgpu-work-item-id-y" }
; AKF_CHECK: attributes #[[ATTR8]] = { nounwind "amdgpu-work-item-id-y" "amdgpu-work-item-id-z" }
; AKF_CHECK: attributes #[[ATTR9]] = { nounwind "amdgpu-work-group-id-y" "amdgpu-work-group-id-z" "amdgpu-work-item-id-y" "amdgpu-work-item-id-z" }

; ATTRIBUTOR_CHECK: attributes #[[ATTR0:[0-9]+]] = { nounwind readnone speculatable willreturn }
; ATTRIBUTOR_CHECK: attributes #[[ATTR1]] = { nounwind "uniform-work-group-size"="false" }
; ATTRIBUTOR_CHECK: attributes #[[ATTR2]] = { nounwind "amdgpu-work-group-id-y" "uniform-work-group-size"="false" }
; ATTRIBUTOR_CHECK: attributes #[[ATTR3]] = { nounwind "amdgpu-work-group-id-z" "uniform-work-group-size"="false" }
; ATTRIBUTOR_CHECK: attributes #[[ATTR4]] = { nounwind "amdgpu-work-group-id-y" "amdgpu-work-group-id-z" "uniform-work-group-size"="false" }
; ATTRIBUTOR_CHECK: attributes #[[ATTR5]] = { nounwind "amdgpu-work-item-id-y" "uniform-work-group-size"="false" }
; ATTRIBUTOR_CHECK: attributes #[[ATTR6]] = { nounwind "amdgpu-work-item-id-z" "uniform-work-group-size"="false" }
; ATTRIBUTOR_CHECK: attributes #[[ATTR7]] = { nounwind "amdgpu-work-group-id-y" "amdgpu-work-item-id-y" "uniform-work-group-size"="false" }
; ATTRIBUTOR_CHECK: attributes #[[ATTR8]] = { nounwind "amdgpu-work-item-id-y" "amdgpu-work-item-id-z" "uniform-work-group-size"="false" }
; ATTRIBUTOR_CHECK: attributes #[[ATTR9]] = { nounwind "amdgpu-work-group-id-y" "amdgpu-work-group-id-z" "amdgpu-work-item-id-y" "amdgpu-work-item-id-z" "uniform-work-group-size"="false" }