// RUN: mlir-opt -pass-pipeline="builtin.module(gpu.module(convert-gpu-to-llvm-spv{use-64bit-index=true}))" -split-input-file -verify-diagnostics %s \
// RUN: | FileCheck --check-prefixes=CHECK-64,CHECK %s
// RUN: mlir-opt -pass-pipeline="builtin.module(gpu.module(convert-gpu-to-llvm-spv))" -split-input-file -verify-diagnostics %s \
// RUN: | FileCheck --check-prefixes=CHECK-32,CHECK %s
// RUN: mlir-opt -pass-pipeline="builtin.module(gpu.module(convert-gpu-to-llvm-spv{use-64bit-index=false}))" -split-input-file -verify-diagnostics %s \
// RUN: | FileCheck --check-prefixes=CHECK-32,CHECK %s
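
// With `use-64bit-index=true`, the OpenCL builtins are declared as returning
// i64 (CHECK-64 prefix); by default and with `use-64bit-index=false` they
// return i32 (CHECK-32 prefix).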

gpu.module @builtins {
  // CHECK-64: llvm.func spir_funccc @_Z14get_num_groupsj(i32) -> i64 attributes {
  // CHECK-32: llvm.func spir_funccc @_Z14get_num_groupsj(i32) -> i32 attributes {
  // CHECK-SAME-DAG: memory_effects = #llvm.memory_effects<other = none, argMem = none, inaccessibleMem = none>
  // CHECK-SAME-DAG: no_unwind
  // CHECK-SAME-DAG: will_return
  // CHECK-NOT: convergent
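
  // Note: the ID/size builtins are declared with no memory effects plus
  // `no_unwind` and `will_return`, i.e. they are side-effect-free, and they
  // are *not* `convergent`, as they do not communicate across work-items.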

  // CHECK-64: llvm.func spir_funccc @_Z12get_local_idj(i32) -> i64 attributes {
  // CHECK-32: llvm.func spir_funccc @_Z12get_local_idj(i32) -> i32 attributes {
  // CHECK-SAME-DAG: memory_effects = #llvm.memory_effects<other = none, argMem = none, inaccessibleMem = none>
  // CHECK-SAME-DAG: no_unwind
  // CHECK-SAME-DAG: will_return
  // CHECK-NOT: convergent

  // CHECK-64: llvm.func spir_funccc @_Z14get_local_sizej(i32) -> i64 attributes {
  // CHECK-32: llvm.func spir_funccc @_Z14get_local_sizej(i32) -> i32 attributes {
  // CHECK-SAME-DAG: memory_effects = #llvm.memory_effects<other = none, argMem = none, inaccessibleMem = none>
  // CHECK-SAME-DAG: no_unwind
  // CHECK-SAME-DAG: will_return
  // CHECK-NOT: convergent

  // CHECK-64: llvm.func spir_funccc @_Z13get_global_idj(i32) -> i64 attributes {
  // CHECK-32: llvm.func spir_funccc @_Z13get_global_idj(i32) -> i32 attributes {
  // CHECK-SAME-DAG: memory_effects = #llvm.memory_effects<other = none, argMem = none, inaccessibleMem = none>
  // CHECK-SAME-DAG: no_unwind
  // CHECK-SAME-DAG: will_return
  // CHECK-NOT: convergent

  // CHECK-64: llvm.func spir_funccc @_Z12get_group_idj(i32) -> i64 attributes {
  // CHECK-32: llvm.func spir_funccc @_Z12get_group_idj(i32) -> i32 attributes {
  // CHECK-SAME-DAG: memory_effects = #llvm.memory_effects<other = none, argMem = none, inaccessibleMem = none>
  // CHECK-SAME-DAG: no_unwind
  // CHECK-SAME-DAG: will_return
  // CHECK-NOT: convergent
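
  // Each `gpu.block_id`/`gpu.thread_id`/... op below lowers to a call to the
  // matching OpenCL builtin, passing the dimension as an i32 argument: 0 for
  // x, 1 for y, and 2 for z.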

  // CHECK-LABEL: gpu_block_id
  func.func @gpu_block_id() -> (index, index, index) {
    // CHECK: [[C0:%.*]] = llvm.mlir.constant(0 : i32) : i32
    // CHECK: llvm.call spir_funccc @_Z12get_group_idj([[C0]]) {
    // CHECK-SAME-DAG: memory_effects = #llvm.memory_effects<other = none, argMem = none, inaccessibleMem = none>
    // CHECK-SAME-DAG: no_unwind
    // CHECK-SAME-DAG: will_return
    // CHECK-NOT: convergent
    // CHECK-64-SAME: } : (i32) -> i64
    // CHECK-32-SAME: } : (i32) -> i32
    %block_id_x = gpu.block_id x
    // CHECK: [[C1:%.*]] = llvm.mlir.constant(1 : i32) : i32
    // CHECK: llvm.call spir_funccc @_Z12get_group_idj([[C1]]) {
    // CHECK-SAME-DAG: memory_effects = #llvm.memory_effects<other = none, argMem = none, inaccessibleMem = none>
    // CHECK-SAME-DAG: no_unwind
    // CHECK-SAME-DAG: will_return
    // CHECK-NOT: convergent
    // CHECK-64-SAME: } : (i32) -> i64
    // CHECK-32-SAME: } : (i32) -> i32
    %block_id_y = gpu.block_id y
    // CHECK: [[C2:%.*]] = llvm.mlir.constant(2 : i32) : i32
    // CHECK: llvm.call spir_funccc @_Z12get_group_idj([[C2]]) {
    // CHECK-SAME-DAG: memory_effects = #llvm.memory_effects<other = none, argMem = none, inaccessibleMem = none>
    // CHECK-SAME-DAG: no_unwind
    // CHECK-SAME-DAG: will_return
    // CHECK-NOT: convergent
    // CHECK-64-SAME: } : (i32) -> i64
    // CHECK-32-SAME: } : (i32) -> i32
    %block_id_z = gpu.block_id z
    return %block_id_x, %block_id_y, %block_id_z : index, index, index
  }

  // CHECK-LABEL: gpu_global_id
  func.func @gpu_global_id() -> (index, index, index) {
    // CHECK: [[C0:%.*]] = llvm.mlir.constant(0 : i32) : i32
    // CHECK: llvm.call spir_funccc @_Z13get_global_idj([[C0]]) {
    // CHECK-SAME-DAG: memory_effects = #llvm.memory_effects<other = none, argMem = none, inaccessibleMem = none>
    // CHECK-SAME-DAG: no_unwind
    // CHECK-SAME-DAG: will_return
    // CHECK-NOT: convergent
    // CHECK-64-SAME: } : (i32) -> i64
    // CHECK-32-SAME: } : (i32) -> i32
    %global_id_x = gpu.global_id x
    // CHECK: [[C1:%.*]] = llvm.mlir.constant(1 : i32) : i32
    // CHECK: llvm.call spir_funccc @_Z13get_global_idj([[C1]]) {
    // CHECK-SAME-DAG: memory_effects = #llvm.memory_effects<other = none, argMem = none, inaccessibleMem = none>
    // CHECK-SAME-DAG: no_unwind
    // CHECK-SAME-DAG: will_return
    // CHECK-NOT: convergent
    // CHECK-64-SAME: } : (i32) -> i64
    // CHECK-32-SAME: } : (i32) -> i32
    %global_id_y = gpu.global_id y
    // CHECK: [[C2:%.*]] = llvm.mlir.constant(2 : i32) : i32
    // CHECK: llvm.call spir_funccc @_Z13get_global_idj([[C2]]) {
    // CHECK-SAME-DAG: memory_effects = #llvm.memory_effects<other = none, argMem = none, inaccessibleMem = none>
    // CHECK-SAME-DAG: no_unwind
    // CHECK-SAME-DAG: will_return
    // CHECK-NOT: convergent
    // CHECK-64-SAME: } : (i32) -> i64
    // CHECK-32-SAME: } : (i32) -> i32
    %global_id_z = gpu.global_id z
    return %global_id_x, %global_id_y, %global_id_z : index, index, index
  }

  // CHECK-LABEL: gpu_block_dim
  func.func @gpu_block_dim() -> (index, index, index) {
    // CHECK: [[C0:%.*]] = llvm.mlir.constant(0 : i32) : i32
    // CHECK: llvm.call spir_funccc @_Z14get_local_sizej([[C0]]) {
    // CHECK-SAME-DAG: memory_effects = #llvm.memory_effects<other = none, argMem = none, inaccessibleMem = none>
    // CHECK-SAME-DAG: no_unwind
    // CHECK-SAME-DAG: will_return
    // CHECK-NOT: convergent
    // CHECK-64-SAME: } : (i32) -> i64
    // CHECK-32-SAME: } : (i32) -> i32
    %block_dim_x = gpu.block_dim x
    // CHECK: [[C1:%.*]] = llvm.mlir.constant(1 : i32) : i32
    // CHECK: llvm.call spir_funccc @_Z14get_local_sizej([[C1]]) {
    // CHECK-SAME-DAG: memory_effects = #llvm.memory_effects<other = none, argMem = none, inaccessibleMem = none>
    // CHECK-SAME-DAG: no_unwind
    // CHECK-SAME-DAG: will_return
    // CHECK-NOT: convergent
    // CHECK-64-SAME: } : (i32) -> i64
    // CHECK-32-SAME: } : (i32) -> i32
    %block_dim_y = gpu.block_dim y
    // CHECK: [[C2:%.*]] = llvm.mlir.constant(2 : i32) : i32
    // CHECK: llvm.call spir_funccc @_Z14get_local_sizej([[C2]]) {
    // CHECK-SAME-DAG: memory_effects = #llvm.memory_effects<other = none, argMem = none, inaccessibleMem = none>
    // CHECK-SAME-DAG: no_unwind
    // CHECK-SAME-DAG: will_return
    // CHECK-NOT: convergent
    // CHECK-64-SAME: } : (i32) -> i64
    // CHECK-32-SAME: } : (i32) -> i32
    %block_dim_z = gpu.block_dim z
    return %block_dim_x, %block_dim_y, %block_dim_z : index, index, index
  }

  // CHECK-LABEL: gpu_thread_id
  func.func @gpu_thread_id() -> (index, index, index) {
    // CHECK: [[C0:%.*]] = llvm.mlir.constant(0 : i32) : i32
    // CHECK: llvm.call spir_funccc @_Z12get_local_idj([[C0]]) {
    // CHECK-SAME-DAG: memory_effects = #llvm.memory_effects<other = none, argMem = none, inaccessibleMem = none>
    // CHECK-SAME-DAG: no_unwind
    // CHECK-SAME-DAG: will_return
    // CHECK-NOT: convergent
    // CHECK-64-SAME: } : (i32) -> i64
    // CHECK-32-SAME: } : (i32) -> i32
    %thread_id_x = gpu.thread_id x
    // CHECK: [[C1:%.*]] = llvm.mlir.constant(1 : i32) : i32
    // CHECK: llvm.call spir_funccc @_Z12get_local_idj([[C1]]) {
    // CHECK-SAME-DAG: memory_effects = #llvm.memory_effects<other = none, argMem = none, inaccessibleMem = none>
    // CHECK-SAME-DAG: no_unwind
    // CHECK-SAME-DAG: will_return
    // CHECK-NOT: convergent
    // CHECK-64-SAME: } : (i32) -> i64
    // CHECK-32-SAME: } : (i32) -> i32
    %thread_id_y = gpu.thread_id y
    // CHECK: [[C2:%.*]] = llvm.mlir.constant(2 : i32) : i32
    // CHECK: llvm.call spir_funccc @_Z12get_local_idj([[C2]]) {
    // CHECK-SAME-DAG: memory_effects = #llvm.memory_effects<other = none, argMem = none, inaccessibleMem = none>
    // CHECK-SAME-DAG: no_unwind
    // CHECK-SAME-DAG: will_return
    // CHECK-NOT: convergent
    // CHECK-64-SAME: } : (i32) -> i64
    // CHECK-32-SAME: } : (i32) -> i32
    %thread_id_z = gpu.thread_id z
    return %thread_id_x, %thread_id_y, %thread_id_z : index, index, index
  }

  // CHECK-LABEL: gpu_grid_dim
  func.func @gpu_grid_dim() -> (index, index, index) {
    // CHECK: [[C0:%.*]] = llvm.mlir.constant(0 : i32) : i32
    // CHECK: llvm.call spir_funccc @_Z14get_num_groupsj([[C0]]) {
    // CHECK-SAME-DAG: memory_effects = #llvm.memory_effects<other = none, argMem = none, inaccessibleMem = none>
    // CHECK-SAME-DAG: no_unwind
    // CHECK-SAME-DAG: will_return
    // CHECK-NOT: convergent
    // CHECK-64-SAME: } : (i32) -> i64
    // CHECK-32-SAME: } : (i32) -> i32
    %grid_dim_x = gpu.grid_dim x
    // CHECK: [[C1:%.*]] = llvm.mlir.constant(1 : i32) : i32
    // CHECK: llvm.call spir_funccc @_Z14get_num_groupsj([[C1]]) {
    // CHECK-SAME-DAG: memory_effects = #llvm.memory_effects<other = none, argMem = none, inaccessibleMem = none>
    // CHECK-SAME-DAG: no_unwind
    // CHECK-SAME-DAG: will_return
    // CHECK-NOT: convergent
    // CHECK-64-SAME: } : (i32) -> i64
    // CHECK-32-SAME: } : (i32) -> i32
    %grid_dim_y = gpu.grid_dim y
    // CHECK: [[C2:%.*]] = llvm.mlir.constant(2 : i32) : i32
    // CHECK: llvm.call spir_funccc @_Z14get_num_groupsj([[C2]]) {
    // CHECK-SAME-DAG: memory_effects = #llvm.memory_effects<other = none, argMem = none, inaccessibleMem = none>
    // CHECK-SAME-DAG: no_unwind
    // CHECK-SAME-DAG: will_return
    // CHECK-NOT: convergent
    // CHECK-64-SAME: } : (i32) -> i64
    // CHECK-32-SAME: } : (i32) -> i32
    %grid_dim_z = gpu.grid_dim z
    return %grid_dim_x, %grid_dim_y, %grid_dim_z : index, index, index
  }
}

// -----

gpu.module @barriers {
  // CHECK: llvm.func spir_funccc @_Z7barrierj(i32) attributes {
  // CHECK-SAME-DAG: no_unwind
  // CHECK-SAME-DAG: convergent
  // CHECK-SAME-DAG: will_return
  // CHECK-NOT: memory_effects = #llvm.memory_effects
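
  // Unlike the ID builtins above, `barrier` is `convergent` and its memory
  // effects are left unmodelled. The i32 flags constant 1 passed below
  // presumably corresponds to CLK_LOCAL_MEM_FENCE in typical OpenCL headers.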

  // CHECK-LABEL: gpu_barrier
  func.func @gpu_barrier() {
    // CHECK: [[FLAGS:%.*]] = llvm.mlir.constant(1 : i32) : i32
    // CHECK: llvm.call spir_funccc @_Z7barrierj([[FLAGS]]) {
    // CHECK-SAME-DAG: no_unwind
    // CHECK-SAME-DAG: convergent
    // CHECK-SAME-DAG: will_return
    // CHECK-NOT: memory_effects = #llvm.memory_effects
    // CHECK-SAME: } : (i32) -> ()
    gpu.barrier
    return
  }
}

// -----

// Check `gpu.shuffle` conversion with explicit subgroup size.

gpu.module @shuffles {
  // CHECK: llvm.func spir_funccc @_Z22sub_group_shuffle_downdj(f64, i32) -> f64 attributes {
  // CHECK-SAME-DAG: no_unwind
  // CHECK-SAME-DAG: convergent
  // CHECK-SAME-DAG: will_return
  // CHECK-NOT: memory_effects = #llvm.memory_effects

  // CHECK: llvm.func spir_funccc @_Z20sub_group_shuffle_upfj(f32, i32) -> f32 attributes {
  // CHECK-SAME-DAG: no_unwind
  // CHECK-SAME-DAG: convergent
  // CHECK-SAME-DAG: will_return
  // CHECK-NOT: memory_effects = #llvm.memory_effects

  // CHECK: llvm.func spir_funccc @_Z20sub_group_shuffle_upDhj(f16, i32) -> f16 attributes {
  // CHECK-SAME-DAG: no_unwind
  // CHECK-SAME-DAG: convergent
  // CHECK-SAME-DAG: will_return
  // CHECK-NOT: memory_effects = #llvm.memory_effects

  // CHECK: llvm.func spir_funccc @_Z21sub_group_shuffle_xorlj(i64, i32) -> i64 attributes {
  // CHECK-SAME-DAG: no_unwind
  // CHECK-SAME-DAG: convergent
  // CHECK-SAME-DAG: will_return
  // CHECK-NOT: memory_effects = #llvm.memory_effects

  // CHECK: llvm.func spir_funccc @_Z17sub_group_shuffleij(i32, i32) -> i32 attributes {
  // CHECK-SAME-DAG: no_unwind
  // CHECK-SAME-DAG: convergent
  // CHECK-SAME-DAG: will_return
  // CHECK-NOT: memory_effects = #llvm.memory_effects

  // CHECK: llvm.func spir_funccc @_Z21sub_group_shuffle_xorsj(i16, i32) -> i16 attributes {
  // CHECK-SAME-DAG: no_unwind
  // CHECK-SAME-DAG: convergent
  // CHECK-SAME-DAG: will_return
  // CHECK-NOT: memory_effects = #llvm.memory_effects

  // CHECK: llvm.func spir_funccc @_Z17sub_group_shufflecj(i8, i32) -> i8 attributes {
  // CHECK-SAME-DAG: no_unwind
  // CHECK-SAME-DAG: convergent
  // CHECK-SAME-DAG: will_return
  // CHECK-NOT: memory_effects = #llvm.memory_effects
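
  // The mangled suffixes follow the Itanium scheme used for OpenCL builtins:
  // 'c' = i8, 's' = i16, 'i' = i32, 'l' = i64, 'Dh' = f16, 'f' = f32,
  // 'd' = f64, and 'j' = the unsigned i32 shuffle operand. bf16 and i1 have
  // no shuffle overload of their own, so they are shuffled through i16 and i8
  // instead (see the bitcast and zext/trunc pairs below).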

  // CHECK-LABEL: gpu_shuffles
  // CHECK-SAME: (%[[I8_VAL:.*]]: i8, %[[I16_VAL:.*]]: i16,
  // CHECK-SAME: %[[I32_VAL:.*]]: i32, %[[I64_VAL:.*]]: i64,
  // CHECK-SAME: %[[F16_VAL:.*]]: f16, %[[F32_VAL:.*]]: f32,
  // CHECK-SAME: %[[F64_VAL:.*]]: f64, %[[BF16_VAL:.*]]: bf16,
  // CHECK-SAME: %[[I1_VAL:.*]]: i1, %[[OFFSET:.*]]: i32)
  llvm.func @gpu_shuffles(%i8_val: i8,
                          %i16_val: i16,
                          %i32_val: i32,
                          %i64_val: i64,
                          %f16_val: f16,
                          %f32_val: f32,
                          %f64_val: f64,
                          %bf16_val: bf16,
                          %i1_val: i1,
                          %offset: i32) attributes {intel_reqd_sub_group_size = 16 : i32} {
    %width = arith.constant 16 : i32
    // CHECK: llvm.call spir_funccc @_Z17sub_group_shufflecj(%[[I8_VAL]], %[[OFFSET]])
    // CHECK: llvm.mlir.constant(true) : i1
    // CHECK: llvm.call spir_funccc @_Z21sub_group_shuffle_xorsj(%[[I16_VAL]], %[[OFFSET]])
    // CHECK: llvm.mlir.constant(true) : i1
    // CHECK: llvm.call spir_funccc @_Z17sub_group_shuffleij(%[[I32_VAL]], %[[OFFSET]])
    // CHECK: llvm.mlir.constant(true) : i1
    // CHECK: llvm.call spir_funccc @_Z21sub_group_shuffle_xorlj(%[[I64_VAL]], %[[OFFSET]])
    // CHECK: llvm.mlir.constant(true) : i1
    // CHECK: llvm.call spir_funccc @_Z20sub_group_shuffle_upDhj(%[[F16_VAL]], %[[OFFSET]])
    // CHECK: llvm.mlir.constant(true) : i1
    // CHECK: llvm.call spir_funccc @_Z20sub_group_shuffle_upfj(%[[F32_VAL]], %[[OFFSET]])
    // CHECK: llvm.mlir.constant(true) : i1
    // CHECK: llvm.call spir_funccc @_Z22sub_group_shuffle_downdj(%[[F64_VAL]], %[[OFFSET]])
    // CHECK: llvm.mlir.constant(true) : i1
    // CHECK: %[[BF16_INBC:.*]] = llvm.bitcast %[[BF16_VAL]] : bf16 to i16
    // CHECK: %[[BF16_CALL:.*]] = llvm.call spir_funccc @_Z22sub_group_shuffle_downsj(%[[BF16_INBC]], %[[OFFSET]])
    // CHECK: llvm.bitcast %[[BF16_CALL]] : i16 to bf16
    // CHECK: llvm.mlir.constant(true) : i1
    // CHECK: %[[I1_ZEXT:.*]] = llvm.zext %[[I1_VAL]] : i1 to i8
    // CHECK: %[[I1_CALL:.*]] = llvm.call spir_funccc @_Z21sub_group_shuffle_xorcj(%[[I1_ZEXT]], %[[OFFSET]])
    // CHECK: llvm.trunc %[[I1_CALL]] : i8 to i1
    // CHECK: llvm.mlir.constant(true) : i1
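    // Since the shuffle width matches the `intel_reqd_sub_group_size` of 16,
    // the `valid` result of each `gpu.shuffle` folds to a constant `true`.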
    %shuffleResult0, %valid0 = gpu.shuffle idx %i8_val, %offset, %width : i8
    %shuffleResult1, %valid1 = gpu.shuffle xor %i16_val, %offset, %width : i16
    %shuffleResult2, %valid2 = gpu.shuffle idx %i32_val, %offset, %width : i32
    %shuffleResult3, %valid3 = gpu.shuffle xor %i64_val, %offset, %width : i64
    %shuffleResult4, %valid4 = gpu.shuffle up %f16_val, %offset, %width : f16
    %shuffleResult5, %valid5 = gpu.shuffle up %f32_val, %offset, %width : f32
    %shuffleResult6, %valid6 = gpu.shuffle down %f64_val, %offset, %width : f64
    %shuffleResult7, %valid7 = gpu.shuffle down %bf16_val, %offset, %width : bf16
    %shuffleResult8, %valid8 = gpu.shuffle xor %i1_val, %offset, %width : i1
    llvm.return
  }
}

// -----

// Cannot convert due to a mismatch between the shuffle width and the target
// subgroup size.

gpu.module @shuffles_mismatch {
  llvm.func @gpu_shuffles(%val: i32, %id: i32) attributes {intel_reqd_sub_group_size = 32 : i32} {
    %width = arith.constant 16 : i32
    // expected-error@below {{failed to legalize operation 'gpu.shuffle' that was explicitly marked illegal}}
    %shuffleResult, %valid = gpu.shuffle idx %val, %id, %width : i32
    llvm.return
  }
}

// -----

// Cannot convert due to a variable shuffle width.

gpu.module @shuffles_mismatch {
  llvm.func @gpu_shuffles(%val: i32, %id: i32, %width: i32) attributes {intel_reqd_sub_group_size = 32 : i32} {
    // expected-error@below {{failed to legalize operation 'gpu.shuffle' that was explicitly marked illegal}}
    %shuffleResult, %valid = gpu.shuffle idx %val, %id, %width : i32
    llvm.return
  }
}

// -----

// Cannot convert because the value type is not supported by the conversion.

gpu.module @not_supported_lowering {
  llvm.func @gpu_shuffles(%val: f128, %id: i32) attributes {intel_reqd_sub_group_size = 32 : i32} {
    %width = arith.constant 32 : i32
    // expected-error@below {{failed to legalize operation 'gpu.shuffle' that was explicitly marked illegal}}
    %shuffleResult, %valid = gpu.shuffle xor %val, %id, %width : f128
    llvm.return
  }
}

// -----

gpu.module @kernels {
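  // `gpu.func` lowers to `llvm.func` with the `spir_funccc` calling
  // convention; functions marked `kernel` instead get `spir_kernelcc` and the
  // `gpu.kernel` attribute.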
  // CHECK: llvm.func spir_funccc @no_kernel() {
  gpu.func @no_kernel() {
    gpu.return
  }

  // CHECK: llvm.func spir_kernelcc @kernel_no_arg() attributes {gpu.kernel} {
  gpu.func @kernel_no_arg() kernel {
    gpu.return
  }

  // CHECK: llvm.func spir_kernelcc @kernel_with_args(%{{.*}}: f32, %{{.*}}: i64) attributes {gpu.kernel} {
  gpu.func @kernel_with_args(%arg0: f32, %arg1: i64) kernel {
    gpu.return
  }

  // CHECK-64: llvm.func spir_kernelcc @kernel_with_conv_args(%{{.*}}: i64, %{{.*}}: !llvm.ptr<1>, %{{.*}}: !llvm.ptr<1>, %{{.*}}: i64) attributes {gpu.kernel} {
  // CHECK-32: llvm.func spir_kernelcc @kernel_with_conv_args(%{{.*}}: i32, %{{.*}}: !llvm.ptr<1>, %{{.*}}: !llvm.ptr<1>, %{{.*}}: i32) attributes {gpu.kernel} {
  gpu.func @kernel_with_conv_args(%arg0: index, %arg1: memref<index>) kernel {
    gpu.return
  }
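
  // Ranked memref arguments are expanded into their descriptor contents:
  // allocated pointer, aligned pointer, offset, then per-dimension sizes and
  // strides, with `index`-typed values lowered to i64 or i32 according to the
  // index width option.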

  // CHECK-64: llvm.func spir_kernelcc @kernel_with_sized_memref(%{{.*}}: !llvm.ptr<1>, %{{.*}}: !llvm.ptr<1>, %{{.*}}: i64, %{{.*}}: i64, %{{.*}}: i64) attributes {gpu.kernel} {
  // CHECK-32: llvm.func spir_kernelcc @kernel_with_sized_memref(%{{.*}}: !llvm.ptr<1>, %{{.*}}: !llvm.ptr<1>, %{{.*}}: i32, %{{.*}}: i32, %{{.*}}: i32) attributes {gpu.kernel} {
  gpu.func @kernel_with_sized_memref(%arg0: memref<1xindex>) kernel {
    gpu.return
  }

  // CHECK-64: llvm.func spir_kernelcc @kernel_with_ND_memref(%{{.*}}: !llvm.ptr<1>, %{{.*}}: !llvm.ptr<1>, %{{.*}}: i64, %{{.*}}: i64, %{{.*}}: i64, %{{.*}}: i64, %{{.*}}: i64, %{{.*}}: i64, %{{.*}}: i64) attributes {gpu.kernel} {
  // CHECK-32: llvm.func spir_kernelcc @kernel_with_ND_memref(%{{.*}}: !llvm.ptr<1>, %{{.*}}: !llvm.ptr<1>, %{{.*}}: i32, %{{.*}}: i32, %{{.*}}: i32, %{{.*}}: i32, %{{.*}}: i32, %{{.*}}: i32, %{{.*}}: i32) attributes {gpu.kernel} {
  gpu.func @kernel_with_ND_memref(%arg0: memref<128x128x128xindex>) kernel {
    gpu.return
  }
}

// -----

gpu.module @kernels {
  // CHECK-LABEL: llvm.func spir_kernelcc @kernel_with_private_attributions() attributes {gpu.kernel} {

  // Each private attribution is converted to an llvm.alloca.

  // CHECK: %[[VAL_2:.*]] = llvm.mlir.constant(32 : i64) : i64
  // CHECK: %[[VAL_3:.*]] = llvm.alloca %[[VAL_2]] x f32 : (i64) -> !llvm.ptr

  // MemRef descriptor built from the allocated pointer.

  // CHECK-64: %[[VAL_4:.*]] = llvm.mlir.undef : !llvm.struct<(ptr, ptr, i64, array<1 x i64>, array<1 x i64>)>
  // CHECK-32: %[[VAL_4:.*]] = llvm.mlir.undef : !llvm.struct<(ptr, ptr, i32, array<1 x i32>, array<1 x i32>)>

  // CHECK: %[[VAL_5:.*]] = llvm.insertvalue %[[VAL_3]], %[[VAL_4]][0]
  // CHECK: llvm.insertvalue %[[VAL_3]], %[[VAL_5]][1]

  // Same code as above for the second attribution.

  // CHECK: %[[VAL_14:.*]] = llvm.mlir.constant(16 : i64) : i64
  // CHECK: %[[VAL_15:.*]] = llvm.alloca %[[VAL_14]] x i16 : (i64) -> !llvm.ptr

  // CHECK-64: %[[VAL_16:.*]] = llvm.mlir.undef : !llvm.struct<(ptr, ptr, i64, array<1 x i64>, array<1 x i64>)>
  // CHECK-32: %[[VAL_16:.*]] = llvm.mlir.undef : !llvm.struct<(ptr, ptr, i32, array<1 x i32>, array<1 x i32>)>

  // CHECK: %[[VAL_17:.*]] = llvm.insertvalue %[[VAL_15]], %[[VAL_16]][0]
  // CHECK: llvm.insertvalue %[[VAL_15]], %[[VAL_17]][1]
  gpu.func @kernel_with_private_attributions()
      private(%arg2: memref<32xf32, #gpu.address_space<private>>, %arg3: memref<16xi16, #gpu.address_space<private>>)
      kernel {
    gpu.return
  }

  // Workgroup attributions are converted to llvm.ptr<3> arguments.

  // CHECK-LABEL: llvm.func spir_kernelcc @kernel_with_workgroup_attributions(
  // CHECK-SAME: %[[VAL_29:.*]]: !llvm.ptr<3> {llvm.noalias, llvm.workgroup_attribution = #llvm.mlir.workgroup_attribution<32 : i64, f32>},
  // CHECK-SAME: %[[VAL_30:.*]]: !llvm.ptr<3> {llvm.noalias, llvm.workgroup_attribution = #llvm.mlir.workgroup_attribution<16 : i64, i16>}) attributes {gpu.kernel} {

  // MemRef descriptor built from the new argument.

  // CHECK-64: %[[VAL_31:.*]] = llvm.mlir.undef : !llvm.struct<(ptr<3>, ptr<3>, i64, array<1 x i64>, array<1 x i64>)>
  // CHECK-32: %[[VAL_31:.*]] = llvm.mlir.undef : !llvm.struct<(ptr<3>, ptr<3>, i32, array<1 x i32>, array<1 x i32>)>

  // CHECK: %[[VAL_32:.*]] = llvm.insertvalue %[[VAL_29]], %[[VAL_31]][0]
  // CHECK: llvm.insertvalue %[[VAL_29]], %[[VAL_32]][1]

  // Same code as above for the second attribution.

  // CHECK-64: %[[VAL_41:.*]] = llvm.mlir.undef : !llvm.struct<(ptr<3>, ptr<3>, i64, array<1 x i64>, array<1 x i64>)>
  // CHECK-32: %[[VAL_41:.*]] = llvm.mlir.undef : !llvm.struct<(ptr<3>, ptr<3>, i32, array<1 x i32>, array<1 x i32>)>

  // CHECK: %[[VAL_42:.*]] = llvm.insertvalue %[[VAL_30]], %[[VAL_41]][0]
  // CHECK: llvm.insertvalue %[[VAL_30]], %[[VAL_42]][1]
  gpu.func @kernel_with_workgroup_attributions()
      workgroup(%arg2: memref<32xf32, #gpu.address_space<workgroup>>, %arg3: memref<16xi16, #gpu.address_space<workgroup>>)
      kernel {
    gpu.return
  }

  // Check with both private and workgroup attributions. Simply check that the
  // additional arguments and an llvm.alloca are present.

  // CHECK-LABEL: llvm.func spir_kernelcc @kernel_with_both_attributions(
  // CHECK-SAME: %{{.*}}: !llvm.ptr<3> {llvm.noalias, llvm.workgroup_attribution = #llvm.mlir.workgroup_attribution<8 : i64, f32>},
  // CHECK-64-SAME: %{{.*}}: !llvm.ptr<3> {llvm.noalias, llvm.workgroup_attribution = #llvm.mlir.workgroup_attribution<16 : i64, i64>}) attributes {gpu.kernel} {
  // CHECK-32-SAME: %{{.*}}: !llvm.ptr<3> {llvm.noalias, llvm.workgroup_attribution = #llvm.mlir.workgroup_attribution<16 : i64, i32>}) attributes {gpu.kernel} {

  // CHECK: %[[VAL_79:.*]] = llvm.mlir.constant(32 : i64) : i64
  // CHECK: %[[VAL_80:.*]] = llvm.alloca %[[VAL_79]] x i32 : (i64) -> !llvm.ptr

  // CHECK: %[[VAL_91:.*]] = llvm.mlir.constant(32 : i64) : i64
  // CHECK-64: %[[VAL_92:.*]] = llvm.alloca %[[VAL_91]] x i64 : (i64) -> !llvm.ptr
  // CHECK-32: %[[VAL_92:.*]] = llvm.alloca %[[VAL_91]] x i32 : (i64) -> !llvm.ptr
  gpu.func @kernel_with_both_attributions()
      workgroup(%arg4: memref<8xf32, #gpu.address_space<workgroup>>, %arg5: memref<16xindex, #gpu.address_space<workgroup>>)
      private(%arg6: memref<32xi32, #gpu.address_space<private>>, %arg7: memref<32xindex, #gpu.address_space<private>>)
      kernel {
    gpu.return
  }

  // CHECK-LABEL: llvm.func spir_kernelcc @kernel_known_block_size
  // CHECK-SAME: reqd_work_group_size = array<i32: 128, 128, 256>
  gpu.func @kernel_known_block_size() kernel attributes {known_block_size = array<i32: 128, 128, 256>} {
    gpu.return
  }
}

// -----

gpu.module @kernels {
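  // GPU address spaces map to LLVM/SPIR-V address spaces: global -> 1,
  // workgroup -> 3, and private -> the default address space 0.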
  // CHECK-LABEL: llvm.func spir_funccc @address_spaces(
  // CHECK-SAME: {{.*}}: !llvm.ptr<1>
  // CHECK-SAME: {{.*}}: !llvm.ptr<3>
  // CHECK-SAME: {{.*}}: !llvm.ptr
  gpu.func @address_spaces(%arg0: memref<f32, #gpu.address_space<global>>, %arg1: memref<f32, #gpu.address_space<workgroup>>, %arg2: memref<f32, #gpu.address_space<private>>) {
    gpu.return
  }
}

// -----

gpu.module @kernels {
  // CHECK: llvm.func spir_funccc @_Z12get_group_idj(i32)

  // CHECK-LABEL: llvm.func spir_funccc @no_address_spaces(
  // CHECK-SAME: %{{[a-zA-Z_][a-zA-Z0-9_]*}}: !llvm.ptr<1>
  // CHECK-SAME: %{{[a-zA-Z_][a-zA-Z0-9_]*}}: !llvm.ptr<1>
  // CHECK-SAME: %{{[a-zA-Z_][a-zA-Z0-9_]*}}: !llvm.ptr<1>
  gpu.func @no_address_spaces(%arg0: memref<f32>, %arg1: memref<f32, #gpu.address_space<global>>, %arg2: memref<f32>) {
    gpu.return
  }

  // CHECK-LABEL: llvm.func spir_kernelcc @no_address_spaces_complex(
  // CHECK-SAME: %{{[a-zA-Z_][a-zA-Z0-9_]*}}: !llvm.ptr<1>
  // CHECK-SAME: %{{[a-zA-Z_][a-zA-Z0-9_]*}}: !llvm.ptr<1>
  // CHECK: func.call @no_address_spaces_callee(%{{[0-9]+}}, %{{[0-9]+}})
  // CHECK-SAME: : (memref<2x2xf32, 1>, memref<4xf32, 1>)
  gpu.func @no_address_spaces_complex(%arg0: memref<2x2xf32>, %arg1: memref<4xf32>) kernel {
    func.call @no_address_spaces_callee(%arg0, %arg1) : (memref<2x2xf32>, memref<4xf32>) -> ()
    gpu.return
  }

  // CHECK-LABEL: func.func @no_address_spaces_callee(
  // CHECK-SAME: [[ARG0:%.*]]: memref<2x2xf32, 1>
  // CHECK-SAME: [[ARG1:%.*]]: memref<4xf32, 1>
  // CHECK: [[C0:%.*]] = llvm.mlir.constant(0 : i32) : i32
  // CHECK: [[I0:%.*]] = llvm.call spir_funccc @_Z12get_group_idj([[C0]]) {
  // CHECK-32: [[I1:%.*]] = builtin.unrealized_conversion_cast [[I0]] : i32 to index
  // CHECK-64: [[I1:%.*]] = builtin.unrealized_conversion_cast [[I0]] : i64 to index
  // CHECK: [[LD:%.*]] = memref.load [[ARG0]]{{\[}}[[I1]], [[I1]]{{\]}} : memref<2x2xf32, 1>
  // CHECK: memref.store [[LD]], [[ARG1]]{{\[}}[[I1]]{{\]}} : memref<4xf32, 1>
  func.func @no_address_spaces_callee(%arg0: memref<2x2xf32>, %arg1: memref<4xf32>) {
    %block_id = gpu.block_id x
    %0 = memref.load %arg0[%block_id, %block_id] : memref<2x2xf32>
    memref.store %0, %arg1[%block_id] : memref<4xf32>
    return
  }
}

// -----

// Lowering of subgroup query operations.

// CHECK-DAG: llvm.func spir_funccc @_Z18get_sub_group_size() -> i32 attributes {no_unwind, will_return}
// CHECK-DAG: llvm.func spir_funccc @_Z18get_num_sub_groups() -> i32 attributes {no_unwind, will_return}
// CHECK-DAG: llvm.func spir_funccc @_Z22get_sub_group_local_id() -> i32 attributes {no_unwind, will_return}
// CHECK-DAG: llvm.func spir_funccc @_Z16get_sub_group_id() -> i32 attributes {no_unwind, will_return}
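
// The subgroup builtins always return i32; when lowering to a 64-bit `index`
// the result is widened with `llvm.zext`, while the 32-bit configuration can
// use it directly.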

gpu.module @subgroup_operations {
  // CHECK-LABEL: @gpu_subgroup
  func.func @gpu_subgroup() {
    // CHECK: %[[SG_ID:.*]] = llvm.call spir_funccc @_Z16get_sub_group_id() {no_unwind, will_return} : () -> i32
    // CHECK-32-NOT: llvm.zext
    // CHECK-64: %{{.*}} = llvm.zext %[[SG_ID]] : i32 to i64
    %0 = gpu.subgroup_id : index
    // CHECK: %[[SG_LOCAL_ID:.*]] = llvm.call spir_funccc @_Z22get_sub_group_local_id() {no_unwind, will_return} : () -> i32
    // CHECK-32-NOT: llvm.zext
    // CHECK-64: %{{.*}} = llvm.zext %[[SG_LOCAL_ID]] : i32 to i64
    %1 = gpu.lane_id
    // CHECK: %[[NUM_SGS:.*]] = llvm.call spir_funccc @_Z18get_num_sub_groups() {no_unwind, will_return} : () -> i32
    // CHECK-32-NOT: llvm.zext
    // CHECK-64: %{{.*}} = llvm.zext %[[NUM_SGS]] : i32 to i64
    %2 = gpu.num_subgroups : index
    // CHECK: %[[SG_SIZE:.*]] = llvm.call spir_funccc @_Z18get_sub_group_size() {no_unwind, will_return} : () -> i32
    // CHECK-32-NOT: llvm.zext
    // CHECK-64: %{{.*}} = llvm.zext %[[SG_SIZE]] : i32 to i64
    %3 = gpu.subgroup_size : index