/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
23 #include "amdgpu_amdkfd.h"
24 #include "amd_shared.h"
27 #include "amdgpu_gfx.h"
28 #include <linux/module.h>
30 const struct kgd2kfd_calls
*kgd2kfd
;
31 bool (*kgd2kfd_init_p
)(unsigned int, const struct kgd2kfd_calls
**);
33 int amdgpu_amdkfd_init(void)
37 #if defined(CONFIG_HSA_AMD_MODULE)
38 int (*kgd2kfd_init_p
)(unsigned int, const struct kgd2kfd_calls
**);
40 kgd2kfd_init_p
= symbol_request(kgd2kfd_init
);
42 if (kgd2kfd_init_p
== NULL
)
45 ret
= kgd2kfd_init_p(KFD_INTERFACE_VERSION
, &kgd2kfd
);
47 symbol_put(kgd2kfd_init
);
51 #elif defined(CONFIG_HSA_AMD)
52 ret
= kgd2kfd_init(KFD_INTERFACE_VERSION
, &kgd2kfd
);
63 void amdgpu_amdkfd_fini(void)
67 symbol_put(kgd2kfd_init
);
71 void amdgpu_amdkfd_device_probe(struct amdgpu_device
*adev
)
73 const struct kfd2kgd_calls
*kfd2kgd
;
78 switch (adev
->asic_type
) {
79 #ifdef CONFIG_DRM_AMDGPU_CIK
81 kfd2kgd
= amdgpu_amdkfd_gfx_7_get_functions();
85 kfd2kgd
= amdgpu_amdkfd_gfx_8_0_get_functions();
88 dev_dbg(adev
->dev
, "kfd not supported on this ASIC\n");
92 adev
->kfd
= kgd2kfd
->probe((struct kgd_dev
*)adev
,
97 * amdgpu_doorbell_get_kfd_info - Report doorbell configuration required to
100 * @adev: amdgpu_device pointer
101 * @aperture_base: output returning doorbell aperture base physical address
102 * @aperture_size: output returning doorbell aperture size in bytes
103 * @start_offset: output returning # of doorbell bytes reserved for amdgpu.
105 * amdgpu and amdkfd share the doorbell aperture. amdgpu sets it up,
106 * takes doorbells required for its own rings and reports the setup to amdkfd.
107 * amdgpu reserved doorbells are at the start of the doorbell aperture.
109 static void amdgpu_doorbell_get_kfd_info(struct amdgpu_device
*adev
,
110 phys_addr_t
*aperture_base
,
111 size_t *aperture_size
,
112 size_t *start_offset
)
115 * The first num_doorbells are used by amdgpu.
116 * amdkfd takes whatever's left in the aperture.
118 if (adev
->doorbell
.size
> adev
->doorbell
.num_doorbells
* sizeof(u32
)) {
119 *aperture_base
= adev
->doorbell
.base
;
120 *aperture_size
= adev
->doorbell
.size
;
121 *start_offset
= adev
->doorbell
.num_doorbells
* sizeof(u32
);
129 void amdgpu_amdkfd_device_init(struct amdgpu_device
*adev
)
134 struct kgd2kfd_shared_resources gpu_resources
= {
135 .compute_vmid_bitmap
= 0xFF00,
136 .num_pipe_per_mec
= adev
->gfx
.mec
.num_pipe_per_mec
,
137 .num_queue_per_pipe
= adev
->gfx
.mec
.num_queue_per_pipe
140 /* this is going to have a few of the MSBs set that we need to
142 bitmap_complement(gpu_resources
.queue_bitmap
,
143 adev
->gfx
.mec
.queue_bitmap
,
146 /* remove the KIQ bit as well */
147 if (adev
->gfx
.kiq
.ring
.ready
)
148 clear_bit(amdgpu_gfx_queue_to_bit(adev
,
149 adev
->gfx
.kiq
.ring
.me
- 1,
150 adev
->gfx
.kiq
.ring
.pipe
,
151 adev
->gfx
.kiq
.ring
.queue
),
152 gpu_resources
.queue_bitmap
);
154 /* According to linux/bitmap.h we shouldn't use bitmap_clear if
155 * nbits is not compile time constant */
156 last_valid_bit
= 1 /* only first MEC can have compute queues */
157 * adev
->gfx
.mec
.num_pipe_per_mec
158 * adev
->gfx
.mec
.num_queue_per_pipe
;
159 for (i
= last_valid_bit
; i
< KGD_MAX_QUEUES
; ++i
)
160 clear_bit(i
, gpu_resources
.queue_bitmap
);
162 amdgpu_doorbell_get_kfd_info(adev
,
163 &gpu_resources
.doorbell_physical_address
,
164 &gpu_resources
.doorbell_aperture_size
,
165 &gpu_resources
.doorbell_start_offset
);
167 kgd2kfd
->device_init(adev
->kfd
, &gpu_resources
);
171 void amdgpu_amdkfd_device_fini(struct amdgpu_device
*adev
)
174 kgd2kfd
->device_exit(adev
->kfd
);
179 void amdgpu_amdkfd_interrupt(struct amdgpu_device
*adev
,
180 const void *ih_ring_entry
)
183 kgd2kfd
->interrupt(adev
->kfd
, ih_ring_entry
);
186 void amdgpu_amdkfd_suspend(struct amdgpu_device
*adev
)
189 kgd2kfd
->suspend(adev
->kfd
);
192 int amdgpu_amdkfd_resume(struct amdgpu_device
*adev
)
197 r
= kgd2kfd
->resume(adev
->kfd
);
202 int alloc_gtt_mem(struct kgd_dev
*kgd
, size_t size
,
203 void **mem_obj
, uint64_t *gpu_addr
,
206 struct amdgpu_device
*adev
= (struct amdgpu_device
*)kgd
;
207 struct kgd_mem
**mem
= (struct kgd_mem
**) mem_obj
;
211 BUG_ON(gpu_addr
== NULL
);
212 BUG_ON(cpu_ptr
== NULL
);
214 *mem
= kmalloc(sizeof(struct kgd_mem
), GFP_KERNEL
);
218 r
= amdgpu_bo_create(adev
, size
, PAGE_SIZE
, true, AMDGPU_GEM_DOMAIN_GTT
,
219 AMDGPU_GEM_CREATE_CPU_GTT_USWC
, NULL
, NULL
, 0,
223 "failed to allocate BO for amdkfd (%d)\n", r
);
228 r
= amdgpu_bo_reserve((*mem
)->bo
, true);
230 dev_err(adev
->dev
, "(%d) failed to reserve bo for amdkfd\n", r
);
231 goto allocate_mem_reserve_bo_failed
;
234 r
= amdgpu_bo_pin((*mem
)->bo
, AMDGPU_GEM_DOMAIN_GTT
,
237 dev_err(adev
->dev
, "(%d) failed to pin bo for amdkfd\n", r
);
238 goto allocate_mem_pin_bo_failed
;
240 *gpu_addr
= (*mem
)->gpu_addr
;
242 r
= amdgpu_bo_kmap((*mem
)->bo
, &(*mem
)->cpu_ptr
);
245 "(%d) failed to map bo to kernel for amdkfd\n", r
);
246 goto allocate_mem_kmap_bo_failed
;
248 *cpu_ptr
= (*mem
)->cpu_ptr
;
250 amdgpu_bo_unreserve((*mem
)->bo
);
254 allocate_mem_kmap_bo_failed
:
255 amdgpu_bo_unpin((*mem
)->bo
);
256 allocate_mem_pin_bo_failed
:
257 amdgpu_bo_unreserve((*mem
)->bo
);
258 allocate_mem_reserve_bo_failed
:
259 amdgpu_bo_unref(&(*mem
)->bo
);
264 void free_gtt_mem(struct kgd_dev
*kgd
, void *mem_obj
)
266 struct kgd_mem
*mem
= (struct kgd_mem
*) mem_obj
;
270 amdgpu_bo_reserve(mem
->bo
, true);
271 amdgpu_bo_kunmap(mem
->bo
);
272 amdgpu_bo_unpin(mem
->bo
);
273 amdgpu_bo_unreserve(mem
->bo
);
274 amdgpu_bo_unref(&(mem
->bo
));
278 void get_local_mem_info(struct kgd_dev
*kgd
,
279 struct kfd_local_mem_info
*mem_info
)
281 struct amdgpu_device
*adev
= (struct amdgpu_device
*)kgd
;
282 uint64_t address_mask
= adev
->dev
->dma_mask
? ~*adev
->dev
->dma_mask
:
284 resource_size_t aper_limit
= adev
->mc
.aper_base
+ adev
->mc
.aper_size
;
286 memset(mem_info
, 0, sizeof(*mem_info
));
287 if (!(adev
->mc
.aper_base
& address_mask
|| aper_limit
& address_mask
)) {
288 mem_info
->local_mem_size_public
= adev
->mc
.visible_vram_size
;
289 mem_info
->local_mem_size_private
= adev
->mc
.real_vram_size
-
290 adev
->mc
.visible_vram_size
;
292 mem_info
->local_mem_size_public
= 0;
293 mem_info
->local_mem_size_private
= adev
->mc
.real_vram_size
;
295 mem_info
->vram_width
= adev
->mc
.vram_width
;
297 pr_debug("Address base: %pap limit %pap public 0x%llx private 0x%llx\n",
298 &adev
->mc
.aper_base
, &aper_limit
,
299 mem_info
->local_mem_size_public
,
300 mem_info
->local_mem_size_private
);
302 if (amdgpu_sriov_vf(adev
))
303 mem_info
->mem_clk_max
= adev
->clock
.default_mclk
/ 100;
305 mem_info
->mem_clk_max
= amdgpu_dpm_get_mclk(adev
, false) / 100;
308 uint64_t get_gpu_clock_counter(struct kgd_dev
*kgd
)
310 struct amdgpu_device
*adev
= (struct amdgpu_device
*)kgd
;
312 if (adev
->gfx
.funcs
->get_gpu_clock_counter
)
313 return adev
->gfx
.funcs
->get_gpu_clock_counter(adev
);
317 uint32_t get_max_engine_clock_in_mhz(struct kgd_dev
*kgd
)
319 struct amdgpu_device
*adev
= (struct amdgpu_device
*)kgd
;
321 /* the sclk is in quantas of 10kHz */
322 if (amdgpu_sriov_vf(adev
))
323 return adev
->clock
.default_sclk
/ 100;
325 return amdgpu_dpm_get_sclk(adev
, false) / 100;
328 void get_cu_info(struct kgd_dev
*kgd
, struct kfd_cu_info
*cu_info
)
330 struct amdgpu_device
*adev
= (struct amdgpu_device
*)kgd
;
331 struct amdgpu_cu_info acu_info
= adev
->gfx
.cu_info
;
333 memset(cu_info
, 0, sizeof(*cu_info
));
334 if (sizeof(cu_info
->cu_bitmap
) != sizeof(acu_info
.bitmap
))
337 cu_info
->cu_active_number
= acu_info
.number
;
338 cu_info
->cu_ao_mask
= acu_info
.ao_cu_mask
;
339 memcpy(&cu_info
->cu_bitmap
[0], &acu_info
.bitmap
[0],
340 sizeof(acu_info
.bitmap
));
341 cu_info
->num_shader_engines
= adev
->gfx
.config
.max_shader_engines
;
342 cu_info
->num_shader_arrays_per_engine
= adev
->gfx
.config
.max_sh_per_se
;
343 cu_info
->num_cu_per_sh
= adev
->gfx
.config
.max_cu_per_sh
;
344 cu_info
->simd_per_cu
= acu_info
.simd_per_cu
;
345 cu_info
->max_waves_per_simd
= acu_info
.max_waves_per_simd
;
346 cu_info
->wave_front_size
= acu_info
.wave_front_size
;
347 cu_info
->max_scratch_slots_per_cu
= acu_info
.max_scratch_slots_per_cu
;
348 cu_info
->lds_size
= acu_info
.lds_size
;
351 uint64_t amdgpu_amdkfd_get_vram_usage(struct kgd_dev
*kgd
)
353 struct amdgpu_device
*adev
= (struct amdgpu_device
*)kgd
;
355 return amdgpu_vram_mgr_usage(&adev
->mman
.bdev
.man
[TTM_PL_VRAM
]);