/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "amdgpu_amdkfd.h"
#include "amd_shared.h"
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_gfx.h"
#include <linux/module.h>

const struct kgd2kfd_calls *kgd2kfd;
bool (*kgd2kfd_init_p)(unsigned int, const struct kgd2kfd_calls**);

static const unsigned int compute_vmid_bitmap = 0xFF00;

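/* Bits 8..15 of compute_vmid_bitmap (0xFF00) reserve VMIDs 8-15 for KFD
 * compute contexts; amdgpu keeps VMIDs 0-7 for graphics. The kgd2kfd
 * interface below is resolved via symbol_request() when amdkfd is built
 * as a module, or by a direct kgd2kfd_init() call when it is built in.
 */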
int amdgpu_amdkfd_init(void)
{
	int ret;

#if defined(CONFIG_HSA_AMD_MODULE)
	int (*kgd2kfd_init_p)(unsigned int, const struct kgd2kfd_calls**);

	kgd2kfd_init_p = symbol_request(kgd2kfd_init);

	if (kgd2kfd_init_p == NULL)
		return -ENOENT;

	ret = kgd2kfd_init_p(KFD_INTERFACE_VERSION, &kgd2kfd);
	if (ret) {
		symbol_put(kgd2kfd_init);
		kgd2kfd = NULL;
	}

#elif defined(CONFIG_HSA_AMD)
	ret = kgd2kfd_init(KFD_INTERFACE_VERSION, &kgd2kfd);
	if (ret)
		kgd2kfd = NULL;
#else
	kgd2kfd = NULL;
	ret = -ENOENT;
#endif

#if defined(CONFIG_HSA_AMD_MODULE) || defined(CONFIG_HSA_AMD)
	amdgpu_amdkfd_gpuvm_init_mem_limits();
#endif

	return ret;
}

void amdgpu_amdkfd_fini(void)
{
	if (kgd2kfd) {
		kgd2kfd->exit();
		symbol_put(kgd2kfd_init);
	}
}

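/* Pick the ASIC-specific kfd2kgd callback table (GFX7 for CIK parts,
 * GFX8 for VI/Polaris, GFX9 for Vega/Raven) and hand it to KFD's probe
 * entry point, which creates the kfd device handle kept in adev->kfd.
 */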
void amdgpu_amdkfd_device_probe(struct amdgpu_device *adev)
{
	const struct kfd2kgd_calls *kfd2kgd;

	if (!kgd2kfd)
		return;

	switch (adev->asic_type) {
#ifdef CONFIG_DRM_AMDGPU_CIK
	case CHIP_KAVERI:
	case CHIP_HAWAII:
		kfd2kgd = amdgpu_amdkfd_gfx_7_get_functions();
		break;
#endif
	case CHIP_CARRIZO:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
		kfd2kgd = amdgpu_amdkfd_gfx_8_0_get_functions();
		break;
	case CHIP_VEGA10:
	case CHIP_RAVEN:
		kfd2kgd = amdgpu_amdkfd_gfx_9_0_get_functions();
		break;
	default:
		dev_info(adev->dev, "kfd not supported on this ASIC\n");
		return;
	}

	adev->kfd = kgd2kfd->probe((struct kgd_dev *)adev,
				   adev->pdev, kfd2kgd);
}

/**
 * amdgpu_doorbell_get_kfd_info - Report doorbell configuration required to
 *                                setup amdkfd
 *
 * @adev: amdgpu_device pointer
 * @aperture_base: output returning doorbell aperture base physical address
 * @aperture_size: output returning doorbell aperture size in bytes
 * @start_offset: output returning # of doorbell bytes reserved for amdgpu.
 *
 * amdgpu and amdkfd share the doorbell aperture. amdgpu sets it up,
 * takes doorbells required for its own rings and reports the setup to amdkfd.
 * amdgpu reserved doorbells are at the start of the doorbell aperture.
 */
static void amdgpu_doorbell_get_kfd_info(struct amdgpu_device *adev,
					 phys_addr_t *aperture_base,
					 size_t *aperture_size,
					 size_t *start_offset)
{
	/*
	 * The first num_doorbells are used by amdgpu.
	 * amdkfd takes whatever's left in the aperture.
	 */
	if (adev->doorbell.size > adev->doorbell.num_doorbells * sizeof(u32)) {
		*aperture_base = adev->doorbell.base;
		*aperture_size = adev->doorbell.size;
		*start_offset = adev->doorbell.num_doorbells * sizeof(u32);
	} else {
		*aperture_base = 0;
		*aperture_size = 0;
		*start_offset = 0;
	}
}

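/* Publish the resources KFD may use: the compute VMIDs, the MEC queue
 * bitmap (minus the KIQ and any queue beyond the first MEC), the part
 * of the doorbell aperture left over by amdgpu, and the GPUVM range.
 */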
void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
{
	int i;
	int last_valid_bit;

	if (adev->kfd) {
		struct kgd2kfd_shared_resources gpu_resources = {
			.compute_vmid_bitmap = compute_vmid_bitmap,
			.num_pipe_per_mec = adev->gfx.mec.num_pipe_per_mec,
			.num_queue_per_pipe = adev->gfx.mec.num_queue_per_pipe,
			.gpuvm_size = min(adev->vm_manager.max_pfn
					  << AMDGPU_GPU_PAGE_SHIFT,
					  AMDGPU_VA_HOLE_START),
			.drm_render_minor = adev->ddev->render->index
		};

		/* this is going to have a few of the MSBs set that we need to
		 * clear
		 */
		bitmap_complement(gpu_resources.queue_bitmap,
				  adev->gfx.mec.queue_bitmap,
				  KGD_MAX_QUEUES);

		/* remove the KIQ bit as well */
		if (adev->gfx.kiq.ring.ready)
			clear_bit(amdgpu_gfx_queue_to_bit(adev,
					adev->gfx.kiq.ring.me - 1,
					adev->gfx.kiq.ring.pipe,
					adev->gfx.kiq.ring.queue),
				  gpu_resources.queue_bitmap);

		/* According to linux/bitmap.h we shouldn't use bitmap_clear
		 * if nbits is not a compile-time constant.
		 */
		last_valid_bit = 1 /* only first MEC can have compute queues */
				 * adev->gfx.mec.num_pipe_per_mec
				 * adev->gfx.mec.num_queue_per_pipe;
		for (i = last_valid_bit; i < KGD_MAX_QUEUES; ++i)
			clear_bit(i, gpu_resources.queue_bitmap);

		amdgpu_doorbell_get_kfd_info(adev,
				&gpu_resources.doorbell_physical_address,
				&gpu_resources.doorbell_aperture_size,
				&gpu_resources.doorbell_start_offset);
		if (adev->asic_type >= CHIP_VEGA10) {
			/* On SOC15 the BIF is involved in routing
			 * doorbells using the low 12 bits of the
			 * address. Communicate the assignments to
			 * KFD. KFD uses two doorbell pages per
			 * process in case of 64-bit doorbells so we
			 * can use each doorbell assignment twice.
			 */
			gpu_resources.sdma_doorbell[0][0] =
				AMDGPU_DOORBELL64_sDMA_ENGINE0;
			gpu_resources.sdma_doorbell[0][1] =
				AMDGPU_DOORBELL64_sDMA_ENGINE0 + 0x200;
			gpu_resources.sdma_doorbell[1][0] =
				AMDGPU_DOORBELL64_sDMA_ENGINE1;
			gpu_resources.sdma_doorbell[1][1] =
				AMDGPU_DOORBELL64_sDMA_ENGINE1 + 0x200;
			/* Doorbells 0x0f0-0ff and 0x2f0-2ff are reserved for
			 * SDMA, IH and VCN. So don't use them for the CP.
			 */
			gpu_resources.reserved_doorbell_mask = 0x1f0;
			gpu_resources.reserved_doorbell_val = 0x0f0;
		}

		kgd2kfd->device_init(adev->kfd, &gpu_resources);
	}
}

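/* The next several entry points are thin wrappers that forward into
 * the kgd2kfd interface and do nothing when no KFD device was probed.
 */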
void amdgpu_amdkfd_device_fini(struct amdgpu_device *adev)
{
	if (adev->kfd) {
		kgd2kfd->device_exit(adev->kfd);
		adev->kfd = NULL;
	}
}

void amdgpu_amdkfd_interrupt(struct amdgpu_device *adev,
			     const void *ih_ring_entry)
{
	if (adev->kfd)
		kgd2kfd->interrupt(adev->kfd, ih_ring_entry);
}

void amdgpu_amdkfd_suspend(struct amdgpu_device *adev)
{
	if (adev->kfd)
		kgd2kfd->suspend(adev->kfd);
}

int amdgpu_amdkfd_resume(struct amdgpu_device *adev)
{
	int r = 0;

	if (adev->kfd)
		r = kgd2kfd->resume(adev->kfd);

	return r;
}

int amdgpu_amdkfd_pre_reset(struct amdgpu_device *adev)
{
	int r = 0;

	if (adev->kfd)
		r = kgd2kfd->pre_reset(adev->kfd);

	return r;
}

int amdgpu_amdkfd_post_reset(struct amdgpu_device *adev)
{
	int r = 0;

	if (adev->kfd)
		r = kgd2kfd->post_reset(adev->kfd);

	return r;
}

void amdgpu_amdkfd_gpu_reset(struct kgd_dev *kgd)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

	amdgpu_device_gpu_recover(adev, NULL, false);
}

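/* alloc_gtt_mem() backs KFD kernel-mode structures (e.g. MQDs, note the
 * mqd_gfx9 flag) with a pinned, kernel-mapped GTT buffer object. The
 * sequence is create, reserve, pin, bind to GART, then kmap; failures
 * unwind through the goto labels in reverse order.
 *
 * Typical use from a kfd2kgd consumer (sketch only, error handling
 * omitted):
 *
 *	void *mem_obj, *cpu_ptr;
 *	uint64_t gpu_addr;
 *
 *	alloc_gtt_mem(kgd, PAGE_SIZE, &mem_obj, &gpu_addr, &cpu_ptr, false);
 *	... fill *cpu_ptr, point the hardware at gpu_addr ...
 *	free_gtt_mem(kgd, mem_obj);
 */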
int alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
		  void **mem_obj, uint64_t *gpu_addr,
		  void **cpu_ptr, bool mqd_gfx9)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
	struct amdgpu_bo *bo = NULL;
	struct amdgpu_bo_param bp;
	int r;
	void *cpu_ptr_tmp = NULL;

	memset(&bp, 0, sizeof(bp));
	bp.size = size;
	bp.byte_align = PAGE_SIZE;
	bp.domain = AMDGPU_GEM_DOMAIN_GTT;
	bp.flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC;
	bp.type = ttm_bo_type_kernel;
	bp.resv = NULL;

	if (mqd_gfx9)
		bp.flags |= AMDGPU_GEM_CREATE_MQD_GFX9;

	r = amdgpu_bo_create(adev, &bp, &bo);
	if (r) {
		dev_err(adev->dev,
			"failed to allocate BO for amdkfd (%d)\n", r);
		return r;
	}

	/* map the buffer */
	r = amdgpu_bo_reserve(bo, true);
	if (r) {
		dev_err(adev->dev, "(%d) failed to reserve bo for amdkfd\n", r);
		goto allocate_mem_reserve_bo_failed;
	}

	r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
	if (r) {
		dev_err(adev->dev, "(%d) failed to pin bo for amdkfd\n", r);
		goto allocate_mem_pin_bo_failed;
	}

	r = amdgpu_ttm_alloc_gart(&bo->tbo);
	if (r) {
		dev_err(adev->dev, "%p bind failed\n", bo);
		goto allocate_mem_kmap_bo_failed;
	}

	r = amdgpu_bo_kmap(bo, &cpu_ptr_tmp);
	if (r) {
		dev_err(adev->dev,
			"(%d) failed to map bo to kernel for amdkfd\n", r);
		goto allocate_mem_kmap_bo_failed;
	}

	*mem_obj = bo;
	*gpu_addr = amdgpu_bo_gpu_offset(bo);
	*cpu_ptr = cpu_ptr_tmp;

	amdgpu_bo_unreserve(bo);

	return 0;

allocate_mem_kmap_bo_failed:
	amdgpu_bo_unpin(bo);
allocate_mem_pin_bo_failed:
	amdgpu_bo_unreserve(bo);
allocate_mem_reserve_bo_failed:
	amdgpu_bo_unref(&bo);

	return r;
}

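/* Tear down in the reverse order of alloc_gtt_mem(): kunmap, unpin,
 * then drop the last reference.
 */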
void free_gtt_mem(struct kgd_dev *kgd, void *mem_obj)
{
	struct amdgpu_bo *bo = (struct amdgpu_bo *) mem_obj;

	amdgpu_bo_reserve(bo, true);
	amdgpu_bo_kunmap(bo);
	amdgpu_bo_unpin(bo);
	amdgpu_bo_unreserve(bo);
	amdgpu_bo_unref(&(bo));
}

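/* VRAM the CPU can reach through the BAR aperture is reported as
 * "public"; the rest is "private". If the aperture lies outside the
 * device's DMA mask, all of VRAM is treated as private.
 */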
void get_local_mem_info(struct kgd_dev *kgd,
			struct kfd_local_mem_info *mem_info)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
	uint64_t address_mask = adev->dev->dma_mask ? ~*adev->dev->dma_mask :
					     ~((1ULL << 32) - 1);
	resource_size_t aper_limit = adev->gmc.aper_base + adev->gmc.aper_size;

	memset(mem_info, 0, sizeof(*mem_info));
	if (!(adev->gmc.aper_base & address_mask ||
	      aper_limit & address_mask)) {
		mem_info->local_mem_size_public = adev->gmc.visible_vram_size;
		mem_info->local_mem_size_private = adev->gmc.real_vram_size -
				adev->gmc.visible_vram_size;
	} else {
		mem_info->local_mem_size_public = 0;
		mem_info->local_mem_size_private = adev->gmc.real_vram_size;
	}
	mem_info->vram_width = adev->gmc.vram_width;

	pr_debug("Address base: %pap limit %pap public 0x%llx private 0x%llx\n",
		 &adev->gmc.aper_base, &aper_limit,
		 mem_info->local_mem_size_public,
		 mem_info->local_mem_size_private);

	if (amdgpu_sriov_vf(adev))
		mem_info->mem_clk_max = adev->clock.default_mclk / 100;
	else if (adev->powerplay.pp_funcs)
		mem_info->mem_clk_max = amdgpu_dpm_get_mclk(adev, false) / 100;
	else
		mem_info->mem_clk_max = 100;
}

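/* Clock queries below use the units KFD expects: the GPU clock counter
 * is a raw 64-bit tick count, while engine and memory clocks are in
 * MHz (the DPM interfaces report 10 kHz units, hence the divide by
 * 100).
 */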
uint64_t get_gpu_clock_counter(struct kgd_dev *kgd)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

	if (adev->gfx.funcs->get_gpu_clock_counter)
		return adev->gfx.funcs->get_gpu_clock_counter(adev);
	return 0;
}

uint32_t get_max_engine_clock_in_mhz(struct kgd_dev *kgd)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

	/* the sclk is in quantas of 10kHz */
	if (amdgpu_sriov_vf(adev))
		return adev->clock.default_sclk / 100;
	else if (adev->powerplay.pp_funcs)
		return amdgpu_dpm_get_sclk(adev, false) / 100;
	else
		return 100;
}

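/* Copy the compute-unit topology amdgpu discovered at init time into
 * the layout KFD uses for its topology reporting.
 */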
void get_cu_info(struct kgd_dev *kgd, struct kfd_cu_info *cu_info)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
	struct amdgpu_cu_info acu_info = adev->gfx.cu_info;

	memset(cu_info, 0, sizeof(*cu_info));
	if (sizeof(cu_info->cu_bitmap) != sizeof(acu_info.bitmap))
		return;

	cu_info->cu_active_number = acu_info.number;
	cu_info->cu_ao_mask = acu_info.ao_cu_mask;
	memcpy(&cu_info->cu_bitmap[0], &acu_info.bitmap[0],
	       sizeof(acu_info.bitmap));
	cu_info->num_shader_engines = adev->gfx.config.max_shader_engines;
	cu_info->num_shader_arrays_per_engine = adev->gfx.config.max_sh_per_se;
	cu_info->num_cu_per_sh = adev->gfx.config.max_cu_per_sh;
	cu_info->simd_per_cu = acu_info.simd_per_cu;
	cu_info->max_waves_per_simd = acu_info.max_waves_per_simd;
	cu_info->wave_front_size = acu_info.wave_front_size;
	cu_info->max_scratch_slots_per_cu = acu_info.max_scratch_slots_per_cu;
	cu_info->lds_size = acu_info.lds_size;
}

uint64_t amdgpu_amdkfd_get_vram_usage(struct kgd_dev *kgd)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

	return amdgpu_vram_mgr_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
}

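/* Submit a raw indirect buffer on KFD's behalf. This path is only used
 * without HWS: the IB is attached to a one-off job on the first compute
 * or SDMA ring, and the call blocks until the job's fence signals.
 */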
int amdgpu_amdkfd_submit_ib(struct kgd_dev *kgd, enum kgd_engine_type engine,
			    uint32_t vmid, uint64_t gpu_addr,
			    uint32_t *ib_cmd, uint32_t ib_len)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct amdgpu_ring *ring;
	struct dma_fence *f = NULL;
	int ret;

	switch (engine) {
	case KGD_ENGINE_MEC1:
		ring = &adev->gfx.compute_ring[0];
		break;
	case KGD_ENGINE_SDMA1:
		ring = &adev->sdma.instance[0].ring;
		break;
	case KGD_ENGINE_SDMA2:
		ring = &adev->sdma.instance[1].ring;
		break;
	default:
		pr_err("Invalid engine in IB submission: %d\n", engine);
		ret = -EINVAL;
		goto err;
	}

	ret = amdgpu_job_alloc(adev, 1, &job, NULL);
	if (ret)
		goto err;

	ib = &job->ibs[0];
	memset(ib, 0, sizeof(struct amdgpu_ib));

	ib->gpu_addr = gpu_addr;
	ib->ptr = ib_cmd;
	ib->length_dw = ib_len;
	/* This works for NO_HWS. TODO: need to handle without knowing VMID */
	job->vmid = vmid;

	ret = amdgpu_ib_schedule(ring, 1, ib, job, &f);
	if (ret) {
		DRM_ERROR("amdgpu: failed to schedule IB.\n");
		goto err_ib_sched;
	}

	ret = dma_fence_wait(f, false);

err_ib_sched:
	dma_fence_put(f);
	amdgpu_job_free(job);
err:
	return ret;
}

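/* KFD toggles the COMPUTE power profile around periods of compute
 * activity so DPM can bias clocks toward compute workloads while
 * queues are busy.
 */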
void amdgpu_amdkfd_set_compute_idle(struct kgd_dev *kgd, bool idle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

	if (adev->powerplay.pp_funcs &&
	    adev->powerplay.pp_funcs->switch_power_profile)
		amdgpu_dpm_switch_power_profile(adev,
						PP_SMC_POWER_PROFILE_COMPUTE,
						!idle);
}

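/* A VMID belongs to KFD if its bit is set in compute_vmid_bitmap, i.e.
 * it falls within the range reserved for compute at init time.
 */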
bool amdgpu_amdkfd_is_kfd_vmid(struct amdgpu_device *adev, u32 vmid)
{
	if (adev->kfd) {
		if ((1 << vmid) & compute_vmid_bitmap)
			return true;
	}

	return false;
}

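/* When KFD support is not compiled in at all, provide no-op stubs so
 * the rest of amdgpu links without ifdefs at every call site.
 */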
#if !defined(CONFIG_HSA_AMD_MODULE) && !defined(CONFIG_HSA_AMD)
bool amdkfd_fence_check_mm(struct dma_fence *f, struct mm_struct *mm)
{
	return false;
}

void amdgpu_amdkfd_unreserve_system_memory_limit(struct amdgpu_bo *bo)
{
}

void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
				    struct amdgpu_vm *vm)
{
}

struct amdgpu_amdkfd_fence *to_amdgpu_amdkfd_fence(struct dma_fence *f)
{
	return NULL;
}

int amdgpu_amdkfd_evict_userptr(struct kgd_mem *mem, struct mm_struct *mm)
{
	return 0;
}

struct kfd2kgd_calls *amdgpu_amdkfd_gfx_7_get_functions(void)
{
	return NULL;
}

struct kfd2kgd_calls *amdgpu_amdkfd_gfx_8_0_get_functions(void)
{
	return NULL;
}

struct kfd2kgd_calls *amdgpu_amdkfd_gfx_9_0_get_functions(void)
{
	return NULL;
}
#endif