/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include "amdgpu_amdkfd.h"
#include "amd_shared.h"
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_gfx.h"
#include <linux/module.h>
#include <linux/dma-buf.h>
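
/* Mask 0xFF00 reserves VMIDs 8-15 for KFD compute contexts; see
 * amdgpu_amdkfd_is_kfd_vmid() below.
 */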
static const unsigned int compute_vmid_bitmap = 0xFF00;

/* Total memory size in system memory and all GPU VRAM. Used to
 * estimate worst case amount of memory to reserve for page tables
 */
uint64_t amdgpu_amdkfd_total_mem_size;
int amdgpu_amdkfd_init(void)
{
	struct sysinfo si;
	int ret;

	si_meminfo(&si);
	amdgpu_amdkfd_total_mem_size = si.totalram - si.totalhigh;
	amdgpu_amdkfd_total_mem_size *= si.mem_unit;

#ifdef CONFIG_HSA_AMD
	ret = kgd2kfd_init();
	amdgpu_amdkfd_gpuvm_init_mem_limits();
#else
	ret = -ENOENT;
#endif

	return ret;
}

void amdgpu_amdkfd_fini(void)
{
	kgd2kfd_exit();
}
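
/* Per-device probe: pick the kfd2kgd function table matching the ASIC
 * generation and ask KFD to create a kfd_dev for this adapter. If KFD is
 * not built in, the kgd2kfd_probe() stub below returns NULL and every
 * later KFD hook stays disabled.
 */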
void amdgpu_amdkfd_device_probe(struct amdgpu_device *adev)
{
	const struct kfd2kgd_calls *kfd2kgd;

	switch (adev->asic_type) {
#ifdef CONFIG_DRM_AMDGPU_CIK
	case CHIP_KAVERI:
	case CHIP_HAWAII:
		kfd2kgd = amdgpu_amdkfd_gfx_7_get_functions();
		break;
#endif
	case CHIP_CARRIZO:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
		kfd2kgd = amdgpu_amdkfd_gfx_8_0_get_functions();
		break;
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
	case CHIP_RAVEN:
		kfd2kgd = amdgpu_amdkfd_gfx_9_0_get_functions();
		break;
	default:
		dev_info(adev->dev, "kfd not supported on this ASIC\n");
		return;
	}

	adev->kfd.dev = kgd2kfd_probe((struct kgd_dev *)adev,
				      adev->pdev, kfd2kgd);

	if (adev->kfd.dev)
		amdgpu_amdkfd_total_mem_size += adev->gmc.real_vram_size;
}
/**
 * amdgpu_doorbell_get_kfd_info - Report doorbell configuration required to
 * setup amdkfd
 *
 * @adev: amdgpu_device pointer
 * @aperture_base: output returning doorbell aperture base physical address
 * @aperture_size: output returning doorbell aperture size in bytes
 * @start_offset: output returning # of doorbell bytes reserved for amdgpu.
 *
 * amdgpu and amdkfd share the doorbell aperture. amdgpu sets it up,
 * takes doorbells required for its own rings and reports the setup to amdkfd.
 * amdgpu reserved doorbells are at the start of the doorbell aperture.
 */
static void amdgpu_doorbell_get_kfd_info(struct amdgpu_device *adev,
					 phys_addr_t *aperture_base,
					 size_t *aperture_size,
					 size_t *start_offset)
{
	/*
	 * The first num_doorbells are used by amdgpu.
	 * amdkfd takes whatever's left in the aperture.
	 */
	if (adev->doorbell.size > adev->doorbell.num_doorbells * sizeof(u32)) {
		*aperture_base = adev->doorbell.base;
		*aperture_size = adev->doorbell.size;
		*start_offset = adev->doorbell.num_doorbells * sizeof(u32);
	} else {
		*aperture_base = 0;
		*aperture_size = 0;
		*start_offset = 0;
	}
}
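
/* Hand the shared hardware resources (compute VMIDs, MEC queue bitmap,
 * doorbell aperture carve-out, GPUVM aperture size) over to KFD. As an
 * illustrative example only: with 1024 doorbells claimed by amdgpu, the
 * reported start_offset would be 4 KiB (1024 * sizeof(u32)) and amdkfd
 * would own everything from there to the end of the aperture.
 */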
void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
{
	int i;
	int last_valid_bit;

	if (adev->kfd.dev) {
		struct kgd2kfd_shared_resources gpu_resources = {
			.compute_vmid_bitmap = compute_vmid_bitmap,
			.num_pipe_per_mec = adev->gfx.mec.num_pipe_per_mec,
			.num_queue_per_pipe = adev->gfx.mec.num_queue_per_pipe,
			.gpuvm_size = min(adev->vm_manager.max_pfn
					  << AMDGPU_GPU_PAGE_SHIFT,
					  AMDGPU_GMC_HOLE_START),
			.drm_render_minor = adev->ddev->render->index,
			.sdma_doorbell_idx = adev->doorbell_index.sdma_engine,
		};

		/* this is going to have a few of the MSBs set that we need to
		 * clear
		 */
		bitmap_complement(gpu_resources.queue_bitmap,
				  adev->gfx.mec.queue_bitmap,
				  KGD_MAX_QUEUES);

		/* remove the KIQ bit as well */
		if (adev->gfx.kiq.ring.sched.ready)
			clear_bit(amdgpu_gfx_queue_to_bit(adev,
							  adev->gfx.kiq.ring.me - 1,
							  adev->gfx.kiq.ring.pipe,
							  adev->gfx.kiq.ring.queue),
				  gpu_resources.queue_bitmap);

		/* According to linux/bitmap.h we shouldn't use bitmap_clear if
		 * nbits is not a compile time constant
		 */
		last_valid_bit = 1 /* only first MEC can have compute queues */
				 * adev->gfx.mec.num_pipe_per_mec
				 * adev->gfx.mec.num_queue_per_pipe;
		for (i = last_valid_bit; i < KGD_MAX_QUEUES; ++i)
			clear_bit(i, gpu_resources.queue_bitmap);

		amdgpu_doorbell_get_kfd_info(adev,
				&gpu_resources.doorbell_physical_address,
				&gpu_resources.doorbell_aperture_size,
				&gpu_resources.doorbell_start_offset);

		/* Since SOC15, BIF starts to statically use the
		 * lower 12 bits of doorbell addresses for routing
		 * based on settings in registers like
		 * SDMA0_DOORBELL_RANGE etc.
		 * In order to route a doorbell to CP engine, the lower
		 * 12 bits of its address have to be outside the range
		 * set for SDMA, VCN, and IH blocks.
		 */
		if (adev->asic_type >= CHIP_VEGA10) {
			gpu_resources.non_cp_doorbells_start =
					adev->doorbell_index.first_non_cp;
			gpu_resources.non_cp_doorbells_end =
					adev->doorbell_index.last_non_cp;
		}

		kgd2kfd_device_init(adev->kfd.dev, &gpu_resources);
	}
}
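
/* Thin wrappers that forward device lifecycle events (teardown, interrupts,
 * suspend/resume, GPU reset) to amdkfd; most of them are no-ops while
 * adev->kfd.dev is NULL.
 */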
void amdgpu_amdkfd_device_fini(struct amdgpu_device *adev)
{
	if (adev->kfd.dev) {
		kgd2kfd_device_exit(adev->kfd.dev);
		adev->kfd.dev = NULL;
	}
}
void amdgpu_amdkfd_interrupt(struct amdgpu_device *adev,
			     const void *ih_ring_entry)
{
	if (adev->kfd.dev)
		kgd2kfd_interrupt(adev->kfd.dev, ih_ring_entry);
}
void amdgpu_amdkfd_suspend(struct amdgpu_device *adev)
{
	if (adev->kfd.dev)
		kgd2kfd_suspend(adev->kfd.dev);
}
int amdgpu_amdkfd_resume(struct amdgpu_device *adev)
{
	int r = 0;

	if (adev->kfd.dev)
		r = kgd2kfd_resume(adev->kfd.dev);

	return r;
}
int amdgpu_amdkfd_pre_reset(struct amdgpu_device *adev)
{
	int r = 0;

	if (adev->kfd.dev)
		r = kgd2kfd_pre_reset(adev->kfd.dev);

	return r;
}
int amdgpu_amdkfd_post_reset(struct amdgpu_device *adev)
{
	int r = 0;

	if (adev->kfd.dev)
		r = kgd2kfd_post_reset(adev->kfd.dev);

	return r;
}
void amdgpu_amdkfd_gpu_reset(struct kgd_dev *kgd)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

	if (amdgpu_device_should_recover_gpu(adev))
		amdgpu_device_gpu_recover(adev, NULL);
}
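
/* Allocate a pinned, GART-bound GTT buffer with a kernel CPU mapping for
 * KFD (used e.g. for MQDs; mqd_gfx9 requests the GFX9-specific MQD
 * allocation flag). The error labels unwind the reserve/pin/kmap sequence
 * in reverse order.
 */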
int amdgpu_amdkfd_alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
				void **mem_obj, uint64_t *gpu_addr,
				void **cpu_ptr, bool mqd_gfx9)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
	struct amdgpu_bo *bo = NULL;
	struct amdgpu_bo_param bp;
	int r;
	void *cpu_ptr_tmp = NULL;

	memset(&bp, 0, sizeof(bp));
	bp.size = size;
	bp.byte_align = PAGE_SIZE;
	bp.domain = AMDGPU_GEM_DOMAIN_GTT;
	bp.flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC;
	bp.type = ttm_bo_type_kernel;
	bp.resv = NULL;

	if (mqd_gfx9)
		bp.flags |= AMDGPU_GEM_CREATE_MQD_GFX9;

	r = amdgpu_bo_create(adev, &bp, &bo);
	if (r) {
		dev_err(adev->dev,
			"failed to allocate BO for amdkfd (%d)\n", r);
		return r;
	}

	/* map the buffer */
	r = amdgpu_bo_reserve(bo, true);
	if (r) {
		dev_err(adev->dev, "(%d) failed to reserve bo for amdkfd\n", r);
		goto allocate_mem_reserve_bo_failed;
	}

	r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
	if (r) {
		dev_err(adev->dev, "(%d) failed to pin bo for amdkfd\n", r);
		goto allocate_mem_pin_bo_failed;
	}

	r = amdgpu_ttm_alloc_gart(&bo->tbo);
	if (r) {
		dev_err(adev->dev, "%p bind failed\n", bo);
		goto allocate_mem_kmap_bo_failed;
	}

	r = amdgpu_bo_kmap(bo, &cpu_ptr_tmp);
	if (r) {
		dev_err(adev->dev,
			"(%d) failed to map bo to kernel for amdkfd\n", r);
		goto allocate_mem_kmap_bo_failed;
	}

	*mem_obj = bo;
	*gpu_addr = amdgpu_bo_gpu_offset(bo);
	*cpu_ptr = cpu_ptr_tmp;

	amdgpu_bo_unreserve(bo);

	return 0;

allocate_mem_kmap_bo_failed:
	amdgpu_bo_unpin(bo);
allocate_mem_pin_bo_failed:
	amdgpu_bo_unreserve(bo);
allocate_mem_reserve_bo_failed:
	amdgpu_bo_unref(&bo);

	return r;
}
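
/* Tear down a buffer from amdgpu_amdkfd_alloc_gtt_mem(): unmap the kernel
 * mapping, unpin, and drop the last reference.
 */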
void amdgpu_amdkfd_free_gtt_mem(struct kgd_dev *kgd, void *mem_obj)
{
	struct amdgpu_bo *bo = (struct amdgpu_bo *) mem_obj;

	amdgpu_bo_reserve(bo, true);
	amdgpu_bo_kunmap(bo);
	amdgpu_bo_unpin(bo);
	amdgpu_bo_unreserve(bo);
	amdgpu_bo_unref(&(bo));
}
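
/* Report VRAM sizes to KFD. VRAM the CPU can reach through the BAR is
 * "public", the remainder is "private"; if the aperture lies outside the
 * device's DMA mask, all of VRAM is reported as private.
 */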
void amdgpu_amdkfd_get_local_mem_info(struct kgd_dev *kgd,
				      struct kfd_local_mem_info *mem_info)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
	uint64_t address_mask = adev->dev->dma_mask ? ~*adev->dev->dma_mask :
					     ~((1ULL << 32) - 1);
	resource_size_t aper_limit = adev->gmc.aper_base + adev->gmc.aper_size;

	memset(mem_info, 0, sizeof(*mem_info));
	if (!(adev->gmc.aper_base & address_mask || aper_limit & address_mask)) {
		mem_info->local_mem_size_public = adev->gmc.visible_vram_size;
		mem_info->local_mem_size_private = adev->gmc.real_vram_size -
				adev->gmc.visible_vram_size;
	} else {
		mem_info->local_mem_size_public = 0;
		mem_info->local_mem_size_private = adev->gmc.real_vram_size;
	}
	mem_info->vram_width = adev->gmc.vram_width;

	pr_debug("Address base: %pap limit %pap public 0x%llx private 0x%llx\n",
			&adev->gmc.aper_base, &aper_limit,
			mem_info->local_mem_size_public,
			mem_info->local_mem_size_private);

	if (amdgpu_sriov_vf(adev))
		mem_info->mem_clk_max = adev->clock.default_mclk / 100;
	else if (adev->powerplay.pp_funcs)
		mem_info->mem_clk_max = amdgpu_dpm_get_mclk(adev, false) / 100;
	else
		mem_info->mem_clk_max = 100;
}
uint64_t amdgpu_amdkfd_get_gpu_clock_counter(struct kgd_dev *kgd)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

	if (adev->gfx.funcs->get_gpu_clock_counter)
		return adev->gfx.funcs->get_gpu_clock_counter(adev);
	return 0;
}
uint32_t amdgpu_amdkfd_get_max_engine_clock_in_mhz(struct kgd_dev *kgd)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

	/* the sclk is in quanta of 10kHz */
	if (amdgpu_sriov_vf(adev))
		return adev->clock.default_sclk / 100;
	else if (adev->powerplay.pp_funcs)
		return amdgpu_dpm_get_sclk(adev, false) / 100;
	else
		return 100;
}
void amdgpu_amdkfd_get_cu_info(struct kgd_dev *kgd, struct kfd_cu_info *cu_info)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
	struct amdgpu_cu_info acu_info = adev->gfx.cu_info;

	memset(cu_info, 0, sizeof(*cu_info));
	if (sizeof(cu_info->cu_bitmap) != sizeof(acu_info.bitmap))
		return;

	cu_info->cu_active_number = acu_info.number;
	cu_info->cu_ao_mask = acu_info.ao_cu_mask;
	memcpy(&cu_info->cu_bitmap[0], &acu_info.bitmap[0],
	       sizeof(acu_info.bitmap));
	cu_info->num_shader_engines = adev->gfx.config.max_shader_engines;
	cu_info->num_shader_arrays_per_engine = adev->gfx.config.max_sh_per_se;
	cu_info->num_cu_per_sh = adev->gfx.config.max_cu_per_sh;
	cu_info->simd_per_cu = acu_info.simd_per_cu;
	cu_info->max_waves_per_simd = acu_info.max_waves_per_simd;
	cu_info->wave_front_size = acu_info.wave_front_size;
	cu_info->max_scratch_slots_per_cu = acu_info.max_scratch_slots_per_cu;
	cu_info->lds_size = acu_info.lds_size;
}
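
/* Look up an amdgpu DMA-buf by fd and report its size, placement flags and
 * optional metadata to KFD. Buffers that are not amdgpu GEM objects, come
 * from a different driver, or live outside VRAM/GTT are rejected with
 * -EINVAL.
 */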
int amdgpu_amdkfd_get_dmabuf_info(struct kgd_dev *kgd, int dma_buf_fd,
				  struct kgd_dev **dma_buf_kgd,
				  uint64_t *bo_size, void *metadata_buffer,
				  size_t buffer_size, uint32_t *metadata_size,
				  uint32_t *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
	struct dma_buf *dma_buf;
	struct drm_gem_object *obj;
	struct amdgpu_bo *bo;
	uint64_t metadata_flags;
	int r = -EINVAL;

	dma_buf = dma_buf_get(dma_buf_fd);
	if (IS_ERR(dma_buf))
		return PTR_ERR(dma_buf);

	if (dma_buf->ops != &amdgpu_dmabuf_ops)
		/* Can't handle non-graphics buffers */
		goto out_put;

	obj = dma_buf->priv;
	if (obj->dev->driver != adev->ddev->driver)
		/* Can't handle buffers from different drivers */
		goto out_put;

	adev = obj->dev->dev_private;
	bo = gem_to_amdgpu_bo(obj);
	if (!(bo->preferred_domains & (AMDGPU_GEM_DOMAIN_VRAM |
				       AMDGPU_GEM_DOMAIN_GTT)))
		/* Only VRAM and GTT BOs are supported */
		goto out_put;

	r = 0;
	if (dma_buf_kgd)
		*dma_buf_kgd = (struct kgd_dev *)adev;
	if (bo_size)
		*bo_size = amdgpu_bo_size(bo);
	if (metadata_size)
		*metadata_size = bo->metadata_size;
	if (metadata_buffer)
		r = amdgpu_bo_get_metadata(bo, metadata_buffer, buffer_size,
					   metadata_size, &metadata_flags);
	if (flags) {
		*flags = (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) ?
				ALLOC_MEM_FLAGS_VRAM : ALLOC_MEM_FLAGS_GTT;

		if (bo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)
			*flags |= ALLOC_MEM_FLAGS_PUBLIC;
	}

out_put:
	dma_buf_put(dma_buf);
	return r;
}
uint64_t amdgpu_amdkfd_get_vram_usage(struct kgd_dev *kgd)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

	return amdgpu_vram_mgr_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
}
uint64_t amdgpu_amdkfd_get_hive_id(struct kgd_dev *kgd)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

	return adev->gmc.xgmi.hive_id;
}
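
/* Submit a raw IB to the first compute or SDMA ring on behalf of KFD and
 * wait for its fence to signal. As the in-line comment notes, this only
 * works in the NO_HWS case where the caller already knows a valid VMID.
 */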
int amdgpu_amdkfd_submit_ib(struct kgd_dev *kgd, enum kgd_engine_type engine,
			    uint32_t vmid, uint64_t gpu_addr,
			    uint32_t *ib_cmd, uint32_t ib_len)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct amdgpu_ring *ring;
	struct dma_fence *f = NULL;
	int ret;

	switch (engine) {
	case KGD_ENGINE_MEC1:
		ring = &adev->gfx.compute_ring[0];
		break;
	case KGD_ENGINE_SDMA1:
		ring = &adev->sdma.instance[0].ring;
		break;
	case KGD_ENGINE_SDMA2:
		ring = &adev->sdma.instance[1].ring;
		break;
	default:
		pr_err("Invalid engine in IB submission: %d\n", engine);
		ret = -EINVAL;
		goto err;
	}

	ret = amdgpu_job_alloc(adev, 1, &job, NULL);
	if (ret)
		goto err;

	ib = &job->ibs[0];
	memset(ib, 0, sizeof(struct amdgpu_ib));

	ib->gpu_addr = gpu_addr;
	ib->ptr = ib_cmd;
	ib->length_dw = ib_len;
	/* This works for NO_HWS. TODO: need to handle without knowing VMID */
	job->vmid = vmid;

	ret = amdgpu_ib_schedule(ring, 1, ib, job, &f);
	if (ret) {
		DRM_ERROR("amdgpu: failed to schedule IB.\n");
		goto err_ib_sched;
	}

	ret = dma_fence_wait(f, false);

err_ib_sched:
	dma_fence_put(f);
	amdgpu_job_free(job);
err:
	return ret;
}
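
/* Enable the COMPUTE power profile while compute queues are active (and
 * drop it when they go idle) so power management can favor compute
 * workloads.
 */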
void amdgpu_amdkfd_set_compute_idle(struct kgd_dev *kgd, bool idle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

	if (adev->powerplay.pp_funcs &&
	    adev->powerplay.pp_funcs->switch_power_profile)
		amdgpu_dpm_switch_power_profile(adev,
						PP_SMC_POWER_PROFILE_COMPUTE,
						!idle);
}
bool amdgpu_amdkfd_is_kfd_vmid(struct amdgpu_device *adev, u32 vmid)
{
	if (adev->kfd.dev) {
		if ((1 << vmid) & compute_vmid_bitmap)
			return true;
	}

	return false;
}
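
/* When KFD is not compiled in (CONFIG_HSA_AMD unset), provide no-op stubs
 * for the interfaces referenced above so amdgpu still links and runs; the
 * NULL/false/0 returns keep every KFD path disabled at runtime.
 */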
#ifndef CONFIG_HSA_AMD
bool amdkfd_fence_check_mm(struct dma_fence *f, struct mm_struct *mm)
{
	return false;
}

void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo *bo)
{
}

void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
				    struct amdgpu_vm *vm)
{
}

struct amdgpu_amdkfd_fence *to_amdgpu_amdkfd_fence(struct dma_fence *f)
{
	return NULL;
}

int amdgpu_amdkfd_evict_userptr(struct kgd_mem *mem, struct mm_struct *mm)
{
	return 0;
}

struct kfd2kgd_calls *amdgpu_amdkfd_gfx_7_get_functions(void)
{
	return NULL;
}

struct kfd2kgd_calls *amdgpu_amdkfd_gfx_8_0_get_functions(void)
{
	return NULL;
}

struct kfd2kgd_calls *amdgpu_amdkfd_gfx_9_0_get_functions(void)
{
	return NULL;
}

struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd, struct pci_dev *pdev,
			      const struct kfd2kgd_calls *f2g)
{
	return NULL;
}

bool kgd2kfd_device_init(struct kfd_dev *kfd,
			 const struct kgd2kfd_shared_resources *gpu_resources)
{
	return false;
}

void kgd2kfd_device_exit(struct kfd_dev *kfd)
{
}

void kgd2kfd_exit(void)
{
}

void kgd2kfd_suspend(struct kfd_dev *kfd)
{
}

int kgd2kfd_resume(struct kfd_dev *kfd)
{
	return 0;
}

int kgd2kfd_pre_reset(struct kfd_dev *kfd)
{
	return 0;
}

int kgd2kfd_post_reset(struct kfd_dev *kfd)
{
	return 0;
}

void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry)
{
}
#endif