/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

23 #include "amdgpu_amdkfd.h"
24 #include "amd_shared.h"
27 #include "amdgpu_gfx.h"
28 #include "amdgpu_dma_buf.h"
29 #include <linux/module.h>
30 #include <linux/dma-buf.h>
31 #include "amdgpu_xgmi.h"
static const unsigned int compute_vmid_bitmap = 0xFF00;

/* Total memory size in system memory and all GPU VRAM. Used to
 * estimate worst case amount of memory to reserve for page tables
 */
uint64_t amdgpu_amdkfd_total_mem_size;

int amdgpu_amdkfd_init(void)
{
        struct sysinfo si;
        int ret;

        si_meminfo(&si);
        amdgpu_amdkfd_total_mem_size = si.totalram - si.totalhigh;
        amdgpu_amdkfd_total_mem_size *= si.mem_unit;

#ifdef CONFIG_HSA_AMD
        ret = kgd2kfd_init();
        amdgpu_amdkfd_gpuvm_init_mem_limits();
#else
        ret = -ENOENT;
#endif

        return ret;
}

void amdgpu_amdkfd_fini(void)
{
        kgd2kfd_exit();
}

void amdgpu_amdkfd_device_probe(struct amdgpu_device *adev)
{
        bool vf = amdgpu_sriov_vf(adev);

        adev->kfd.dev = kgd2kfd_probe((struct kgd_dev *)adev,
                                      adev->pdev, adev->asic_type, vf);

        if (adev->kfd.dev)
                amdgpu_amdkfd_total_mem_size += adev->gmc.real_vram_size;
}

/**
 * amdgpu_doorbell_get_kfd_info - Report doorbell configuration required to
 *                                setup amdkfd
 *
 * @adev: amdgpu_device pointer
 * @aperture_base: output returning doorbell aperture base physical address
 * @aperture_size: output returning doorbell aperture size in bytes
 * @start_offset: output returning # of doorbell bytes reserved for amdgpu.
 *
 * amdgpu and amdkfd share the doorbell aperture. amdgpu sets it up,
 * takes doorbells required for its own rings and reports the setup to amdkfd.
 * amdgpu reserved doorbells are at the start of the doorbell aperture.
 */
static void amdgpu_doorbell_get_kfd_info(struct amdgpu_device *adev,
                                         phys_addr_t *aperture_base,
                                         size_t *aperture_size,
                                         size_t *start_offset)
{
        /*
         * The first num_doorbells are used by amdgpu.
         * amdkfd takes whatever's left in the aperture.
         */
        if (adev->doorbell.size > adev->doorbell.num_doorbells * sizeof(u32)) {
                *aperture_base = adev->doorbell.base;
                *aperture_size = adev->doorbell.size;
                *start_offset = adev->doorbell.num_doorbells * sizeof(u32);
        } else {
                *aperture_base = 0;
                *aperture_size = 0;
                *start_offset = 0;
        }
}

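/**
 * amdgpu_amdkfd_device_init - Hand shared compute resources to amdkfd
 *
 * @adev: amdgpu_device pointer
 *
 * Collects the resources amdgpu does not use itself - the compute VMIDs,
 * the MEC queues beyond the first MEC, and the tail of the doorbell
 * aperture - into a kgd2kfd_shared_resources struct and passes it to the
 * KFD side of the driver.
 */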
void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
{
        int i;
        int last_valid_bit;

        if (adev->kfd.dev) {
                struct kgd2kfd_shared_resources gpu_resources = {
                        .compute_vmid_bitmap = compute_vmid_bitmap,
                        .num_pipe_per_mec = adev->gfx.mec.num_pipe_per_mec,
                        .num_queue_per_pipe = adev->gfx.mec.num_queue_per_pipe,
                        .gpuvm_size = min(adev->vm_manager.max_pfn
                                          << AMDGPU_GPU_PAGE_SHIFT,
                                          AMDGPU_GMC_HOLE_START),
                        .drm_render_minor = adev->ddev->render->index,
                        .sdma_doorbell_idx = adev->doorbell_index.sdma_engine,
                };

                /* this is going to have a few of the MSBs set that we need to
                 * clear
                 */
                bitmap_complement(gpu_resources.queue_bitmap,
                                  adev->gfx.mec.queue_bitmap,
                                  KGD_MAX_QUEUES);

                /* According to linux/bitmap.h we shouldn't use bitmap_clear if
                 * nbits is not compile time constant
                 */
                last_valid_bit = 1 /* only first MEC can have compute queues */
                                * adev->gfx.mec.num_pipe_per_mec
                                * adev->gfx.mec.num_queue_per_pipe;
                for (i = last_valid_bit; i < KGD_MAX_QUEUES; ++i)
                        clear_bit(i, gpu_resources.queue_bitmap);

                amdgpu_doorbell_get_kfd_info(adev,
                                &gpu_resources.doorbell_physical_address,
                                &gpu_resources.doorbell_aperture_size,
                                &gpu_resources.doorbell_start_offset);

                /* Since SOC15, BIF starts to statically use the
                 * lower 12 bits of doorbell addresses for routing
                 * based on settings in registers like
                 * SDMA0_DOORBELL_RANGE etc.
                 * In order to route a doorbell to CP engine, the lower
                 * 12 bits of its address have to be outside the range
                 * set for SDMA, VCN, and IH blocks.
                 */
                if (adev->asic_type >= CHIP_VEGA10) {
                        gpu_resources.non_cp_doorbells_start =
                                        adev->doorbell_index.first_non_cp;
                        gpu_resources.non_cp_doorbells_end =
                                        adev->doorbell_index.last_non_cp;
                }

                kgd2kfd_device_init(adev->kfd.dev, adev->ddev, &gpu_resources);
        }
}

void amdgpu_amdkfd_device_fini(struct amdgpu_device *adev)
{
        if (adev->kfd.dev) {
                kgd2kfd_device_exit(adev->kfd.dev);
                adev->kfd.dev = NULL;
        }
}

void amdgpu_amdkfd_interrupt(struct amdgpu_device *adev,
                        const void *ih_ring_entry)
{
        if (adev->kfd.dev)
                kgd2kfd_interrupt(adev->kfd.dev, ih_ring_entry);
}

void amdgpu_amdkfd_suspend(struct amdgpu_device *adev)
{
        if (adev->kfd.dev)
                kgd2kfd_suspend(adev->kfd.dev);
}

int amdgpu_amdkfd_resume(struct amdgpu_device *adev)
{
        int r = 0;

        if (adev->kfd.dev)
                r = kgd2kfd_resume(adev->kfd.dev);

        return r;
}

int amdgpu_amdkfd_pre_reset(struct amdgpu_device *adev)
{
        int r = 0;

        if (adev->kfd.dev)
                r = kgd2kfd_pre_reset(adev->kfd.dev);

        return r;
}

int amdgpu_amdkfd_post_reset(struct amdgpu_device *adev)
{
        int r = 0;

        if (adev->kfd.dev)
                r = kgd2kfd_post_reset(adev->kfd.dev);

        return r;
}

void amdgpu_amdkfd_gpu_reset(struct kgd_dev *kgd)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

        if (amdgpu_device_should_recover_gpu(adev))
                amdgpu_device_gpu_recover(adev, NULL);
}

int amdgpu_amdkfd_alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
                                void **mem_obj, uint64_t *gpu_addr,
                                void **cpu_ptr, bool mqd_gfx9)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
        struct amdgpu_bo *bo = NULL;
        struct amdgpu_bo_param bp;
        int r;
        void *cpu_ptr_tmp = NULL;

        memset(&bp, 0, sizeof(bp));
        bp.size = size;
        bp.byte_align = PAGE_SIZE;
        bp.domain = AMDGPU_GEM_DOMAIN_GTT;
        bp.flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC;
        bp.type = ttm_bo_type_kernel;
        bp.resv = NULL;

        if (mqd_gfx9)
                bp.flags |= AMDGPU_GEM_CREATE_MQD_GFX9;

        r = amdgpu_bo_create(adev, &bp, &bo);
        if (r) {
                dev_err(adev->dev,
                        "failed to allocate BO for amdkfd (%d)\n", r);
                return r;
        }

        /* map the buffer */
        r = amdgpu_bo_reserve(bo, true);
        if (r) {
                dev_err(adev->dev, "(%d) failed to reserve bo for amdkfd\n", r);
                goto allocate_mem_reserve_bo_failed;
        }

        r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
        if (r) {
                dev_err(adev->dev, "(%d) failed to pin bo for amdkfd\n", r);
                goto allocate_mem_pin_bo_failed;
        }

        r = amdgpu_ttm_alloc_gart(&bo->tbo);
        if (r) {
                dev_err(adev->dev, "%p bind failed\n", bo);
                goto allocate_mem_kmap_bo_failed;
        }

        r = amdgpu_bo_kmap(bo, &cpu_ptr_tmp);
        if (r) {
                dev_err(adev->dev,
                        "(%d) failed to map bo to kernel for amdkfd\n", r);
                goto allocate_mem_kmap_bo_failed;
        }

        *mem_obj = bo;
        *gpu_addr = amdgpu_bo_gpu_offset(bo);
        *cpu_ptr = cpu_ptr_tmp;

        amdgpu_bo_unreserve(bo);

        return 0;

allocate_mem_kmap_bo_failed:
        amdgpu_bo_unpin(bo);
allocate_mem_pin_bo_failed:
        amdgpu_bo_unreserve(bo);
allocate_mem_reserve_bo_failed:
        amdgpu_bo_unref(&bo);

        return r;
}

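/**
 * amdgpu_amdkfd_free_gtt_mem - Undo amdgpu_amdkfd_alloc_gtt_mem
 *
 * @kgd: kfd-side device handle
 * @mem_obj: BO handle returned by the allocation
 *
 * Unmaps, unpins and drops the final reference, mirroring the allocation
 * steps in reverse.
 */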
void amdgpu_amdkfd_free_gtt_mem(struct kgd_dev *kgd, void *mem_obj)
{
        struct amdgpu_bo *bo = (struct amdgpu_bo *) mem_obj;

        amdgpu_bo_reserve(bo, true);
        amdgpu_bo_kunmap(bo);
        amdgpu_bo_unpin(bo);
        amdgpu_bo_unreserve(bo);
        amdgpu_bo_unref(&(bo));
}

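/**
 * amdgpu_amdkfd_alloc_gws - Allocate a global wave sync (GWS) BO for KFD
 *
 * @kgd: kfd-side device handle
 * @size: requested allocation size
 * @mem_obj: output returning the BO handle
 *
 * GWS is a dedicated on-chip resource, so the BO lives in the special
 * AMDGPU_GEM_DOMAIN_GWS domain and is never CPU accessible.
 */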
int amdgpu_amdkfd_alloc_gws(struct kgd_dev *kgd, size_t size,
                                void **mem_obj)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
        struct amdgpu_bo *bo = NULL;
        struct amdgpu_bo_param bp;
        int r;

        memset(&bp, 0, sizeof(bp));
        bp.size = size;
        bp.byte_align = 1;
        bp.domain = AMDGPU_GEM_DOMAIN_GWS;
        bp.flags = AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
        bp.type = ttm_bo_type_device;
        bp.resv = NULL;

        r = amdgpu_bo_create(adev, &bp, &bo);
        if (r) {
                dev_err(adev->dev,
                        "failed to allocate gws BO for amdkfd (%d)\n", r);
                return r;
        }

        *mem_obj = bo;
        return 0;
}

void amdgpu_amdkfd_free_gws(struct kgd_dev *kgd, void *mem_obj)
{
        struct amdgpu_bo *bo = (struct amdgpu_bo *)mem_obj;

        amdgpu_bo_unref(&bo);
}

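/**
 * amdgpu_amdkfd_get_fw_version - Report the loaded firmware version
 *
 * @kgd: kfd-side device handle
 * @type: which engine's firmware to query
 *
 * Returns the version of the microcode currently loaded for the given
 * engine, or 0 for an unknown engine type.
 */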
uint32_t amdgpu_amdkfd_get_fw_version(struct kgd_dev *kgd,
                                      enum kgd_engine_type type)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

        switch (type) {
        case KGD_ENGINE_PFP:
                return adev->gfx.pfp_fw_version;

        case KGD_ENGINE_ME:
                return adev->gfx.me_fw_version;

        case KGD_ENGINE_CE:
                return adev->gfx.ce_fw_version;

        case KGD_ENGINE_MEC1:
                return adev->gfx.mec_fw_version;

        case KGD_ENGINE_MEC2:
                return adev->gfx.mec2_fw_version;

        case KGD_ENGINE_RLC:
                return adev->gfx.rlc_fw_version;

        case KGD_ENGINE_SDMA1:
                return adev->sdma.instance[0].fw_version;

        case KGD_ENGINE_SDMA2:
                return adev->sdma.instance[1].fw_version;

        default:
                return 0;
        }

        return 0;
}

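/**
 * amdgpu_amdkfd_get_local_mem_info - Report VRAM sizes and memory clock
 *
 * @kgd: kfd-side device handle
 * @mem_info: output structure filled in by this function
 *
 * VRAM that the CPU can reach through the BAR aperture is reported as
 * "public", the remainder as "private". If the aperture lies outside the
 * device's DMA mask, all of VRAM is treated as private.
 */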
void amdgpu_amdkfd_get_local_mem_info(struct kgd_dev *kgd,
                                      struct kfd_local_mem_info *mem_info)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
        uint64_t address_mask = adev->dev->dma_mask ? ~*adev->dev->dma_mask :
                                             ~((1ULL << 32) - 1);
        resource_size_t aper_limit = adev->gmc.aper_base + adev->gmc.aper_size;

        memset(mem_info, 0, sizeof(*mem_info));
        if (!(adev->gmc.aper_base & address_mask ||
              aper_limit & address_mask)) {
                mem_info->local_mem_size_public = adev->gmc.visible_vram_size;
                mem_info->local_mem_size_private = adev->gmc.real_vram_size -
                                adev->gmc.visible_vram_size;
        } else {
                mem_info->local_mem_size_public = 0;
                mem_info->local_mem_size_private = adev->gmc.real_vram_size;
        }
        mem_info->vram_width = adev->gmc.vram_width;

398 pr_debug("Address base: %pap limit %pap public 0x%llx private 0x%llx\n",
399 &adev
->gmc
.aper_base
, &aper_limit
,
400 mem_info
->local_mem_size_public
,
401 mem_info
->local_mem_size_private
);
        if (amdgpu_sriov_vf(adev))
                mem_info->mem_clk_max = adev->clock.default_mclk / 100;
        else if (adev->powerplay.pp_funcs) {
                if (amdgpu_emu_mode == 1)
                        mem_info->mem_clk_max = 0;
                else
                        mem_info->mem_clk_max = amdgpu_dpm_get_mclk(adev, false) / 100;
        } else
                mem_info->mem_clk_max = 100;
}

uint64_t amdgpu_amdkfd_get_gpu_clock_counter(struct kgd_dev *kgd)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

        if (adev->gfx.funcs->get_gpu_clock_counter)
                return adev->gfx.funcs->get_gpu_clock_counter(adev);
        return 0;
}

uint32_t amdgpu_amdkfd_get_max_engine_clock_in_mhz(struct kgd_dev *kgd)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

        /* the sclk is in quanta of 10kHz */
        if (amdgpu_sriov_vf(adev))
                return adev->clock.default_sclk / 100;
        else if (adev->powerplay.pp_funcs)
                return amdgpu_dpm_get_sclk(adev, false) / 100;
        else
                return 100;
}

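/**
 * amdgpu_amdkfd_get_cu_info - Report the compute unit topology
 *
 * @kgd: kfd-side device handle
 * @cu_info: output structure filled from adev->gfx.cu_info and gfx.config
 */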
void amdgpu_amdkfd_get_cu_info(struct kgd_dev *kgd, struct kfd_cu_info *cu_info)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
        struct amdgpu_cu_info acu_info = adev->gfx.cu_info;

        memset(cu_info, 0, sizeof(*cu_info));
        if (sizeof(cu_info->cu_bitmap) != sizeof(acu_info.bitmap))
                return;

        cu_info->cu_active_number = acu_info.number;
        cu_info->cu_ao_mask = acu_info.ao_cu_mask;
        memcpy(&cu_info->cu_bitmap[0], &acu_info.bitmap[0],
               sizeof(acu_info.bitmap));
        cu_info->num_shader_engines = adev->gfx.config.max_shader_engines;
        cu_info->num_shader_arrays_per_engine = adev->gfx.config.max_sh_per_se;
        cu_info->num_cu_per_sh = adev->gfx.config.max_cu_per_sh;
        cu_info->simd_per_cu = acu_info.simd_per_cu;
        cu_info->max_waves_per_simd = acu_info.max_waves_per_simd;
        cu_info->wave_front_size = acu_info.wave_front_size;
        cu_info->max_scratch_slots_per_cu = acu_info.max_scratch_slots_per_cu;
        cu_info->lds_size = acu_info.lds_size;
}

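/**
 * amdgpu_amdkfd_get_dmabuf_info - Describe a dma-buf for KFD import
 *
 * @kgd: kfd-side device handle
 * @dma_buf_fd: file descriptor of the dma-buf to query
 * @dma_buf_kgd: output returning the kgd handle of the exporting device
 * @bo_size: output returning the size of the underlying BO
 * @metadata_buffer: optional buffer to receive the BO metadata
 * @buffer_size: size of @metadata_buffer
 * @metadata_size: output returning the size of the BO metadata
 * @flags: output returning ALLOC_MEM_FLAGS_* describing the BO
 *
 * Only buffers exported by amdgpu itself and placed in VRAM or GTT can be
 * imported; all output pointers are optional.
 */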
int amdgpu_amdkfd_get_dmabuf_info(struct kgd_dev *kgd, int dma_buf_fd,
                                  struct kgd_dev **dma_buf_kgd,
                                  uint64_t *bo_size, void *metadata_buffer,
                                  size_t buffer_size, uint32_t *metadata_size,
                                  uint32_t *flags)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
        struct dma_buf *dma_buf;
        struct drm_gem_object *obj;
        struct amdgpu_bo *bo;
        uint64_t metadata_flags;
        int r = -EINVAL;

        dma_buf = dma_buf_get(dma_buf_fd);
        if (IS_ERR(dma_buf))
                return PTR_ERR(dma_buf);

        if (dma_buf->ops != &amdgpu_dmabuf_ops)
                /* Can't handle non-graphics buffers */
                goto out_put;

        obj = dma_buf->priv;
        if (obj->dev->driver != adev->ddev->driver)
                /* Can't handle buffers from different drivers */
                goto out_put;

        adev = obj->dev->dev_private;
        bo = gem_to_amdgpu_bo(obj);
        if (!(bo->preferred_domains & (AMDGPU_GEM_DOMAIN_VRAM |
                                       AMDGPU_GEM_DOMAIN_GTT)))
                /* Only VRAM and GTT BOs are supported */
                goto out_put;

        r = 0;
        if (dma_buf_kgd)
                *dma_buf_kgd = (struct kgd_dev *)adev;
        if (bo_size)
                *bo_size = amdgpu_bo_size(bo);
        if (metadata_size)
                *metadata_size = bo->metadata_size;
        if (metadata_buffer)
                r = amdgpu_bo_get_metadata(bo, metadata_buffer, buffer_size,
                                           metadata_size, &metadata_flags);
        if (flags) {
                *flags = (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) ?
                                ALLOC_MEM_FLAGS_VRAM : ALLOC_MEM_FLAGS_GTT;

                if (bo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)
                        *flags |= ALLOC_MEM_FLAGS_PUBLIC;
        }

out_put:
        dma_buf_put(dma_buf);
        return r;
}

uint64_t amdgpu_amdkfd_get_vram_usage(struct kgd_dev *kgd)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

        return amdgpu_vram_mgr_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
}

uint64_t amdgpu_amdkfd_get_hive_id(struct kgd_dev *kgd)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

        return adev->gmc.xgmi.hive_id;
}

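/**
 * amdgpu_amdkfd_get_xgmi_hops_count - XGMI hops between two devices
 *
 * @dst: kfd-side handle of the destination device
 * @src: kfd-side handle of the source (peer) device
 *
 * Returns the number of XGMI hops from @dst to @src, or 0 if the count
 * could not be determined (an error is logged in that case).
 */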
uint8_t amdgpu_amdkfd_get_xgmi_hops_count(struct kgd_dev *dst, struct kgd_dev *src)
{
        struct amdgpu_device *peer_adev = (struct amdgpu_device *)src;
        struct amdgpu_device *adev = (struct amdgpu_device *)dst;
        int ret = amdgpu_xgmi_get_hops_count(adev, peer_adev);

        if (ret < 0) {
                DRM_ERROR("amdgpu: failed to get xgmi hops count between node %d and %d. ret = %d\n",
                          adev->gmc.xgmi.physical_node_id,
                          peer_adev->gmc.xgmi.physical_node_id, ret);
                ret = 0;
        }

        return (uint8_t)ret;
}

uint64_t amdgpu_amdkfd_get_mmio_remap_phys_addr(struct kgd_dev *kgd)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

        return adev->rmmio_remap.bus_addr;
}

uint32_t amdgpu_amdkfd_get_num_gws(struct kgd_dev *kgd)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

        return adev->gds.gws_size;
}

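/**
 * amdgpu_amdkfd_submit_ib - Synchronously execute an IB on behalf of KFD
 *
 * @kgd: kfd-side device handle
 * @engine: which engine to submit to (first compute ring or an SDMA ring)
 * @vmid: hardware VMID to execute under (works for non-HWS mode only)
 * @gpu_addr: GPU virtual address of the IB
 * @ib_cmd: CPU pointer to the IB contents
 * @ib_len: IB length in dwords
 *
 * Builds a one-IB job, schedules it on the selected ring and blocks until
 * its fence signals.
 */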
int amdgpu_amdkfd_submit_ib(struct kgd_dev *kgd, enum kgd_engine_type engine,
                                uint32_t vmid, uint64_t gpu_addr,
                                uint32_t *ib_cmd, uint32_t ib_len)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
        struct amdgpu_job *job;
        struct amdgpu_ib *ib;
        struct amdgpu_ring *ring;
        struct dma_fence *f = NULL;
        int ret;

        switch (engine) {
        case KGD_ENGINE_MEC1:
                ring = &adev->gfx.compute_ring[0];
                break;
        case KGD_ENGINE_SDMA1:
                ring = &adev->sdma.instance[0].ring;
                break;
        case KGD_ENGINE_SDMA2:
                ring = &adev->sdma.instance[1].ring;
                break;
        default:
                pr_err("Invalid engine in IB submission: %d\n", engine);
                ret = -EINVAL;
                goto err;
        }

        ret = amdgpu_job_alloc(adev, 1, &job, NULL);
        if (ret)
                goto err;

        ib = &job->ibs[0];
        memset(ib, 0, sizeof(struct amdgpu_ib));

        ib->gpu_addr = gpu_addr;
        ib->ptr = ib_cmd;
        ib->length_dw = ib_len;
        /* This works for NO_HWS. TODO: need to handle without knowing VMID */
        job->vmid = vmid;

        ret = amdgpu_ib_schedule(ring, 1, ib, job, &f);
        if (ret) {
                DRM_ERROR("amdgpu: failed to schedule IB.\n");
                goto err_ib_sched;
        }

        ret = dma_fence_wait(f, false);

err_ib_sched:
        dma_fence_put(f);
        amdgpu_job_free(job);
err:
        return ret;
}

void amdgpu_amdkfd_set_compute_idle(struct kgd_dev *kgd, bool idle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

        amdgpu_dpm_switch_power_profile(adev,
                                        PP_SMC_POWER_PROFILE_COMPUTE,
                                        !idle);
}

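/* A VMID belongs to KFD when its bit is set in compute_vmid_bitmap, e.g.
 * vmid 9: (1 << 9) & 0xFF00 != 0, so it is a KFD VMID.
 */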
bool amdgpu_amdkfd_is_kfd_vmid(struct amdgpu_device *adev, u32 vmid)
{
        if (adev->kfd.dev) {
                if ((1 << vmid) & compute_vmid_bitmap)
                        return true;
        }

        return false;
}

int amdgpu_amdkfd_flush_gpu_tlb_vmid(struct kgd_dev *kgd, uint16_t vmid)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

        if (adev->family == AMDGPU_FAMILY_AI) {
                int i;

                for (i = 0; i < adev->num_vmhubs; i++)
                        amdgpu_gmc_flush_gpu_tlb(adev, vmid, i, 0);
        } else {
                amdgpu_gmc_flush_gpu_tlb(adev, vmid, AMDGPU_GFXHUB_0, 0);
        }

        return 0;
}

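/**
 * amdgpu_amdkfd_flush_gpu_tlb_pasid - Flush TLB entries for one PASID
 *
 * @kgd: kfd-side device handle
 * @pasid: process address space ID whose translations are invalidated
 *
 * Vega20 in an XGMI hive needs the heavier flush type 2; on Vega/AI
 * parts the flush must also cover every VM hub, not just the GFX hub.
 */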
int amdgpu_amdkfd_flush_gpu_tlb_pasid(struct kgd_dev *kgd, uint16_t pasid)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
        uint32_t flush_type = 0;
        bool all_hub = false;

        if (adev->gmc.xgmi.num_physical_nodes &&
            adev->asic_type == CHIP_VEGA20)
                flush_type = 2;

        if (adev->family == AMDGPU_FAMILY_AI)
                all_hub = true;

        return amdgpu_gmc_flush_gpu_tlb_pasid(adev, pasid, flush_type, all_hub);
}

bool amdgpu_amdkfd_have_atomics_support(struct kgd_dev *kgd)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

        return adev->have_atomics_support;
}

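/* Stubs for kernels built without CONFIG_HSA_AMD: they keep the callers
 * above linkable while turning every KFD entry point into a no-op.
 */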
#ifndef CONFIG_HSA_AMD
bool amdkfd_fence_check_mm(struct dma_fence *f, struct mm_struct *mm)
{
        return false;
}

void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo *bo)
{
}

void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
                                    struct amdgpu_vm *vm)
{
}

struct amdgpu_amdkfd_fence *to_amdgpu_amdkfd_fence(struct dma_fence *f)
{
        return NULL;
}

int amdgpu_amdkfd_evict_userptr(struct kgd_mem *mem, struct mm_struct *mm)
{
        return 0;
}

struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd, struct pci_dev *pdev,
                              unsigned int asic_type, bool vf)
{
        return NULL;
}

bool kgd2kfd_device_init(struct kfd_dev *kfd,
                         struct drm_device *ddev,
                         const struct kgd2kfd_shared_resources *gpu_resources)
{
        return false;
}

void kgd2kfd_device_exit(struct kfd_dev *kfd)
{
}

void kgd2kfd_exit(void)
{
}

void kgd2kfd_suspend(struct kfd_dev *kfd)
{
}

int kgd2kfd_resume(struct kfd_dev *kfd)
{
        return 0;
}

int kgd2kfd_pre_reset(struct kfd_dev *kfd)
{
        return 0;
}

int kgd2kfd_post_reset(struct kfd_dev *kfd)
{
        return 0;
}

void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry)
{
}

void kgd2kfd_set_sram_ecc_flag(struct kfd_dev *kfd)
{
}
#endif