/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/module.h>
#include <linux/fdtable.h>
#include <linux/uaccess.h>
#include <linux/firmware.h>

#include "amdgpu.h"
#include "amdgpu_amdkfd.h"
#include "amdgpu_ucode.h"

#include "gca/gfx_8_0_sh_mask.h"
#include "gca/gfx_8_0_d.h"
#include "gca/gfx_8_0_enum.h"
#include "oss/oss_3_0_sh_mask.h"
#include "oss/oss_3_0_d.h"
#include "gmc/gmc_8_1_sh_mask.h"
#include "gmc/gmc_8_1_d.h"
#include "vi_structs.h"
enum hqd_dequeue_request_type {
        NO_ACTION = 0,
        DRAIN_PIPE,
        RESET_WAVES
};

/*
 * Register access functions
 */
static void kgd_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid,
                uint32_t sh_mem_config,
                uint32_t sh_mem_ape1_base, uint32_t sh_mem_ape1_limit,
                uint32_t sh_mem_bases);
static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid,
                unsigned int vmid);
static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id);
static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
                uint32_t queue_id, uint32_t __user *wptr,
                uint32_t wptr_shift, uint32_t wptr_mask,
                struct mm_struct *mm);
static int kgd_hqd_dump(struct kgd_dev *kgd,
                uint32_t pipe_id, uint32_t queue_id,
                uint32_t (**dump)[2], uint32_t *n_regs);
static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd,
                uint32_t __user *wptr, struct mm_struct *mm);
static int kgd_hqd_sdma_dump(struct kgd_dev *kgd,
                uint32_t engine_id, uint32_t queue_id,
                uint32_t (**dump)[2], uint32_t *n_regs);
static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
                uint32_t pipe_id, uint32_t queue_id);
static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd);
static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd,
                enum kfd_preempt_type reset_type,
                unsigned int utimeout, uint32_t pipe_id,
                uint32_t queue_id);
static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
                unsigned int utimeout);
static int kgd_address_watch_disable(struct kgd_dev *kgd);
static int kgd_address_watch_execute(struct kgd_dev *kgd,
                unsigned int watch_point_id,
                uint32_t cntl_val,
                uint32_t addr_hi,
                uint32_t addr_lo);
static int kgd_wave_control_execute(struct kgd_dev *kgd,
                uint32_t gfx_index_val,
                uint32_t sq_cmd);
static uint32_t kgd_address_watch_get_offset(struct kgd_dev *kgd,
                unsigned int watch_point_id,
                unsigned int reg_offset);
static bool get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd,
                uint8_t vmid);
static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd,
                uint8_t vmid);
static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type);
static void set_scratch_backing_va(struct kgd_dev *kgd,
                uint64_t va, uint32_t vmid);
static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
                uint32_t page_table_base);
static int invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid);
static int invalidate_tlbs_vmid(struct kgd_dev *kgd, uint16_t vmid);
/* Because of REG_GET_FIELD() being used, we put this function in the
 * asic specific file.
 */
static int get_tile_config(struct kgd_dev *kgd,
                struct tile_config *config)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

        config->gb_addr_config = adev->gfx.config.gb_addr_config;
        config->num_banks = REG_GET_FIELD(adev->gfx.config.mc_arb_ramcfg,
                                MC_ARB_RAMCFG, NOOFBANK);
        config->num_ranks = REG_GET_FIELD(adev->gfx.config.mc_arb_ramcfg,
                                MC_ARB_RAMCFG, NOOFRANKS);

        config->tile_config_ptr = adev->gfx.config.tile_mode_array;
        config->num_tile_configs =
                        ARRAY_SIZE(adev->gfx.config.tile_mode_array);
        config->macro_tile_config_ptr =
                        adev->gfx.config.macrotile_mode_array;
        config->num_macro_tile_configs =
                        ARRAY_SIZE(adev->gfx.config.macrotile_mode_array);

        return 0;
}
static const struct kfd2kgd_calls kfd2kgd = {
        .init_gtt_mem_allocation = alloc_gtt_mem,
        .free_gtt_mem = free_gtt_mem,
        .get_local_mem_info = get_local_mem_info,
        .get_gpu_clock_counter = get_gpu_clock_counter,
        .get_max_engine_clock_in_mhz = get_max_engine_clock_in_mhz,
        .alloc_pasid = amdgpu_pasid_alloc,
        .free_pasid = amdgpu_pasid_free,
        .program_sh_mem_settings = kgd_program_sh_mem_settings,
        .set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping,
        .init_interrupts = kgd_init_interrupts,
        .hqd_load = kgd_hqd_load,
        .hqd_sdma_load = kgd_hqd_sdma_load,
        .hqd_dump = kgd_hqd_dump,
        .hqd_sdma_dump = kgd_hqd_sdma_dump,
        .hqd_is_occupied = kgd_hqd_is_occupied,
        .hqd_sdma_is_occupied = kgd_hqd_sdma_is_occupied,
        .hqd_destroy = kgd_hqd_destroy,
        .hqd_sdma_destroy = kgd_hqd_sdma_destroy,
        .address_watch_disable = kgd_address_watch_disable,
        .address_watch_execute = kgd_address_watch_execute,
        .wave_control_execute = kgd_wave_control_execute,
        .address_watch_get_offset = kgd_address_watch_get_offset,
        .get_atc_vmid_pasid_mapping_pasid =
                        get_atc_vmid_pasid_mapping_pasid,
        .get_atc_vmid_pasid_mapping_valid =
                        get_atc_vmid_pasid_mapping_valid,
        .get_fw_version = get_fw_version,
        .set_scratch_backing_va = set_scratch_backing_va,
        .get_tile_config = get_tile_config,
        .get_cu_info = get_cu_info,
        .get_vram_usage = amdgpu_amdkfd_get_vram_usage,
        .create_process_vm = amdgpu_amdkfd_gpuvm_create_process_vm,
        .acquire_process_vm = amdgpu_amdkfd_gpuvm_acquire_process_vm,
        .destroy_process_vm = amdgpu_amdkfd_gpuvm_destroy_process_vm,
        .get_process_page_dir = amdgpu_amdkfd_gpuvm_get_process_page_dir,
        .set_vm_context_page_table_base = set_vm_context_page_table_base,
        .alloc_memory_of_gpu = amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu,
        .free_memory_of_gpu = amdgpu_amdkfd_gpuvm_free_memory_of_gpu,
        .map_memory_to_gpu = amdgpu_amdkfd_gpuvm_map_memory_to_gpu,
        .unmap_memory_to_gpu = amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu,
        .sync_memory = amdgpu_amdkfd_gpuvm_sync_memory,
        .map_gtt_bo_to_kernel = amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel,
        .restore_process_bos = amdgpu_amdkfd_gpuvm_restore_process_bos,
        .invalidate_tlbs = invalidate_tlbs,
        .invalidate_tlbs_vmid = invalidate_tlbs_vmid,
        .submit_ib = amdgpu_amdkfd_submit_ib,
        .get_vm_fault_info = amdgpu_amdkfd_gpuvm_get_vm_fault_info,
        .gpu_recover = amdgpu_amdkfd_gpu_reset,
        .set_compute_idle = amdgpu_amdkfd_set_compute_idle
};
struct kfd2kgd_calls *amdgpu_amdkfd_gfx_8_0_get_functions(void)
{
        return (struct kfd2kgd_calls *)&kfd2kgd;
}
static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
{
        return (struct amdgpu_device *)kgd;
}
static void lock_srbm(struct kgd_dev *kgd, uint32_t mec, uint32_t pipe,
                        uint32_t queue, uint32_t vmid)
{
        struct amdgpu_device *adev = get_amdgpu_device(kgd);
        uint32_t value = PIPEID(pipe) | MEID(mec) | VMID(vmid) | QUEUEID(queue);

        mutex_lock(&adev->srbm_mutex);
        WREG32(mmSRBM_GFX_CNTL, value);
}
static void unlock_srbm(struct kgd_dev *kgd)
{
        struct amdgpu_device *adev = get_amdgpu_device(kgd);

        WREG32(mmSRBM_GFX_CNTL, 0);
        mutex_unlock(&adev->srbm_mutex);
}
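
/*
 * acquire_queue()/release_queue() bracket direct HQD register access:
 * the KFD pipe_id is decomposed into an MEC number and a pipe within
 * that MEC, and the SRBM index stays selected (srbm_mutex held) until
 * release_queue() restores the default state.
 */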
static void acquire_queue(struct kgd_dev *kgd, uint32_t pipe_id,
                        uint32_t queue_id)
{
        struct amdgpu_device *adev = get_amdgpu_device(kgd);

        uint32_t mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
        uint32_t pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);

        lock_srbm(kgd, mec, pipe, queue_id, 0);
}
static void release_queue(struct kgd_dev *kgd)
{
        unlock_srbm(kgd);
}
static void kgd_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid,
                                        uint32_t sh_mem_config,
                                        uint32_t sh_mem_ape1_base,
                                        uint32_t sh_mem_ape1_limit,
                                        uint32_t sh_mem_bases)
{
        struct amdgpu_device *adev = get_amdgpu_device(kgd);

        lock_srbm(kgd, 0, 0, 0, vmid);

        WREG32(mmSH_MEM_CONFIG, sh_mem_config);
        WREG32(mmSH_MEM_APE1_BASE, sh_mem_ape1_base);
        WREG32(mmSH_MEM_APE1_LIMIT, sh_mem_ape1_limit);
        WREG32(mmSH_MEM_BASES, sh_mem_bases);

        unlock_srbm(kgd);
}
static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid,
                                        unsigned int vmid)
{
        struct amdgpu_device *adev = get_amdgpu_device(kgd);

        /*
         * We have to assume that there is no outstanding mapping.
         * The ATC_VMID_PASID_MAPPING_UPDATE_STATUS bit could be 0 because
         * a mapping is in progress or because a mapping finished
         * and the SW cleared it.
         * So the protocol is to always wait & clear.
         */
        uint32_t pasid_mapping = (pasid == 0) ? 0 : (uint32_t)pasid |
                                        ATC_VMID0_PASID_MAPPING__VALID_MASK;

        WREG32(mmATC_VMID0_PASID_MAPPING + vmid, pasid_mapping);

        while (!(RREG32(mmATC_VMID_PASID_MAPPING_UPDATE_STATUS) & (1U << vmid)))
                cpu_relax();
        WREG32(mmATC_VMID_PASID_MAPPING_UPDATE_STATUS, 1U << vmid);

        /* Mapping vmid to pasid also for IH block */
        WREG32(mmIH_VMID_0_LUT + vmid, pasid_mapping);

        return 0;
}
static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id)
{
        struct amdgpu_device *adev = get_amdgpu_device(kgd);
        uint32_t mec;
        uint32_t pipe;

        mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
        pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);

        lock_srbm(kgd, mec, pipe, 0, 0);

        WREG32(mmCPC_INT_CNTL, CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK);

        unlock_srbm(kgd);

        return 0;
}
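
/*
 * SDMA RLC queue registers repeat per engine and per queue; the MQD's
 * engine and queue IDs are turned into a register offset using the
 * fixed SDMA1_REGISTER_OFFSET and KFD_VI_SDMA_QUEUE_OFFSET strides.
 */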
static inline uint32_t get_sdma_base_addr(struct vi_sdma_mqd *m)
{
        uint32_t retval;

        retval = m->sdma_engine_id * SDMA1_REGISTER_OFFSET +
                m->sdma_queue_id * KFD_VI_SDMA_QUEUE_OFFSET;
        pr_debug("kfd: sdma base address: 0x%x\n", retval);

        return retval;
}
static inline struct vi_mqd *get_mqd(void *mqd)
{
        return (struct vi_mqd *)mqd;
}
static inline struct vi_sdma_mqd *get_sdma_mqd(void *mqd)
{
        return (struct vi_sdma_mqd *)mqd;
}
static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
                        uint32_t queue_id, uint32_t __user *wptr,
                        uint32_t wptr_shift, uint32_t wptr_mask,
                        struct mm_struct *mm)
{
        struct amdgpu_device *adev = get_amdgpu_device(kgd);
        struct vi_mqd *m;
        uint32_t *mqd_hqd;
        uint32_t reg, wptr_val, data;
        bool valid_wptr = false;

        m = get_mqd(mqd);

        acquire_queue(kgd, pipe_id, queue_id);

        /* HIQ is set during driver init period with vmid set to 0*/
        if (m->cp_hqd_vmid == 0) {
                uint32_t value, mec, pipe;

                mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
                pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);

                pr_debug("kfd: set HIQ, mec:%d, pipe:%d, queue:%d.\n",
                        mec, pipe, queue_id);
                value = RREG32(mmRLC_CP_SCHEDULERS);
                value = REG_SET_FIELD(value, RLC_CP_SCHEDULERS, scheduler1,
                        ((mec << 5) | (pipe << 3) | queue_id | 0x80));
                WREG32(mmRLC_CP_SCHEDULERS, value);
        }

        /* HQD registers extend from CP_MQD_BASE_ADDR to CP_HQD_EOP_WPTR_MEM. */
        mqd_hqd = &m->cp_mqd_base_addr_lo;

        for (reg = mmCP_MQD_BASE_ADDR; reg <= mmCP_HQD_EOP_CONTROL; reg++)
                WREG32(reg, mqd_hqd[reg - mmCP_MQD_BASE_ADDR]);

        /* Tonga errata: EOP RPTR/WPTR should be left unmodified.
         * This is safe since EOP RPTR==WPTR for any inactive HQD
         * on ASICs that do not support context-save.
         * EOP writes/reads can start anywhere in the ring.
         */
        if (get_amdgpu_device(kgd)->asic_type != CHIP_TONGA) {
                WREG32(mmCP_HQD_EOP_RPTR, m->cp_hqd_eop_rptr);
                WREG32(mmCP_HQD_EOP_WPTR, m->cp_hqd_eop_wptr);
                WREG32(mmCP_HQD_EOP_WPTR_MEM, m->cp_hqd_eop_wptr_mem);
        }

        for (reg = mmCP_HQD_EOP_EVENTS; reg <= mmCP_HQD_ERROR; reg++)
                WREG32(reg, mqd_hqd[reg - mmCP_MQD_BASE_ADDR]);

        /* Copy userspace write pointer value to register.
         * Activate doorbell logic to monitor subsequent changes.
         */
        data = REG_SET_FIELD(m->cp_hqd_pq_doorbell_control,
                             CP_HQD_PQ_DOORBELL_CONTROL, DOORBELL_EN, 1);
        WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL, data);

        /* read_user_ptr may take the mm->mmap_sem.
         * release srbm_mutex to avoid circular dependency between
         * srbm_mutex->mm_sem->reservation_ww_class_mutex->srbm_mutex.
         */
        release_queue(kgd);
        valid_wptr = read_user_wptr(mm, wptr, wptr_val);
        acquire_queue(kgd, pipe_id, queue_id);
        if (valid_wptr)
                WREG32(mmCP_HQD_PQ_WPTR, (wptr_val << wptr_shift) & wptr_mask);

        data = REG_SET_FIELD(m->cp_hqd_active, CP_HQD_ACTIVE, ACTIVE, 1);
        WREG32(mmCP_HQD_ACTIVE, data);

        release_queue(kgd);

        return 0;
}
static int kgd_hqd_dump(struct kgd_dev *kgd,
                        uint32_t pipe_id, uint32_t queue_id,
                        uint32_t (**dump)[2], uint32_t *n_regs)
{
        struct amdgpu_device *adev = get_amdgpu_device(kgd);
        uint32_t i = 0, reg;
#define HQD_N_REGS (54+4)
#define DUMP_REG(addr) do {                             \
                if (WARN_ON_ONCE(i >= HQD_N_REGS))      \
                        break;                          \
                (*dump)[i][0] = (addr) << 2;            \
                (*dump)[i++][1] = RREG32(addr);         \
        } while (0)

        *dump = kmalloc_array(HQD_N_REGS * 2, sizeof(uint32_t), GFP_KERNEL);
        if (*dump == NULL)
                return -ENOMEM;

        acquire_queue(kgd, pipe_id, queue_id);

        DUMP_REG(mmCOMPUTE_STATIC_THREAD_MGMT_SE0);
        DUMP_REG(mmCOMPUTE_STATIC_THREAD_MGMT_SE1);
        DUMP_REG(mmCOMPUTE_STATIC_THREAD_MGMT_SE2);
        DUMP_REG(mmCOMPUTE_STATIC_THREAD_MGMT_SE3);

        for (reg = mmCP_MQD_BASE_ADDR; reg <= mmCP_HQD_EOP_DONES; reg++)
                DUMP_REG(reg);

        release_queue(kgd);

        WARN_ON_ONCE(i != HQD_N_REGS);
        *n_regs = i;

        return 0;
}
static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd,
                             uint32_t __user *wptr, struct mm_struct *mm)
{
        struct amdgpu_device *adev = get_amdgpu_device(kgd);
        struct vi_sdma_mqd *m;
        unsigned long end_jiffies;
        uint32_t sdma_base_addr;
        uint32_t data;

        m = get_sdma_mqd(mqd);
        sdma_base_addr = get_sdma_base_addr(m);
        WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL,
                m->sdmax_rlcx_rb_cntl & (~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK));

        end_jiffies = msecs_to_jiffies(2000) + jiffies;
        while (true) {
                data = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS);
                if (data & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
                        break;
                if (time_after(jiffies, end_jiffies))
                        return -ETIME;
                usleep_range(500, 1000);
        }
        if (m->sdma_engine_id) {
                data = RREG32(mmSDMA1_GFX_CONTEXT_CNTL);
                data = REG_SET_FIELD(data, SDMA1_GFX_CONTEXT_CNTL,
                                RESUME_CTX, 0);
                WREG32(mmSDMA1_GFX_CONTEXT_CNTL, data);
        } else {
                data = RREG32(mmSDMA0_GFX_CONTEXT_CNTL);
                data = REG_SET_FIELD(data, SDMA0_GFX_CONTEXT_CNTL,
                                RESUME_CTX, 0);
                WREG32(mmSDMA0_GFX_CONTEXT_CNTL, data);
        }

        data = REG_SET_FIELD(m->sdmax_rlcx_doorbell, SDMA0_RLC0_DOORBELL,
                             ENABLE, 1);
        WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, data);
        WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR, m->sdmax_rlcx_rb_rptr);

        if (read_user_wptr(mm, wptr, data))
                WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR, data);
        else
                WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR,
                       m->sdmax_rlcx_rb_rptr);

        WREG32(sdma_base_addr + mmSDMA0_RLC0_VIRTUAL_ADDR,
                        m->sdmax_rlcx_virtual_addr);
        WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE, m->sdmax_rlcx_rb_base);
        WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE_HI,
                        m->sdmax_rlcx_rb_base_hi);
        WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_LO,
                        m->sdmax_rlcx_rb_rptr_addr_lo);
        WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_HI,
                        m->sdmax_rlcx_rb_rptr_addr_hi);

        data = REG_SET_FIELD(m->sdmax_rlcx_rb_cntl, SDMA0_RLC0_RB_CNTL,
                             RB_ENABLE, 1);
        WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, data);

        return 0;
}
static int kgd_hqd_sdma_dump(struct kgd_dev *kgd,
                             uint32_t engine_id, uint32_t queue_id,
                             uint32_t (**dump)[2], uint32_t *n_regs)
{
        struct amdgpu_device *adev = get_amdgpu_device(kgd);
        uint32_t sdma_offset = engine_id * SDMA1_REGISTER_OFFSET +
                queue_id * KFD_VI_SDMA_QUEUE_OFFSET;
        uint32_t i = 0, reg;
#undef HQD_N_REGS
#define HQD_N_REGS (19+4+2+3+7)

        *dump = kmalloc_array(HQD_N_REGS * 2, sizeof(uint32_t), GFP_KERNEL);
        if (*dump == NULL)
                return -ENOMEM;

        for (reg = mmSDMA0_RLC0_RB_CNTL; reg <= mmSDMA0_RLC0_DOORBELL; reg++)
                DUMP_REG(sdma_offset + reg);
        for (reg = mmSDMA0_RLC0_VIRTUAL_ADDR; reg <= mmSDMA0_RLC0_WATERMARK;
             reg++)
                DUMP_REG(sdma_offset + reg);
        for (reg = mmSDMA0_RLC0_CSA_ADDR_LO; reg <= mmSDMA0_RLC0_CSA_ADDR_HI;
             reg++)
                DUMP_REG(sdma_offset + reg);
        for (reg = mmSDMA0_RLC0_IB_SUB_REMAIN; reg <= mmSDMA0_RLC0_DUMMY_REG;
             reg++)
                DUMP_REG(sdma_offset + reg);
        for (reg = mmSDMA0_RLC0_MIDCMD_DATA0; reg <= mmSDMA0_RLC0_MIDCMD_CNTL;
             reg++)
                DUMP_REG(sdma_offset + reg);

        WARN_ON_ONCE(i != HQD_N_REGS);
        *n_regs = i;

        return 0;
}
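
/*
 * A queue slot counts as occupied when the HQD is active and its ring
 * base matches the given queue address (the CP_HQD_PQ_BASE registers
 * hold the address shifted right by 8).
 */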
static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
                                uint32_t pipe_id, uint32_t queue_id)
{
        struct amdgpu_device *adev = get_amdgpu_device(kgd);
        uint32_t act;
        bool retval = false;
        uint32_t low, high;

        acquire_queue(kgd, pipe_id, queue_id);
        act = RREG32(mmCP_HQD_ACTIVE);
        if (act) {
                low = lower_32_bits(queue_address >> 8);
                high = upper_32_bits(queue_address >> 8);

                if (low == RREG32(mmCP_HQD_PQ_BASE) &&
                                high == RREG32(mmCP_HQD_PQ_BASE_HI))
                        retval = true;
        }
        release_queue(kgd);
        return retval;
}
static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd)
{
        struct amdgpu_device *adev = get_amdgpu_device(kgd);
        struct vi_sdma_mqd *m;
        uint32_t sdma_base_addr;
        uint32_t sdma_rlc_rb_cntl;

        m = get_sdma_mqd(mqd);
        sdma_base_addr = get_sdma_base_addr(m);

        sdma_rlc_rb_cntl = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL);

        if (sdma_rlc_rb_cntl & SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK)
                return true;

        return false;
}
static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd,
                        enum kfd_preempt_type reset_type,
                        unsigned int utimeout, uint32_t pipe_id,
                        uint32_t queue_id)
{
        struct amdgpu_device *adev = get_amdgpu_device(kgd);
        uint32_t temp;
        enum hqd_dequeue_request_type type;
        unsigned long flags, end_jiffies;
        int retry;
        struct vi_mqd *m = get_mqd(mqd);

        if (adev->in_gpu_reset)
                return -EIO;

        acquire_queue(kgd, pipe_id, queue_id);

        if (m->cp_hqd_vmid == 0)
                WREG32_FIELD(RLC_CP_SCHEDULERS, scheduler1, 0);

        switch (reset_type) {
        case KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN:
                type = DRAIN_PIPE;
                break;
        case KFD_PREEMPT_TYPE_WAVEFRONT_RESET:
                type = RESET_WAVES;
                break;
        default:
                type = DRAIN_PIPE;
                break;
        }

        /* Workaround: If IQ timer is active and the wait time is close to or
         * equal to 0, dequeueing is not safe. Wait until either the wait time
         * is larger or timer is cleared. Also, ensure that IQ_REQ_PEND is
         * cleared before continuing. Also, ensure wait times are set to at
         * least 0x3.
         */
        local_irq_save(flags);
        preempt_disable();
        retry = 5000; /* wait for 500 usecs at maximum */
        while (true) {
                temp = RREG32(mmCP_HQD_IQ_TIMER);
                if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, PROCESSING_IQ)) {
                        pr_debug("HW is processing IQ\n");
                        goto loop;
                }
                if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, ACTIVE)) {
                        if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, RETRY_TYPE)
                                        == 3) /* SEM-rearm is safe */
                                break;
                        /* Wait time 3 is safe for CP, but our MMIO read/write
                         * time is close to 1 microsecond, so check for 10 to
                         * leave more buffer room
                         */
                        if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, WAIT_TIME)
                                        >= 10)
                                break;
                        pr_debug("IQ timer is active\n");
                } else
                        break;
loop:
                if (!retry) {
                        pr_err("CP HQD IQ timer status time out\n");
                        break;
                }
                ndelay(100);
                --retry;
        }
        retry = 1000;
        while (true) {
                temp = RREG32(mmCP_HQD_DEQUEUE_REQUEST);
                if (!(temp & CP_HQD_DEQUEUE_REQUEST__IQ_REQ_PEND_MASK))
                        break;
                pr_debug("Dequeue request is pending\n");

                if (!retry) {
                        pr_err("CP HQD dequeue request time out\n");
                        break;
                }
                ndelay(100);
                --retry;
        }
        local_irq_restore(flags);
        preempt_enable();

        WREG32(mmCP_HQD_DEQUEUE_REQUEST, type);

        end_jiffies = (utimeout * HZ / 1000) + jiffies;
        while (true) {
                temp = RREG32(mmCP_HQD_ACTIVE);
                if (!(temp & CP_HQD_ACTIVE__ACTIVE_MASK))
                        break;
                if (time_after(jiffies, end_jiffies)) {
                        pr_err("cp queue preemption time out.\n");
                        release_queue(kgd);
                        return -ETIME;
                }
                usleep_range(500, 1000);
        }

        release_queue(kgd);
        return 0;
}
static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
                                unsigned int utimeout)
{
        struct amdgpu_device *adev = get_amdgpu_device(kgd);
        struct vi_sdma_mqd *m;
        uint32_t sdma_base_addr;
        uint32_t temp;
        unsigned long end_jiffies = (utimeout * HZ / 1000) + jiffies;

        m = get_sdma_mqd(mqd);
        sdma_base_addr = get_sdma_base_addr(m);

        temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL);
        temp = temp & ~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK;
        WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, temp);

        while (true) {
                temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS);
                if (temp & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
                        break;
                if (time_after(jiffies, end_jiffies))
                        return -ETIME;
                usleep_range(500, 1000);
        }

        WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, 0);
        WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL,
                RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL) |
                SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK);

        m->sdmax_rlcx_rb_rptr = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR);

        return 0;
}
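
/*
 * The ATC_VMID<n>_PASID_MAPPING registers written in
 * kgd_set_pasid_vmid_mapping() can also be read back, giving the
 * reverse VMID-to-PASID lookup used by the two helpers below.
 */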
static bool get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd,
                                                        uint8_t vmid)
{
        uint32_t reg;
        struct amdgpu_device *adev = (struct amdgpu_device *) kgd;

        reg = RREG32(mmATC_VMID0_PASID_MAPPING + vmid);
        return reg & ATC_VMID0_PASID_MAPPING__VALID_MASK;
}
static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd,
                                                                uint8_t vmid)
{
        uint32_t reg;
        struct amdgpu_device *adev = (struct amdgpu_device *) kgd;

        reg = RREG32(mmATC_VMID0_PASID_MAPPING + vmid);
        return reg & ATC_VMID0_PASID_MAPPING__PASID_MASK;
}
static int kgd_address_watch_disable(struct kgd_dev *kgd)
{
        return 0;
}
static int kgd_address_watch_execute(struct kgd_dev *kgd,
                                        unsigned int watch_point_id,
                                        uint32_t cntl_val,
                                        uint32_t addr_hi,
                                        uint32_t addr_lo)
{
        return 0;
}
static int kgd_wave_control_execute(struct kgd_dev *kgd,
                                        uint32_t gfx_index_val,
                                        uint32_t sq_cmd)
{
        struct amdgpu_device *adev = get_amdgpu_device(kgd);
        uint32_t data = 0;

        mutex_lock(&adev->grbm_idx_mutex);

        WREG32(mmGRBM_GFX_INDEX, gfx_index_val);
        WREG32(mmSQ_CMD, sq_cmd);

        data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
                INSTANCE_BROADCAST_WRITES, 1);
        data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
                SH_BROADCAST_WRITES, 1);
        data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
                SE_BROADCAST_WRITES, 1);

        WREG32(mmGRBM_GFX_INDEX, data);
        mutex_unlock(&adev->grbm_idx_mutex);

        return 0;
}
static uint32_t kgd_address_watch_get_offset(struct kgd_dev *kgd,
                                        unsigned int watch_point_id,
                                        unsigned int reg_offset)
{
        return 0;
}
static void set_scratch_backing_va(struct kgd_dev *kgd,
                                        uint64_t va, uint32_t vmid)
{
        struct amdgpu_device *adev = (struct amdgpu_device *) kgd;

        lock_srbm(kgd, 0, 0, 0, vmid);
        WREG32(mmSH_HIDDEN_PRIVATE_BASE_VMID, va);
        unlock_srbm(kgd);
}
static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type)
{
        struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
        const union amdgpu_firmware_header *hdr;

        switch (type) {
        case KGD_ENGINE_PFP:
                hdr = (const union amdgpu_firmware_header *)
                                                adev->gfx.pfp_fw->data;
                break;

        case KGD_ENGINE_ME:
                hdr = (const union amdgpu_firmware_header *)
                                                adev->gfx.me_fw->data;
                break;

        case KGD_ENGINE_CE:
                hdr = (const union amdgpu_firmware_header *)
                                                adev->gfx.ce_fw->data;
                break;

        case KGD_ENGINE_MEC1:
                hdr = (const union amdgpu_firmware_header *)
                                                adev->gfx.mec_fw->data;
                break;

        case KGD_ENGINE_MEC2:
                hdr = (const union amdgpu_firmware_header *)
                                                adev->gfx.mec2_fw->data;
                break;

        case KGD_ENGINE_RLC:
                hdr = (const union amdgpu_firmware_header *)
                                                adev->gfx.rlc_fw->data;
                break;

        case KGD_ENGINE_SDMA1:
                hdr = (const union amdgpu_firmware_header *)
                                        adev->sdma.instance[0].fw->data;
                break;

        case KGD_ENGINE_SDMA2:
                hdr = (const union amdgpu_firmware_header *)
                                        adev->sdma.instance[1].fw->data;
                break;

        default:
                return 0;
        }

        if (hdr == NULL)
                return 0;

        /* Only 12 bit in use*/
        return hdr->common.ucode_version;
}
static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
                uint32_t page_table_base)
{
        struct amdgpu_device *adev = get_amdgpu_device(kgd);

        if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid)) {
                pr_err("trying to set page table base for wrong VMID\n");
                return;
        }
        WREG32(mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vmid - 8, page_table_base);
}
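
/*
 * TLB invalidation by PASID: scan the KFD-owned VMIDs and, for each one
 * whose ATC mapping is valid and matches the PASID, issue a
 * VM_INVALIDATE_REQUEST for that VMID.
 */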
static int invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid)
{
        struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
        int vmid;
        unsigned int tmp;

        if (adev->in_gpu_reset)
                return -EIO;

        for (vmid = 0; vmid < 16; vmid++) {
                if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid))
                        continue;

                tmp = RREG32(mmATC_VMID0_PASID_MAPPING + vmid);
                if ((tmp & ATC_VMID0_PASID_MAPPING__VALID_MASK) &&
                        (tmp & ATC_VMID0_PASID_MAPPING__PASID_MASK) == pasid) {
                        WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
                        RREG32(mmVM_INVALIDATE_RESPONSE);
                        break;
                }
        }

        return 0;
}
static int invalidate_tlbs_vmid(struct kgd_dev *kgd, uint16_t vmid)
{
        struct amdgpu_device *adev = (struct amdgpu_device *) kgd;

        if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid)) {
                pr_err("non kfd vmid %d\n", vmid);
                return 0;
        }

        WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
        RREG32(mmVM_INVALIDATE_RESPONSE);

        return 0;
}