/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/module.h>
#include <linux/fdtable.h>
#include <linux/uaccess.h>
#include <linux/firmware.h>
#include <linux/mmu_context.h>

#include "amdgpu_amdkfd.h"
#include "amdgpu_ucode.h"

#include "gca/gfx_8_0_sh_mask.h"
#include "gca/gfx_8_0_d.h"
#include "gca/gfx_8_0_enum.h"
#include "oss/oss_3_0_sh_mask.h"
#include "oss/oss_3_0_d.h"
#include "gmc/gmc_8_1_sh_mask.h"
#include "gmc/gmc_8_1_d.h"
#include "vi_structs.h"

enum hqd_dequeue_request_type {
	NO_ACTION = 0,
	DRAIN_PIPE,
	RESET_WAVES
};

/*
 * Register access functions
 */
static void kgd_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid,
		uint32_t sh_mem_config,
		uint32_t sh_mem_ape1_base, uint32_t sh_mem_ape1_limit,
		uint32_t sh_mem_bases);
static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid,
		unsigned int vmid);
static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id);
static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
		uint32_t queue_id, uint32_t __user *wptr,
		uint32_t wptr_shift, uint32_t wptr_mask,
		struct mm_struct *mm);
static int kgd_hqd_dump(struct kgd_dev *kgd,
		uint32_t pipe_id, uint32_t queue_id,
		uint32_t (**dump)[2], uint32_t *n_regs);
static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd,
		uint32_t __user *wptr, struct mm_struct *mm);
static int kgd_hqd_sdma_dump(struct kgd_dev *kgd,
		uint32_t engine_id, uint32_t queue_id,
		uint32_t (**dump)[2], uint32_t *n_regs);
static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
		uint32_t pipe_id, uint32_t queue_id);
static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd);
static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd,
		enum kfd_preempt_type reset_type,
		unsigned int utimeout, uint32_t pipe_id,
		uint32_t queue_id);
static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
		unsigned int utimeout);
static int kgd_address_watch_disable(struct kgd_dev *kgd);
static int kgd_address_watch_execute(struct kgd_dev *kgd,
		unsigned int watch_point_id,
		uint32_t cntl_val,
		uint32_t addr_hi,
		uint32_t addr_lo);
static int kgd_wave_control_execute(struct kgd_dev *kgd,
		uint32_t gfx_index_val,
		uint32_t sq_cmd);
static uint32_t kgd_address_watch_get_offset(struct kgd_dev *kgd,
		unsigned int watch_point_id,
		unsigned int reg_offset);
static bool get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd,
		uint8_t vmid);
static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd,
		uint8_t vmid);
static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type);
static void set_scratch_backing_va(struct kgd_dev *kgd,
		uint64_t va, uint32_t vmid);
static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
		uint64_t page_table_base);
static int invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid);
static int invalidate_tlbs_vmid(struct kgd_dev *kgd, uint16_t vmid);

/* Because of REG_GET_FIELD() being used, we put this function in the
 * asic specific file.
 */
static int get_tile_config(struct kgd_dev *kgd,
		struct tile_config *config)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

	config->gb_addr_config = adev->gfx.config.gb_addr_config;
	config->num_banks = REG_GET_FIELD(adev->gfx.config.mc_arb_ramcfg,
				MC_ARB_RAMCFG, NOOFBANK);
	config->num_ranks = REG_GET_FIELD(adev->gfx.config.mc_arb_ramcfg,
				MC_ARB_RAMCFG, NOOFRANKS);

	config->tile_config_ptr = adev->gfx.config.tile_mode_array;
	config->num_tile_configs =
			ARRAY_SIZE(adev->gfx.config.tile_mode_array);
	config->macro_tile_config_ptr =
			adev->gfx.config.macrotile_mode_array;
	config->num_macro_tile_configs =
			ARRAY_SIZE(adev->gfx.config.macrotile_mode_array);

	return 0;
}

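/* KGD->KFD callback table for GFX v8 (VI) ASICs. The KFD driver obtains
 * this table through amdgpu_amdkfd_gfx_8_0_get_functions() and uses it to
 * drive compute queues, ATC mappings and TLB invalidation on the device.
 */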
static const struct kfd2kgd_calls kfd2kgd = {
	.program_sh_mem_settings = kgd_program_sh_mem_settings,
	.set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping,
	.init_interrupts = kgd_init_interrupts,
	.hqd_load = kgd_hqd_load,
	.hqd_sdma_load = kgd_hqd_sdma_load,
	.hqd_dump = kgd_hqd_dump,
	.hqd_sdma_dump = kgd_hqd_sdma_dump,
	.hqd_is_occupied = kgd_hqd_is_occupied,
	.hqd_sdma_is_occupied = kgd_hqd_sdma_is_occupied,
	.hqd_destroy = kgd_hqd_destroy,
	.hqd_sdma_destroy = kgd_hqd_sdma_destroy,
	.address_watch_disable = kgd_address_watch_disable,
	.address_watch_execute = kgd_address_watch_execute,
	.wave_control_execute = kgd_wave_control_execute,
	.address_watch_get_offset = kgd_address_watch_get_offset,
	.get_atc_vmid_pasid_mapping_pasid = get_atc_vmid_pasid_mapping_pasid,
	.get_atc_vmid_pasid_mapping_valid = get_atc_vmid_pasid_mapping_valid,
	.get_fw_version = get_fw_version,
	.set_scratch_backing_va = set_scratch_backing_va,
	.get_tile_config = get_tile_config,
	.set_vm_context_page_table_base = set_vm_context_page_table_base,
	.invalidate_tlbs = invalidate_tlbs,
	.invalidate_tlbs_vmid = invalidate_tlbs_vmid,
};

struct kfd2kgd_calls *amdgpu_amdkfd_gfx_8_0_get_functions(void)
{
	return (struct kfd2kgd_calls *)&kfd2kgd;
}

static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
{
	return (struct amdgpu_device *)kgd;
}

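/* lock_srbm()/unlock_srbm() bracket accesses to registers that are indexed
 * through the SRBM: the MEC/pipe/queue/VMID written to SRBM_GFX_CNTL selects
 * which hardware instance the following register reads and writes touch.
 * The srbm_mutex serializes all users of that selector.
 */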
static void lock_srbm(struct kgd_dev *kgd, uint32_t mec, uint32_t pipe,
			uint32_t queue, uint32_t vmid)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint32_t value = PIPEID(pipe) | MEID(mec) | VMID(vmid) | QUEUEID(queue);

	mutex_lock(&adev->srbm_mutex);
	WREG32(mmSRBM_GFX_CNTL, value);
}

static void unlock_srbm(struct kgd_dev *kgd)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);

	WREG32(mmSRBM_GFX_CNTL, 0);
	mutex_unlock(&adev->srbm_mutex);
}

static void acquire_queue(struct kgd_dev *kgd, uint32_t pipe_id,
			uint32_t queue_id)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);

	uint32_t mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
	uint32_t pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);

	lock_srbm(kgd, mec, pipe, queue_id, 0);
}

static void release_queue(struct kgd_dev *kgd)
{
	unlock_srbm(kgd);
}

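/* Program the per-VMID shader memory aperture registers (SH_MEM_*) while
 * the target VMID is selected through the SRBM.
 */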
static void kgd_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid,
					uint32_t sh_mem_config,
					uint32_t sh_mem_ape1_base,
					uint32_t sh_mem_ape1_limit,
					uint32_t sh_mem_bases)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);

	lock_srbm(kgd, 0, 0, 0, vmid);

	WREG32(mmSH_MEM_CONFIG, sh_mem_config);
	WREG32(mmSH_MEM_APE1_BASE, sh_mem_ape1_base);
	WREG32(mmSH_MEM_APE1_LIMIT, sh_mem_ape1_limit);
	WREG32(mmSH_MEM_BASES, sh_mem_bases);

	unlock_srbm(kgd);
}

static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid,
					unsigned int vmid)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);

	/*
	 * We have to assume that there is no outstanding mapping.
	 * The ATC_VMID_PASID_MAPPING_UPDATE_STATUS bit could be 0 because
	 * a mapping is in progress or because a mapping finished
	 * and the SW cleared it.
	 * So the protocol is to always wait & clear.
	 */
	uint32_t pasid_mapping = (pasid == 0) ? 0 : (uint32_t)pasid |
			ATC_VMID0_PASID_MAPPING__VALID_MASK;

	WREG32(mmATC_VMID0_PASID_MAPPING + vmid, pasid_mapping);

	while (!(RREG32(mmATC_VMID_PASID_MAPPING_UPDATE_STATUS) & (1U << vmid)))
		cpu_relax();
	WREG32(mmATC_VMID_PASID_MAPPING_UPDATE_STATUS, 1U << vmid);

	/* Mapping vmid to pasid also for IH block */
	WREG32(mmIH_VMID_0_LUT + vmid, pasid_mapping);

	return 0;
}

static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint32_t mec;
	uint32_t pipe;

	mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
	pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);

	lock_srbm(kgd, mec, pipe, 0, 0);

	WREG32(mmCPC_INT_CNTL, CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK |
			CP_INT_CNTL_RING0__OPCODE_ERROR_INT_ENABLE_MASK);

	unlock_srbm(kgd);

	return 0;
}

static inline uint32_t get_sdma_base_addr(struct vi_sdma_mqd *m)
{
	uint32_t retval;

	retval = m->sdma_engine_id * SDMA1_REGISTER_OFFSET +
			m->sdma_queue_id * KFD_VI_SDMA_QUEUE_OFFSET;
	pr_debug("kfd: sdma base address: 0x%x\n", retval);

	return retval;
}

static inline struct vi_mqd *get_mqd(void *mqd)
{
	return (struct vi_mqd *)mqd;
}

static inline struct vi_sdma_mqd *get_sdma_mqd(void *mqd)
{
	return (struct vi_sdma_mqd *)mqd;
}

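/* Restore a compute HQD from its memory queue descriptor (MQD): program the
 * HQD register block, re-arm the doorbell, restore the user-space write
 * pointer when it can be read safely, and finally mark the queue active.
 */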
static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
			uint32_t queue_id, uint32_t __user *wptr,
			uint32_t wptr_shift, uint32_t wptr_mask,
			struct mm_struct *mm)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct vi_mqd *m;
	uint32_t *mqd_hqd;
	uint32_t reg, wptr_val, data;
	bool valid_wptr = false;

	m = get_mqd(mqd);

	acquire_queue(kgd, pipe_id, queue_id);

	/* HIQ is set during driver init period with vmid set to 0*/
	if (m->cp_hqd_vmid == 0) {
		uint32_t value, mec, pipe;

		mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
		pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);

		pr_debug("kfd: set HIQ, mec:%d, pipe:%d, queue:%d.\n",
			mec, pipe, queue_id);
		value = RREG32(mmRLC_CP_SCHEDULERS);
		value = REG_SET_FIELD(value, RLC_CP_SCHEDULERS, scheduler1,
			((mec << 5) | (pipe << 3) | queue_id | 0x80));
		WREG32(mmRLC_CP_SCHEDULERS, value);
	}

	/* HQD registers extend from CP_MQD_BASE_ADDR to CP_HQD_EOP_WPTR_MEM. */
	mqd_hqd = &m->cp_mqd_base_addr_lo;

	for (reg = mmCP_MQD_BASE_ADDR; reg <= mmCP_HQD_EOP_CONTROL; reg++)
		WREG32(reg, mqd_hqd[reg - mmCP_MQD_BASE_ADDR]);

	/* Tonga errata: EOP RPTR/WPTR should be left unmodified.
	 * This is safe since EOP RPTR==WPTR for any inactive HQD
	 * on ASICs that do not support context-save.
	 * EOP writes/reads can start anywhere in the ring.
	 */
	if (get_amdgpu_device(kgd)->asic_type != CHIP_TONGA) {
		WREG32(mmCP_HQD_EOP_RPTR, m->cp_hqd_eop_rptr);
		WREG32(mmCP_HQD_EOP_WPTR, m->cp_hqd_eop_wptr);
		WREG32(mmCP_HQD_EOP_WPTR_MEM, m->cp_hqd_eop_wptr_mem);
	}

	for (reg = mmCP_HQD_EOP_EVENTS; reg <= mmCP_HQD_ERROR; reg++)
		WREG32(reg, mqd_hqd[reg - mmCP_MQD_BASE_ADDR]);

	/* Copy userspace write pointer value to register.
	 * Activate doorbell logic to monitor subsequent changes.
	 */
	data = REG_SET_FIELD(m->cp_hqd_pq_doorbell_control,
			CP_HQD_PQ_DOORBELL_CONTROL, DOORBELL_EN, 1);
	WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL, data);

	/* read_user_ptr may take the mm->mmap_sem.
	 * release srbm_mutex to avoid circular dependency between
	 * srbm_mutex->mm_sem->reservation_ww_class_mutex->srbm_mutex.
	 */
	release_queue(kgd);
	valid_wptr = read_user_wptr(mm, wptr, wptr_val);
	acquire_queue(kgd, pipe_id, queue_id);
	if (valid_wptr)
		WREG32(mmCP_HQD_PQ_WPTR, (wptr_val << wptr_shift) & wptr_mask);

	data = REG_SET_FIELD(m->cp_hqd_active, CP_HQD_ACTIVE, ACTIVE, 1);
	WREG32(mmCP_HQD_ACTIVE, data);

	release_queue(kgd);

	return 0;
}

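/* Snapshot the static thread-management registers and the full HQD register
 * range for one queue into a kmalloc'ed table of (byte offset, value) pairs
 * that is handed back to the caller.
 */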
static int kgd_hqd_dump(struct kgd_dev *kgd,
			uint32_t pipe_id, uint32_t queue_id,
			uint32_t (**dump)[2], uint32_t *n_regs)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint32_t i = 0, reg;
#define HQD_N_REGS (54+4)
#define DUMP_REG(addr) do {				\
		if (WARN_ON_ONCE(i >= HQD_N_REGS))	\
			break;				\
		(*dump)[i][0] = (addr) << 2;		\
		(*dump)[i++][1] = RREG32(addr);		\
	} while (0)

	*dump = kmalloc_array(HQD_N_REGS * 2, sizeof(uint32_t), GFP_KERNEL);
	if (*dump == NULL)
		return -ENOMEM;

	acquire_queue(kgd, pipe_id, queue_id);

	DUMP_REG(mmCOMPUTE_STATIC_THREAD_MGMT_SE0);
	DUMP_REG(mmCOMPUTE_STATIC_THREAD_MGMT_SE1);
	DUMP_REG(mmCOMPUTE_STATIC_THREAD_MGMT_SE2);
	DUMP_REG(mmCOMPUTE_STATIC_THREAD_MGMT_SE3);

	for (reg = mmCP_MQD_BASE_ADDR; reg <= mmCP_HQD_EOP_DONES; reg++)
		DUMP_REG(reg);

	release_queue(kgd);

	WARN_ON_ONCE(i != HQD_N_REGS);
	*n_regs = i;

	return 0;
}

static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd,
			     uint32_t __user *wptr, struct mm_struct *mm)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct vi_sdma_mqd *m;
	unsigned long end_jiffies;
	uint32_t sdma_base_addr;
	uint32_t data;

	m = get_sdma_mqd(mqd);
	sdma_base_addr = get_sdma_base_addr(m);
	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL,
		m->sdmax_rlcx_rb_cntl & (~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK));

	end_jiffies = msecs_to_jiffies(2000) + jiffies;
	while (true) {
		data = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS);
		if (data & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
			break;
		if (time_after(jiffies, end_jiffies))
			return -ETIME;
		usleep_range(500, 1000);
	}
	if (m->sdma_engine_id) {
		data = RREG32(mmSDMA1_GFX_CONTEXT_CNTL);
		data = REG_SET_FIELD(data, SDMA1_GFX_CONTEXT_CNTL,
				RESUME_CTX, 0);
		WREG32(mmSDMA1_GFX_CONTEXT_CNTL, data);
	} else {
		data = RREG32(mmSDMA0_GFX_CONTEXT_CNTL);
		data = REG_SET_FIELD(data, SDMA0_GFX_CONTEXT_CNTL,
				RESUME_CTX, 0);
		WREG32(mmSDMA0_GFX_CONTEXT_CNTL, data);
	}

	data = REG_SET_FIELD(m->sdmax_rlcx_doorbell, SDMA0_RLC0_DOORBELL,
			ENABLE, 1);
	WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, data);
	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR, m->sdmax_rlcx_rb_rptr);

	if (read_user_wptr(mm, wptr, data))
		WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR, data);
	else
		WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR,
			m->sdmax_rlcx_rb_rptr);

	WREG32(sdma_base_addr + mmSDMA0_RLC0_VIRTUAL_ADDR,
			m->sdmax_rlcx_virtual_addr);
	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE, m->sdmax_rlcx_rb_base);
	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE_HI,
			m->sdmax_rlcx_rb_base_hi);
	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_LO,
			m->sdmax_rlcx_rb_rptr_addr_lo);
	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_HI,
			m->sdmax_rlcx_rb_rptr_addr_hi);

	data = REG_SET_FIELD(m->sdmax_rlcx_rb_cntl, SDMA0_RLC0_RB_CNTL,
			RB_ENABLE, 1);
	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, data);

	return 0;
}

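/* Dump the SDMA RLC queue registers for the given engine/queue in the same
 * (byte offset, value) format used by kgd_hqd_dump().
 */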
static int kgd_hqd_sdma_dump(struct kgd_dev *kgd,
			     uint32_t engine_id, uint32_t queue_id,
			     uint32_t (**dump)[2], uint32_t *n_regs)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint32_t sdma_offset = engine_id * SDMA1_REGISTER_OFFSET +
		queue_id * KFD_VI_SDMA_QUEUE_OFFSET;
	uint32_t i = 0, reg;
#undef HQD_N_REGS
#define HQD_N_REGS (19+4+2+3+7)

	*dump = kmalloc_array(HQD_N_REGS * 2, sizeof(uint32_t), GFP_KERNEL);
	if (*dump == NULL)
		return -ENOMEM;

	for (reg = mmSDMA0_RLC0_RB_CNTL; reg <= mmSDMA0_RLC0_DOORBELL; reg++)
		DUMP_REG(sdma_offset + reg);
	for (reg = mmSDMA0_RLC0_VIRTUAL_ADDR; reg <= mmSDMA0_RLC0_WATERMARK;
	     reg++)
		DUMP_REG(sdma_offset + reg);
	for (reg = mmSDMA0_RLC0_CSA_ADDR_LO; reg <= mmSDMA0_RLC0_CSA_ADDR_HI;
	     reg++)
		DUMP_REG(sdma_offset + reg);
	for (reg = mmSDMA0_RLC0_IB_SUB_REMAIN; reg <= mmSDMA0_RLC0_DUMMY_REG;
	     reg++)
		DUMP_REG(sdma_offset + reg);
	for (reg = mmSDMA0_RLC0_MIDCMD_DATA0; reg <= mmSDMA0_RLC0_MIDCMD_CNTL;
	     reg++)
		DUMP_REG(sdma_offset + reg);

	WARN_ON_ONCE(i != HQD_N_REGS);
	*n_regs = i;

	return 0;
}

static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
				uint32_t pipe_id, uint32_t queue_id)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint32_t act;
	bool retval = false;
	uint32_t low, high;

	acquire_queue(kgd, pipe_id, queue_id);
	act = RREG32(mmCP_HQD_ACTIVE);
	if (act) {
		low = lower_32_bits(queue_address >> 8);
		high = upper_32_bits(queue_address >> 8);

		if (low == RREG32(mmCP_HQD_PQ_BASE) &&
				high == RREG32(mmCP_HQD_PQ_BASE_HI))
			retval = true;
	}
	release_queue(kgd);
	return retval;
}

static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct vi_sdma_mqd *m;
	uint32_t sdma_base_addr;
	uint32_t sdma_rlc_rb_cntl;

	m = get_sdma_mqd(mqd);
	sdma_base_addr = get_sdma_base_addr(m);

	sdma_rlc_rb_cntl = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL);

	if (sdma_rlc_rb_cntl & SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK)
		return true;

	return false;
}

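/* Preempt an active compute queue: work around an IQ-timer race, issue the
 * requested dequeue type through CP_HQD_DEQUEUE_REQUEST, then wait for
 * CP_HQD_ACTIVE to clear within the caller-supplied timeout.
 */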
static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd,
			enum kfd_preempt_type reset_type,
			unsigned int utimeout, uint32_t pipe_id,
			uint32_t queue_id)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint32_t temp;
	enum hqd_dequeue_request_type type;
	unsigned long flags, end_jiffies;
	int retry;
	struct vi_mqd *m = get_mqd(mqd);

	if (adev->in_gpu_reset)
		return -EIO;

	acquire_queue(kgd, pipe_id, queue_id);

	if (m->cp_hqd_vmid == 0)
		WREG32_FIELD(RLC_CP_SCHEDULERS, scheduler1, 0);

	switch (reset_type) {
	case KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN:
		type = DRAIN_PIPE;
		break;
	case KFD_PREEMPT_TYPE_WAVEFRONT_RESET:
		type = RESET_WAVES;
		break;
	default:
		type = DRAIN_PIPE;
		break;
	}

	/* Workaround: If IQ timer is active and the wait time is close to or
	 * equal to 0, dequeueing is not safe. Wait until either the wait time
	 * is larger or timer is cleared. Also, ensure that IQ_REQ_PEND is
	 * cleared before continuing. Also, ensure wait times are set to at
	 * least 0x3.
	 */
	local_irq_save(flags);
	retry = 5000; /* wait for 500 usecs at maximum */
	while (true) {
		temp = RREG32(mmCP_HQD_IQ_TIMER);
		if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, PROCESSING_IQ)) {
			pr_debug("HW is processing IQ\n");
			goto loop;
		}
		if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, ACTIVE)) {
			if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, RETRY_TYPE)
					== 3) /* SEM-rearm is safe */
				break;
			/* Wait time 3 is safe for CP, but our MMIO read/write
			 * time is close to 1 microsecond, so check for 10 to
			 * leave more buffer room
			 */
			if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, WAIT_TIME)
					>= 10)
				break;
			pr_debug("IQ timer is active\n");
		} else
			break;
loop:
		if (!retry) {
			pr_err("CP HQD IQ timer status time out\n");
			break;
		}
		ndelay(100);
		--retry;
	}
	retry = 1000;
	while (true) {
		temp = RREG32(mmCP_HQD_DEQUEUE_REQUEST);
		if (!(temp & CP_HQD_DEQUEUE_REQUEST__IQ_REQ_PEND_MASK))
			break;
		pr_debug("Dequeue request is pending\n");

		if (!retry) {
			pr_err("CP HQD dequeue request time out\n");
			break;
		}
		ndelay(100);
		--retry;
	}
	local_irq_restore(flags);

	WREG32(mmCP_HQD_DEQUEUE_REQUEST, type);

	end_jiffies = (utimeout * HZ / 1000) + jiffies;
	while (true) {
		temp = RREG32(mmCP_HQD_ACTIVE);
		if (!(temp & CP_HQD_ACTIVE__ACTIVE_MASK))
			break;
		if (time_after(jiffies, end_jiffies)) {
			pr_err("cp queue preemption time out.\n");
			release_queue(kgd);
			return -ETIME;
		}
		usleep_range(500, 1000);
	}

	release_queue(kgd);
	return 0;
}

static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
				unsigned int utimeout)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct vi_sdma_mqd *m;
	uint32_t sdma_base_addr;
	uint32_t temp;
	unsigned long end_jiffies = (utimeout * HZ / 1000) + jiffies;

	m = get_sdma_mqd(mqd);
	sdma_base_addr = get_sdma_base_addr(m);

	temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL);
	temp = temp & ~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK;
	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, temp);

	while (true) {
		temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS);
		if (temp & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
			break;
		if (time_after(jiffies, end_jiffies))
			return -ETIME;
		usleep_range(500, 1000);
	}

	WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, 0);
	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL,
		RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL) |
		SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK);

	m->sdmax_rlcx_rb_rptr = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR);

	return 0;
}

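/* Helpers for reading back the per-VMID ATC PASID mapping registers. */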
static bool get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd,
							uint8_t vmid)
{
	uint32_t reg;
	struct amdgpu_device *adev = (struct amdgpu_device *) kgd;

	reg = RREG32(mmATC_VMID0_PASID_MAPPING + vmid);
	return reg & ATC_VMID0_PASID_MAPPING__VALID_MASK;
}

static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd,
							uint8_t vmid)
{
	uint32_t reg;
	struct amdgpu_device *adev = (struct amdgpu_device *) kgd;

	reg = RREG32(mmATC_VMID0_PASID_MAPPING + vmid);
	return reg & ATC_VMID0_PASID_MAPPING__PASID_MASK;
}

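/* Address-watch and wave-control debug hooks. Address watch is not wired up
 * for GFX v8 in this interface, so the watch hooks below are no-op stubs.
 */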
static int kgd_address_watch_disable(struct kgd_dev *kgd)
{
	return 0;
}

static int kgd_address_watch_execute(struct kgd_dev *kgd,
					unsigned int watch_point_id,
					uint32_t cntl_val,
					uint32_t addr_hi,
					uint32_t addr_lo)
{
	return 0;
}

static int kgd_wave_control_execute(struct kgd_dev *kgd,
					uint32_t gfx_index_val,
					uint32_t sq_cmd)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint32_t data = 0;

	mutex_lock(&adev->grbm_idx_mutex);

	WREG32(mmGRBM_GFX_INDEX, gfx_index_val);
	WREG32(mmSQ_CMD, sq_cmd);

	data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
		INSTANCE_BROADCAST_WRITES, 1);
	data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
		SH_BROADCAST_WRITES, 1);
	data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
		SE_BROADCAST_WRITES, 1);

	WREG32(mmGRBM_GFX_INDEX, data);
	mutex_unlock(&adev->grbm_idx_mutex);

	return 0;
}

static uint32_t kgd_address_watch_get_offset(struct kgd_dev *kgd,
					unsigned int watch_point_id,
					unsigned int reg_offset)
{
	return 0;
}

static void set_scratch_backing_va(struct kgd_dev *kgd,
					uint64_t va, uint32_t vmid)
{
	struct amdgpu_device *adev = (struct amdgpu_device *) kgd;

	lock_srbm(kgd, 0, 0, 0, vmid);
	WREG32(mmSH_HIDDEN_PRIVATE_BASE_VMID, va);
	unlock_srbm(kgd);
}

static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type)
{
	struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
	const union amdgpu_firmware_header *hdr;

	switch (type) {
	case KGD_ENGINE_PFP:
		hdr = (const union amdgpu_firmware_header *)
						adev->gfx.pfp_fw->data;
		break;

	case KGD_ENGINE_ME:
		hdr = (const union amdgpu_firmware_header *)
						adev->gfx.me_fw->data;
		break;

	case KGD_ENGINE_CE:
		hdr = (const union amdgpu_firmware_header *)
						adev->gfx.ce_fw->data;
		break;

	case KGD_ENGINE_MEC1:
		hdr = (const union amdgpu_firmware_header *)
						adev->gfx.mec_fw->data;
		break;

	case KGD_ENGINE_MEC2:
		hdr = (const union amdgpu_firmware_header *)
						adev->gfx.mec2_fw->data;
		break;

	case KGD_ENGINE_RLC:
		hdr = (const union amdgpu_firmware_header *)
						adev->gfx.rlc_fw->data;
		break;

	case KGD_ENGINE_SDMA1:
		hdr = (const union amdgpu_firmware_header *)
						adev->sdma.instance[0].fw->data;
		break;

	case KGD_ENGINE_SDMA2:
		hdr = (const union amdgpu_firmware_header *)
						adev->sdma.instance[1].fw->data;
		break;

	default:
		return 0;
	}

	if (hdr == NULL)
		return 0;

	/* Only 12 bit in use*/
	return hdr->common.ucode_version;
}

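/* Point a KFD-owned VMID (8..15 on GFX v8) at a new page-directory base;
 * only the lower 32 bits of the address are programmed here.
 */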
static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
		uint64_t page_table_base)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);

	if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid)) {
		pr_err("trying to set page table base for wrong VMID\n");
		return;
	}
	WREG32(mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vmid - 8,
			lower_32_bits(page_table_base));
}

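/* TLB invalidation: flush every KFD VMID whose ATC mapping matches the given
 * PASID, or flush one specific VMID directly.
 */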
static int invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid)
{
	struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
	int vmid;
	unsigned int tmp;

	if (adev->in_gpu_reset)
		return -EIO;

	for (vmid = 0; vmid < 16; vmid++) {
		if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid))
			continue;

		tmp = RREG32(mmATC_VMID0_PASID_MAPPING + vmid);
		if ((tmp & ATC_VMID0_PASID_MAPPING__VALID_MASK) &&
			(tmp & ATC_VMID0_PASID_MAPPING__PASID_MASK) == pasid) {
			WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
			RREG32(mmVM_INVALIDATE_RESPONSE);
			break;
		}
	}

	return 0;
}

static int invalidate_tlbs_vmid(struct kgd_dev *kgd, uint16_t vmid)
{
	struct amdgpu_device *adev = (struct amdgpu_device *) kgd;

	if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid)) {
		pr_err("non kfd vmid %d\n", vmid);
		return -EINVAL;
	}

	WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
	RREG32(mmVM_INVALIDATE_RESPONSE);
	return 0;
}