/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/fdtable.h>
#include <linux/uaccess.h>
#include <linux/firmware.h>
#include <linux/mmu_context.h>

#include "amdgpu.h"
#include "amdgpu_amdkfd.h"
#include "amdgpu_ucode.h"

#include "gca/gfx_7_2_d.h"
#include "gca/gfx_7_2_enum.h"
#include "gca/gfx_7_2_sh_mask.h"
#include "oss/oss_2_0_d.h"
#include "oss/oss_2_0_sh_mask.h"
#include "gmc/gmc_7_1_d.h"
#include "gmc/gmc_7_1_sh_mask.h"
#include "cik_structs.h"
enum hqd_dequeue_request_type {
        NO_ACTION = 0,
        DRAIN_PIPE,
        RESET_WAVES
};

enum {
        MAX_TRAPID = 8,         /* 3 bits in the bitfield. */
        MAX_WATCH_ADDRESSES = 4
};
enum {
        ADDRESS_WATCH_REG_ADDR_HI = 0,
        ADDRESS_WATCH_REG_ADDR_LO,
        ADDRESS_WATCH_REG_CNTL,
        ADDRESS_WATCH_REG_MAX
};

/* not defined in the CI/KV reg file */
enum {
        ADDRESS_WATCH_REG_CNTL_ATC_BIT = 0x10000000UL,
        ADDRESS_WATCH_REG_CNTL_DEFAULT_MASK = 0x00FFFFFF,
        ADDRESS_WATCH_REG_ADDLOW_MASK_EXTENSION = 0x03000000,
        /* extend the mask to 26 bits to match the low address field */
        ADDRESS_WATCH_REG_ADDLOW_SHIFT = 6,
        ADDRESS_WATCH_REG_ADDHIGH_MASK = 0xFFFF
};
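/* Flat MMIO offset table for the TCP address-watch registers: each of the
 * MAX_WATCH_ADDRESSES watch points contributes ADDRESS_WATCH_REG_MAX
 * consecutive entries (ADDR_HI, ADDR_LO, CNTL), so entry
 * [id * ADDRESS_WATCH_REG_MAX + reg] selects one register of one watch point.
 */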
static const uint32_t watchRegs[MAX_WATCH_ADDRESSES * ADDRESS_WATCH_REG_MAX] = {
        mmTCP_WATCH0_ADDR_H, mmTCP_WATCH0_ADDR_L, mmTCP_WATCH0_CNTL,
        mmTCP_WATCH1_ADDR_H, mmTCP_WATCH1_ADDR_L, mmTCP_WATCH1_CNTL,
        mmTCP_WATCH2_ADDR_H, mmTCP_WATCH2_ADDR_L, mmTCP_WATCH2_CNTL,
        mmTCP_WATCH3_ADDR_H, mmTCP_WATCH3_ADDR_L, mmTCP_WATCH3_CNTL
};
union TCP_WATCH_CNTL_BITS {
        struct {
                uint32_t mask:24;
                uint32_t vmid:4;
                uint32_t atc:1;
                uint32_t mode:2;
                uint32_t valid:1;
        } bitfields, bits;
        uint32_t u32All;
        signed int i32All;
        float f32All;
};
/*
 * Register access functions
 */
static void kgd_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid,
                uint32_t sh_mem_config, uint32_t sh_mem_ape1_base,
                uint32_t sh_mem_ape1_limit, uint32_t sh_mem_bases);

static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid,
                unsigned int vmid);

static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id);

static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
                uint32_t queue_id, uint32_t __user *wptr,
                uint32_t wptr_shift, uint32_t wptr_mask,
                struct mm_struct *mm);

static int kgd_hqd_dump(struct kgd_dev *kgd,
                uint32_t pipe_id, uint32_t queue_id,
                uint32_t (**dump)[2], uint32_t *n_regs);

static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd,
                uint32_t __user *wptr, struct mm_struct *mm);

static int kgd_hqd_sdma_dump(struct kgd_dev *kgd,
                uint32_t engine_id, uint32_t queue_id,
                uint32_t (**dump)[2], uint32_t *n_regs);

static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
                uint32_t pipe_id, uint32_t queue_id);

static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd,
                enum kfd_preempt_type reset_type,
                unsigned int utimeout, uint32_t pipe_id,
                uint32_t queue_id);

static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd);

static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
                unsigned int utimeout);

static int kgd_address_watch_disable(struct kgd_dev *kgd);

static int kgd_address_watch_execute(struct kgd_dev *kgd,
                unsigned int watch_point_id,
                uint32_t cntl_val,
                uint32_t addr_hi,
                uint32_t addr_lo);

static int kgd_wave_control_execute(struct kgd_dev *kgd,
                uint32_t gfx_index_val,
                uint32_t sq_cmd);

static uint32_t kgd_address_watch_get_offset(struct kgd_dev *kgd,
                unsigned int watch_point_id,
                unsigned int reg_offset);

static bool get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd, uint8_t vmid);

static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd,
                uint8_t vmid);

static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type);

static void set_scratch_backing_va(struct kgd_dev *kgd,
                uint64_t va, uint32_t vmid);

static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
                uint64_t page_table_base);

static int invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid);

static int invalidate_tlbs_vmid(struct kgd_dev *kgd, uint16_t vmid);

static uint32_t read_vmid_from_vmfault_reg(struct kgd_dev *kgd);
/* Because of REG_GET_FIELD() being used, we put this function in the
 * asic specific file.
 */
static int get_tile_config(struct kgd_dev *kgd,
                struct tile_config *config)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

        config->gb_addr_config = adev->gfx.config.gb_addr_config;
        config->num_banks = REG_GET_FIELD(adev->gfx.config.mc_arb_ramcfg,
                                MC_ARB_RAMCFG, NOOFBANK);
        config->num_ranks = REG_GET_FIELD(adev->gfx.config.mc_arb_ramcfg,
                                MC_ARB_RAMCFG, NOOFRANKS);

        config->tile_config_ptr = adev->gfx.config.tile_mode_array;
        config->num_tile_configs =
                        ARRAY_SIZE(adev->gfx.config.tile_mode_array);
        config->macro_tile_config_ptr =
                        adev->gfx.config.macrotile_mode_array;
        config->num_macro_tile_configs =
                        ARRAY_SIZE(adev->gfx.config.macrotile_mode_array);

        return 0;
}
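/* Dispatch table exported to KFD through amdgpu_amdkfd_gfx_7_get_functions()
 * below; every entry points at one of the CIK-specific implementations
 * declared above.
 */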
static const struct kfd2kgd_calls kfd2kgd = {
        .program_sh_mem_settings = kgd_program_sh_mem_settings,
        .set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping,
        .init_interrupts = kgd_init_interrupts,
        .hqd_load = kgd_hqd_load,
        .hqd_sdma_load = kgd_hqd_sdma_load,
        .hqd_dump = kgd_hqd_dump,
        .hqd_sdma_dump = kgd_hqd_sdma_dump,
        .hqd_is_occupied = kgd_hqd_is_occupied,
        .hqd_sdma_is_occupied = kgd_hqd_sdma_is_occupied,
        .hqd_destroy = kgd_hqd_destroy,
        .hqd_sdma_destroy = kgd_hqd_sdma_destroy,
        .address_watch_disable = kgd_address_watch_disable,
        .address_watch_execute = kgd_address_watch_execute,
        .wave_control_execute = kgd_wave_control_execute,
        .address_watch_get_offset = kgd_address_watch_get_offset,
        .get_atc_vmid_pasid_mapping_pasid = get_atc_vmid_pasid_mapping_pasid,
        .get_atc_vmid_pasid_mapping_valid = get_atc_vmid_pasid_mapping_valid,
        .get_fw_version = get_fw_version,
        .set_scratch_backing_va = set_scratch_backing_va,
        .get_tile_config = get_tile_config,
        .set_vm_context_page_table_base = set_vm_context_page_table_base,
        .invalidate_tlbs = invalidate_tlbs,
        .invalidate_tlbs_vmid = invalidate_tlbs_vmid,
        .read_vmid_from_vmfault_reg = read_vmid_from_vmfault_reg,
};
struct kfd2kgd_calls *amdgpu_amdkfd_gfx_7_get_functions(void)
{
        return (struct kfd2kgd_calls *)&kfd2kgd;
}
static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
{
        return (struct amdgpu_device *)kgd;
}
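/* lock_srbm()/unlock_srbm() bracket every access to the SRBM-banked CP
 * registers: srbm_mutex is held while SRBM_GFX_CNTL selects the target
 * ME/pipe/queue/VMID, and unlock_srbm() writes the register back to 0 before
 * dropping the mutex.
 */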
static void lock_srbm(struct kgd_dev *kgd, uint32_t mec, uint32_t pipe,
                        uint32_t queue, uint32_t vmid)
{
        struct amdgpu_device *adev = get_amdgpu_device(kgd);
        uint32_t value = PIPEID(pipe) | MEID(mec) | VMID(vmid) | QUEUEID(queue);

        mutex_lock(&adev->srbm_mutex);
        WREG32(mmSRBM_GFX_CNTL, value);
}
static void unlock_srbm(struct kgd_dev *kgd)
{
        struct amdgpu_device *adev = get_amdgpu_device(kgd);

        WREG32(mmSRBM_GFX_CNTL, 0);
        mutex_unlock(&adev->srbm_mutex);
}
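/* acquire_queue() maps a KFD pipe_id onto a compute MEC and a hardware pipe
 * within it (the "+ 1" skips ME 0, the graphics micro engine) and selects that
 * queue's HQD registers via lock_srbm(); callers pair it with release_queue().
 */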
static void acquire_queue(struct kgd_dev *kgd, uint32_t pipe_id,
                        uint32_t queue_id)
{
        struct amdgpu_device *adev = get_amdgpu_device(kgd);

        uint32_t mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
        uint32_t pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);

        lock_srbm(kgd, mec, pipe, queue_id, 0);
}
static void release_queue(struct kgd_dev *kgd)
{
        unlock_srbm(kgd);
}
static void kgd_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid,
                                        uint32_t sh_mem_config,
                                        uint32_t sh_mem_ape1_base,
                                        uint32_t sh_mem_ape1_limit,
                                        uint32_t sh_mem_bases)
{
        struct amdgpu_device *adev = get_amdgpu_device(kgd);

        lock_srbm(kgd, 0, 0, 0, vmid);

        WREG32(mmSH_MEM_CONFIG, sh_mem_config);
        WREG32(mmSH_MEM_APE1_BASE, sh_mem_ape1_base);
        WREG32(mmSH_MEM_APE1_LIMIT, sh_mem_ape1_limit);
        WREG32(mmSH_MEM_BASES, sh_mem_bases);

        unlock_srbm(kgd);
}
static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid,
                                        unsigned int vmid)
{
        struct amdgpu_device *adev = get_amdgpu_device(kgd);

        /*
         * We have to assume that there is no outstanding mapping.
         * The ATC_VMID_PASID_MAPPING_UPDATE_STATUS bit could be 0 because
         * a mapping is in progress or because a mapping finished and the
         * SW cleared it. So the protocol is to always wait & clear.
         */
        uint32_t pasid_mapping = (pasid == 0) ? 0 : (uint32_t)pasid |
                                        ATC_VMID0_PASID_MAPPING__VALID_MASK;

        WREG32(mmATC_VMID0_PASID_MAPPING + vmid, pasid_mapping);

        while (!(RREG32(mmATC_VMID_PASID_MAPPING_UPDATE_STATUS) & (1U << vmid)))
                cpu_relax();
        WREG32(mmATC_VMID_PASID_MAPPING_UPDATE_STATUS, 1U << vmid);

        /* Mapping vmid to pasid also for IH block */
        WREG32(mmIH_VMID_0_LUT + vmid, pasid_mapping);

        return 0;
}
static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id)
{
        struct amdgpu_device *adev = get_amdgpu_device(kgd);
        uint32_t mec;
        uint32_t pipe;

        mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
        pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);

        lock_srbm(kgd, mec, pipe, 0, 0);

        WREG32(mmCPC_INT_CNTL, CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK |
                        CP_INT_CNTL_RING0__OPCODE_ERROR_INT_ENABLE_MASK);

        unlock_srbm(kgd);

        return 0;
}
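/* The per-queue SDMA RLC registers form a fixed-size block per queue inside
 * each engine's register space, so the base offset is simply
 * engine_id * SDMA1_REGISTER_OFFSET + queue_id * KFD_CIK_SDMA_QUEUE_OFFSET,
 * as computed by get_sdma_base_addr() below.
 */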
static inline uint32_t get_sdma_base_addr(struct cik_sdma_rlc_registers *m)
{
        uint32_t retval;

        retval = m->sdma_engine_id * SDMA1_REGISTER_OFFSET +
                        m->sdma_queue_id * KFD_CIK_SDMA_QUEUE_OFFSET;

        pr_debug("kfd: sdma base address: 0x%x\n", retval);

        return retval;
}
static inline struct cik_mqd *get_mqd(void *mqd)
{
        return (struct cik_mqd *)mqd;
}
static inline struct cik_sdma_rlc_registers *get_sdma_mqd(void *mqd)
{
        return (struct cik_sdma_rlc_registers *)mqd;
}
static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
                        uint32_t queue_id, uint32_t __user *wptr,
                        uint32_t wptr_shift, uint32_t wptr_mask,
                        struct mm_struct *mm)
{
        struct amdgpu_device *adev = get_amdgpu_device(kgd);
        struct cik_mqd *m;
        uint32_t *mqd_hqd;
        uint32_t reg, wptr_val, data;
        bool valid_wptr = false;

        m = get_mqd(mqd);

        acquire_queue(kgd, pipe_id, queue_id);

        /* HQD registers extend from CP_MQD_BASE_ADDR to CP_MQD_CONTROL. */
        mqd_hqd = &m->cp_mqd_base_addr_lo;

        for (reg = mmCP_MQD_BASE_ADDR; reg <= mmCP_MQD_CONTROL; reg++)
                WREG32(reg, mqd_hqd[reg - mmCP_MQD_BASE_ADDR]);

        /* Copy userspace write pointer value to register.
         * Activate doorbell logic to monitor subsequent changes.
         */
        data = REG_SET_FIELD(m->cp_hqd_pq_doorbell_control,
                        CP_HQD_PQ_DOORBELL_CONTROL, DOORBELL_EN, 1);
        WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL, data);

        /* read_user_ptr may take the mm->mmap_sem.
         * release srbm_mutex to avoid circular dependency between
         * srbm_mutex->mm_sem->reservation_ww_class_mutex->srbm_mutex.
         */
        release_queue(kgd);
        valid_wptr = read_user_wptr(mm, wptr, wptr_val);
        acquire_queue(kgd, pipe_id, queue_id);
        if (valid_wptr)
                WREG32(mmCP_HQD_PQ_WPTR, (wptr_val << wptr_shift) & wptr_mask);

        data = REG_SET_FIELD(m->cp_hqd_active, CP_HQD_ACTIVE, ACTIVE, 1);
        WREG32(mmCP_HQD_ACTIVE, data);

        release_queue(kgd);

        return 0;
}
static int kgd_hqd_dump(struct kgd_dev *kgd,
                        uint32_t pipe_id, uint32_t queue_id,
                        uint32_t (**dump)[2], uint32_t *n_regs)
{
        struct amdgpu_device *adev = get_amdgpu_device(kgd);
        uint32_t i = 0, reg;
#define HQD_N_REGS (35+4)
#define DUMP_REG(addr) do {                             \
                if (WARN_ON_ONCE(i >= HQD_N_REGS))      \
                        break;                          \
                (*dump)[i][0] = (addr) << 2;            \
                (*dump)[i++][1] = RREG32(addr);         \
        } while (0)

        *dump = kmalloc_array(HQD_N_REGS * 2, sizeof(uint32_t), GFP_KERNEL);
        if (*dump == NULL)
                return -ENOMEM;

        acquire_queue(kgd, pipe_id, queue_id);

        DUMP_REG(mmCOMPUTE_STATIC_THREAD_MGMT_SE0);
        DUMP_REG(mmCOMPUTE_STATIC_THREAD_MGMT_SE1);
        DUMP_REG(mmCOMPUTE_STATIC_THREAD_MGMT_SE2);
        DUMP_REG(mmCOMPUTE_STATIC_THREAD_MGMT_SE3);

        for (reg = mmCP_MQD_BASE_ADDR; reg <= mmCP_MQD_CONTROL; reg++)
                DUMP_REG(reg);

        release_queue(kgd);

        WARN_ON_ONCE(i != HQD_N_REGS);
        *n_regs = i;

        return 0;
}
static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd,
                        uint32_t __user *wptr, struct mm_struct *mm)
{
        struct amdgpu_device *adev = get_amdgpu_device(kgd);
        struct cik_sdma_rlc_registers *m;
        unsigned long end_jiffies;
        uint32_t sdma_base_addr;
        uint32_t data;

        m = get_sdma_mqd(mqd);
        sdma_base_addr = get_sdma_base_addr(m);

        WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL,
                m->sdma_rlc_rb_cntl & (~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK));

        end_jiffies = msecs_to_jiffies(2000) + jiffies;
        while (true) {
                data = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS);
                if (data & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
                        break;
                if (time_after(jiffies, end_jiffies))
                        return -ETIME;
                usleep_range(500, 1000);
        }
        if (m->sdma_engine_id) {
                data = RREG32(mmSDMA1_GFX_CONTEXT_CNTL);
                data = REG_SET_FIELD(data, SDMA1_GFX_CONTEXT_CNTL,
                                RESUME_CTX, 0);
                WREG32(mmSDMA1_GFX_CONTEXT_CNTL, data);
        } else {
                data = RREG32(mmSDMA0_GFX_CONTEXT_CNTL);
                data = REG_SET_FIELD(data, SDMA0_GFX_CONTEXT_CNTL,
                                RESUME_CTX, 0);
                WREG32(mmSDMA0_GFX_CONTEXT_CNTL, data);
        }

        data = REG_SET_FIELD(m->sdma_rlc_doorbell, SDMA0_RLC0_DOORBELL,
                        ENABLE, 1);
        WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, data);
        WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR, m->sdma_rlc_rb_rptr);

        if (read_user_wptr(mm, wptr, data))
                WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR, data);
        else
                WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR,
                        m->sdma_rlc_rb_rptr);

        WREG32(sdma_base_addr + mmSDMA0_RLC0_VIRTUAL_ADDR,
                        m->sdma_rlc_virtual_addr);
        WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE, m->sdma_rlc_rb_base);
        WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE_HI,
                        m->sdma_rlc_rb_base_hi);
        WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_LO,
                        m->sdma_rlc_rb_rptr_addr_lo);
        WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_HI,
                        m->sdma_rlc_rb_rptr_addr_hi);

        data = REG_SET_FIELD(m->sdma_rlc_rb_cntl, SDMA0_RLC0_RB_CNTL,
                        RB_ENABLE, 1);
        WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, data);

        return 0;
}
static int kgd_hqd_sdma_dump(struct kgd_dev *kgd,
                        uint32_t engine_id, uint32_t queue_id,
                        uint32_t (**dump)[2], uint32_t *n_regs)
{
        struct amdgpu_device *adev = get_amdgpu_device(kgd);
        uint32_t sdma_offset = engine_id * SDMA1_REGISTER_OFFSET +
                queue_id * KFD_CIK_SDMA_QUEUE_OFFSET;
        uint32_t i = 0, reg;
#undef HQD_N_REGS
#define HQD_N_REGS (19+4)

        *dump = kmalloc_array(HQD_N_REGS * 2, sizeof(uint32_t), GFP_KERNEL);
        if (*dump == NULL)
                return -ENOMEM;

        for (reg = mmSDMA0_RLC0_RB_CNTL; reg <= mmSDMA0_RLC0_DOORBELL; reg++)
                DUMP_REG(sdma_offset + reg);
        for (reg = mmSDMA0_RLC0_VIRTUAL_ADDR; reg <= mmSDMA0_RLC0_WATERMARK;
             reg++)
                DUMP_REG(sdma_offset + reg);

        WARN_ON_ONCE(i != HQD_N_REGS);
        *n_regs = i;

        return 0;
}
static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
                                uint32_t pipe_id, uint32_t queue_id)
{
        struct amdgpu_device *adev = get_amdgpu_device(kgd);
        uint32_t act;
        bool retval = false;
        uint32_t low, high;

        acquire_queue(kgd, pipe_id, queue_id);
        act = RREG32(mmCP_HQD_ACTIVE);
        if (act) {
                low = lower_32_bits(queue_address >> 8);
                high = upper_32_bits(queue_address >> 8);

                if (low == RREG32(mmCP_HQD_PQ_BASE) &&
                                high == RREG32(mmCP_HQD_PQ_BASE_HI))
                        retval = true;
        }
        release_queue(kgd);
        return retval;
}
static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd)
{
        struct amdgpu_device *adev = get_amdgpu_device(kgd);
        struct cik_sdma_rlc_registers *m;
        uint32_t sdma_base_addr;
        uint32_t sdma_rlc_rb_cntl;

        m = get_sdma_mqd(mqd);
        sdma_base_addr = get_sdma_base_addr(m);

        sdma_rlc_rb_cntl = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL);

        if (sdma_rlc_rb_cntl & SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK)
                return true;

        return false;
}
static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd,
                                enum kfd_preempt_type reset_type,
                                unsigned int utimeout, uint32_t pipe_id,
                                uint32_t queue_id)
{
        struct amdgpu_device *adev = get_amdgpu_device(kgd);
        uint32_t temp;
        enum hqd_dequeue_request_type type;
        unsigned long flags, end_jiffies;
        int retry;

        if (adev->in_gpu_reset)
                return -EIO;

        acquire_queue(kgd, pipe_id, queue_id);
        WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL, 0);

        switch (reset_type) {
        case KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN:
                type = DRAIN_PIPE;
                break;
        case KFD_PREEMPT_TYPE_WAVEFRONT_RESET:
                type = RESET_WAVES;
                break;
        default:
                type = DRAIN_PIPE;
                break;
        }

        /* Workaround: If IQ timer is active and the wait time is close to or
         * equal to 0, dequeueing is not safe. Wait until either the wait time
         * is larger or timer is cleared. Also, ensure that IQ_REQ_PEND is
         * cleared before continuing. Also, ensure wait times are set to at
         * least 0x3.
         */
        local_irq_save(flags);
        preempt_disable();
        retry = 5000; /* wait for 500 usecs at maximum */
        while (true) {
                temp = RREG32(mmCP_HQD_IQ_TIMER);
                if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, PROCESSING_IQ)) {
                        pr_debug("HW is processing IQ\n");
                        goto loop;
                }
                if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, ACTIVE)) {
                        if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, RETRY_TYPE)
                                        == 3) /* SEM-rearm is safe */
                                break;
                        /* Wait time 3 is safe for CP, but our MMIO read/write
                         * time is close to 1 microsecond, so check for 10 to
                         * leave more buffer room
                         */
                        if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, WAIT_TIME)
                                        >= 10)
                                break;
                        pr_debug("IQ timer is active\n");
                } else
                        break;
loop:
                if (!retry) {
                        pr_err("CP HQD IQ timer status time out\n");
                        break;
                }
                ndelay(100);
                --retry;
        }
        retry = 1000;
        while (true) {
                temp = RREG32(mmCP_HQD_DEQUEUE_REQUEST);
                if (!(temp & CP_HQD_DEQUEUE_REQUEST__IQ_REQ_PEND_MASK))
                        break;
                pr_debug("Dequeue request is pending\n");

                if (!retry) {
                        pr_err("CP HQD dequeue request time out\n");
                        break;
                }
                ndelay(100);
                --retry;
        }
        local_irq_restore(flags);
        preempt_enable();

        WREG32(mmCP_HQD_DEQUEUE_REQUEST, type);

        end_jiffies = (utimeout * HZ / 1000) + jiffies;
        while (true) {
                temp = RREG32(mmCP_HQD_ACTIVE);
                if (!(temp & CP_HQD_ACTIVE__ACTIVE_MASK))
                        break;
                if (time_after(jiffies, end_jiffies)) {
                        pr_err("cp queue preemption time out\n");
                        release_queue(kgd);
                        return -ETIME;
                }
                usleep_range(500, 1000);
        }

        release_queue(kgd);
        return 0;
}
static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
                                unsigned int utimeout)
{
        struct amdgpu_device *adev = get_amdgpu_device(kgd);
        struct cik_sdma_rlc_registers *m;
        uint32_t sdma_base_addr;
        uint32_t temp;
        unsigned long end_jiffies = (utimeout * HZ / 1000) + jiffies;

        m = get_sdma_mqd(mqd);
        sdma_base_addr = get_sdma_base_addr(m);

        temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL);
        temp = temp & ~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK;
        WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, temp);

        while (true) {
                temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS);
                if (temp & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
                        break;
                if (time_after(jiffies, end_jiffies))
                        return -ETIME;
                usleep_range(500, 1000);
        }

        WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, 0);
        WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL,
                RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL) |
                SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK);

        m->sdma_rlc_rb_rptr = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR);

        return 0;
}
static int kgd_address_watch_disable(struct kgd_dev *kgd)
{
        struct amdgpu_device *adev = get_amdgpu_device(kgd);
        union TCP_WATCH_CNTL_BITS cntl;
        unsigned int i;

        cntl.u32All = 0;

        cntl.bitfields.valid = 0;
        cntl.bitfields.mask = ADDRESS_WATCH_REG_CNTL_DEFAULT_MASK;
        cntl.bitfields.atc = 1;

        /* Turning off this address until we set all the registers */
        for (i = 0; i < MAX_WATCH_ADDRESSES; i++)
                WREG32(watchRegs[i * ADDRESS_WATCH_REG_MAX +
                        ADDRESS_WATCH_REG_CNTL], cntl.u32All);

        return 0;
}
static int kgd_address_watch_execute(struct kgd_dev *kgd,
                                        unsigned int watch_point_id,
                                        uint32_t cntl_val,
                                        uint32_t addr_hi,
                                        uint32_t addr_lo)
{
        struct amdgpu_device *adev = get_amdgpu_device(kgd);
        union TCP_WATCH_CNTL_BITS cntl;

        cntl.u32All = cntl_val;

        /* Turning off this watch point until we set all the registers */
        cntl.bitfields.valid = 0;
        WREG32(watchRegs[watch_point_id * ADDRESS_WATCH_REG_MAX +
                ADDRESS_WATCH_REG_CNTL], cntl.u32All);

        WREG32(watchRegs[watch_point_id * ADDRESS_WATCH_REG_MAX +
                ADDRESS_WATCH_REG_ADDR_HI], addr_hi);

        WREG32(watchRegs[watch_point_id * ADDRESS_WATCH_REG_MAX +
                ADDRESS_WATCH_REG_ADDR_LO], addr_lo);

        /* Enable the watch point */
        cntl.bitfields.valid = 1;

        WREG32(watchRegs[watch_point_id * ADDRESS_WATCH_REG_MAX +
                ADDRESS_WATCH_REG_CNTL], cntl.u32All);

        return 0;
}
static int kgd_wave_control_execute(struct kgd_dev *kgd,
                                        uint32_t gfx_index_val,
                                        uint32_t sq_cmd)
{
        struct amdgpu_device *adev = get_amdgpu_device(kgd);
        uint32_t data;

        mutex_lock(&adev->grbm_idx_mutex);

        WREG32(mmGRBM_GFX_INDEX, gfx_index_val);
        WREG32(mmSQ_CMD, sq_cmd);

        /* Restore the GRBM_GFX_INDEX register */

        data = GRBM_GFX_INDEX__INSTANCE_BROADCAST_WRITES_MASK |
                GRBM_GFX_INDEX__SH_BROADCAST_WRITES_MASK |
                GRBM_GFX_INDEX__SE_BROADCAST_WRITES_MASK;

        WREG32(mmGRBM_GFX_INDEX, data);

        mutex_unlock(&adev->grbm_idx_mutex);

        return 0;
}
static uint32_t kgd_address_watch_get_offset(struct kgd_dev *kgd,
                                        unsigned int watch_point_id,
                                        unsigned int reg_offset)
{
        return watchRegs[watch_point_id * ADDRESS_WATCH_REG_MAX + reg_offset];
}
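/* Example: for watch point 1, reg_offset == ADDRESS_WATCH_REG_CNTL resolves to
 * watchRegs[1 * ADDRESS_WATCH_REG_MAX + ADDRESS_WATCH_REG_CNTL], i.e. the
 * mmTCP_WATCH1_CNTL offset from the table above.
 */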
static bool get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd,
                                                uint8_t vmid)
{
        uint32_t reg;
        struct amdgpu_device *adev = (struct amdgpu_device *) kgd;

        reg = RREG32(mmATC_VMID0_PASID_MAPPING + vmid);
        return reg & ATC_VMID0_PASID_MAPPING__VALID_MASK;
}
static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd,
                                                uint8_t vmid)
{
        uint32_t reg;
        struct amdgpu_device *adev = (struct amdgpu_device *) kgd;

        reg = RREG32(mmATC_VMID0_PASID_MAPPING + vmid);
        return reg & ATC_VMID0_PASID_MAPPING__PASID_MASK;
}
static void set_scratch_backing_va(struct kgd_dev *kgd,
                                        uint64_t va, uint32_t vmid)
{
        struct amdgpu_device *adev = (struct amdgpu_device *) kgd;

        lock_srbm(kgd, 0, 0, 0, vmid);
        WREG32(mmSH_HIDDEN_PRIVATE_BASE_VMID, va);
        unlock_srbm(kgd);
}
static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type)
{
        struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
        const union amdgpu_firmware_header *hdr;

        switch (type) {
        case KGD_ENGINE_PFP:
                hdr = (const union amdgpu_firmware_header *)
                                                adev->gfx.pfp_fw->data;
                break;

        case KGD_ENGINE_ME:
                hdr = (const union amdgpu_firmware_header *)
                                                adev->gfx.me_fw->data;
                break;

        case KGD_ENGINE_CE:
                hdr = (const union amdgpu_firmware_header *)
                                                adev->gfx.ce_fw->data;
                break;

        case KGD_ENGINE_MEC1:
                hdr = (const union amdgpu_firmware_header *)
                                                adev->gfx.mec_fw->data;
                break;

        case KGD_ENGINE_MEC2:
                hdr = (const union amdgpu_firmware_header *)
                                                adev->gfx.mec2_fw->data;
                break;

        case KGD_ENGINE_RLC:
                hdr = (const union amdgpu_firmware_header *)
                                                adev->gfx.rlc_fw->data;
                break;

        case KGD_ENGINE_SDMA1:
                hdr = (const union amdgpu_firmware_header *)
                                                adev->sdma.instance[0].fw->data;
                break;

        case KGD_ENGINE_SDMA2:
                hdr = (const union amdgpu_firmware_header *)
                                                adev->sdma.instance[1].fw->data;
                break;

        default:
                return 0;
        }

        if (hdr == NULL)
                return 0;

        /* Only 12 bit in use */
        return hdr->common.ucode_version;
}
static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
                uint64_t page_table_base)
{
        struct amdgpu_device *adev = get_amdgpu_device(kgd);

        if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid)) {
                pr_err("trying to set page table base for wrong VMID\n");
                return;
        }
        WREG32(mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vmid - 8,
                lower_32_bits(page_table_base));
}
static int invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid)
{
        struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
        int vmid;
        unsigned int tmp;

        if (adev->in_gpu_reset)
                return -EIO;

        for (vmid = 0; vmid < 16; vmid++) {
                if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid))
                        continue;

                tmp = RREG32(mmATC_VMID0_PASID_MAPPING + vmid);
                if ((tmp & ATC_VMID0_PASID_MAPPING__VALID_MASK) &&
                        (tmp & ATC_VMID0_PASID_MAPPING__PASID_MASK) == pasid) {
                        WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
                        RREG32(mmVM_INVALIDATE_RESPONSE);
                        break;
                }
        }

        return 0;
}
static int invalidate_tlbs_vmid(struct kgd_dev *kgd, uint16_t vmid)
{
        struct amdgpu_device *adev = (struct amdgpu_device *) kgd;

        if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid)) {
                pr_err("non kfd vmid\n");
                return 0;
        }

        WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
        RREG32(mmVM_INVALIDATE_RESPONSE);
        return 0;
}
/**
 * read_vmid_from_vmfault_reg - read vmid from register
 *
 * @kgd: kgd device pointer
 *
 * Read the vmid of the faulting process from the VM fault status
 * register (CIK).
 */
static uint32_t read_vmid_from_vmfault_reg(struct kgd_dev *kgd)
{
        struct amdgpu_device *adev = get_amdgpu_device(kgd);

        uint32_t status = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS);

        return REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS, VMID);
}