/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/ratelimit.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/sched.h>

#include "kfd_device_queue_manager.h"
#include "kfd_mqd_manager.h"
#include "kfd_kernel_queue.h"
#include "amdgpu_amdkfd.h"

/* Size of the per-pipe EOP queue */
#define CIK_HPD_EOP_BYTES_LOG2 11
#define CIK_HPD_EOP_BYTES (1U << CIK_HPD_EOP_BYTES_LOG2)
static int set_pasid_vmid_mapping(struct device_queue_manager *dqm,
				  unsigned int pasid, unsigned int vmid);

static int execute_queues_cpsch(struct device_queue_manager *dqm,
				enum kfd_unmap_queues_filter filter,
				uint32_t filter_param);
static int unmap_queues_cpsch(struct device_queue_manager *dqm,
			      enum kfd_unmap_queues_filter filter,
			      uint32_t filter_param);

static int map_queues_cpsch(struct device_queue_manager *dqm);

static void deallocate_sdma_queue(struct device_queue_manager *dqm,
				  struct queue *q);

static inline void deallocate_hqd(struct device_queue_manager *dqm,
				  struct queue *q);
static int allocate_hqd(struct device_queue_manager *dqm, struct queue *q);
static int allocate_sdma_queue(struct device_queue_manager *dqm,
			       struct queue *q);

static void kfd_process_hw_exception(struct work_struct *work);
enum KFD_MQD_TYPE get_mqd_type_from_queue_type(enum kfd_queue_type type)
{
	if (type == KFD_QUEUE_TYPE_SDMA || type == KFD_QUEUE_TYPE_SDMA_XGMI)
		return KFD_MQD_TYPE_SDMA;
	return KFD_MQD_TYPE_CP;
}
static bool is_pipe_enabled(struct device_queue_manager *dqm, int mec, int pipe)
{
	int i;
	int pipe_offset = mec * dqm->dev->shared_resources.num_pipe_per_mec
		+ pipe * dqm->dev->shared_resources.num_queue_per_pipe;
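	/*
	 * Illustrative example (the numbers are hypothetical, not from a
	 * real ASIC): with num_pipe_per_mec = 4 and num_queue_per_pipe = 8,
	 * mec = 0 and pipe = 2 give pipe_offset = 0 * 4 + 2 * 8 = 16, so the
	 * loop below tests bits 16..23 of the shared queue_bitmap.
	 */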
	/* queue is available for KFD usage if bit is 1 */
	for (i = 0; i < dqm->dev->shared_resources.num_queue_per_pipe; ++i)
		if (test_bit(pipe_offset + i,
			     dqm->dev->shared_resources.queue_bitmap))
			return true;
	return false;
}
unsigned int get_queues_num(struct device_queue_manager *dqm)
{
	return bitmap_weight(dqm->dev->shared_resources.queue_bitmap,
				KGD_MAX_QUEUES);
}

unsigned int get_queues_per_pipe(struct device_queue_manager *dqm)
{
	return dqm->dev->shared_resources.num_queue_per_pipe;
}

unsigned int get_pipes_per_mec(struct device_queue_manager *dqm)
{
	return dqm->dev->shared_resources.num_pipe_per_mec;
}

static unsigned int get_num_sdma_engines(struct device_queue_manager *dqm)
{
	return dqm->dev->device_info->num_sdma_engines;
}

static unsigned int get_num_xgmi_sdma_engines(struct device_queue_manager *dqm)
{
	return dqm->dev->device_info->num_xgmi_sdma_engines;
}

unsigned int get_num_sdma_queues(struct device_queue_manager *dqm)
{
	return dqm->dev->device_info->num_sdma_engines
			* dqm->dev->device_info->num_sdma_queues_per_engine;
}

unsigned int get_num_xgmi_sdma_queues(struct device_queue_manager *dqm)
{
	return dqm->dev->device_info->num_xgmi_sdma_engines
			* dqm->dev->device_info->num_sdma_queues_per_engine;
}
void program_sh_mem_settings(struct device_queue_manager *dqm,
					struct qcm_process_device *qpd)
{
	return dqm->dev->kfd2kgd->program_sh_mem_settings(
						dqm->dev->kgd, qpd->vmid,
						qpd->sh_mem_config,
						qpd->sh_mem_ape1_base,
						qpd->sh_mem_ape1_limit,
						qpd->sh_mem_bases);
}
static int allocate_doorbell(struct qcm_process_device *qpd, struct queue *q)
{
	struct kfd_dev *dev = qpd->dqm->dev;

	if (!KFD_IS_SOC15(dev->device_info->asic_family)) {
		/* On pre-SOC15 chips we need to use the queue ID to
		 * preserve the user mode ABI.
		 */
		q->doorbell_id = q->properties.queue_id;
	} else if (q->properties.type == KFD_QUEUE_TYPE_SDMA ||
			q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
		/* For SDMA queues on SOC15 with 8-byte doorbells, use static
		 * doorbell assignments based on the engine and queue id.
		 * The doorbell index distance between RLC (2*i) and (2*i+1)
		 * for an SDMA engine is 512.
		 */
		uint32_t *idx_offset =
				dev->shared_resources.sdma_doorbell_idx;

		q->doorbell_id = idx_offset[q->properties.sdma_engine_id]
			+ (q->properties.sdma_queue_id & 1)
			* KFD_QUEUE_DOORBELL_MIRROR_OFFSET
			+ (q->properties.sdma_queue_id >> 1);
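		/*
		 * Worked example (illustrative values only): if
		 * idx_offset[engine] = 0x200, KFD_QUEUE_DOORBELL_MIRROR_OFFSET
		 * = 512 and sdma_queue_id = 3, then doorbell_id =
		 * 0x200 + (3 & 1) * 512 + (3 >> 1) = 0x200 + 512 + 1 = 0x401.
		 * Odd queue ids land in the mirrored (2*i+1) doorbell page,
		 * even ones in the base (2*i) page.
		 */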
	} else {
		/* For CP queues on SOC15 reserve a free doorbell ID */
		unsigned int found;

		found = find_first_zero_bit(qpd->doorbell_bitmap,
					KFD_MAX_NUM_OF_QUEUES_PER_PROCESS);
		if (found >= KFD_MAX_NUM_OF_QUEUES_PER_PROCESS) {
			pr_debug("No doorbells available");
			return -EBUSY;
		}
		set_bit(found, qpd->doorbell_bitmap);
		q->doorbell_id = found;
	}

	q->properties.doorbell_off =
		kfd_get_doorbell_dw_offset_in_bar(dev, q->process,
						q->doorbell_id);

	return 0;
}
static void deallocate_doorbell(struct qcm_process_device *qpd,
				struct queue *q)
{
	unsigned int old;
	struct kfd_dev *dev = qpd->dqm->dev;

	if (!KFD_IS_SOC15(dev->device_info->asic_family) ||
	    q->properties.type == KFD_QUEUE_TYPE_SDMA ||
	    q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
		return;

	old = test_and_clear_bit(q->doorbell_id, qpd->doorbell_bitmap);
	WARN_ON(!old);
}
static int allocate_vmid(struct device_queue_manager *dqm,
			struct qcm_process_device *qpd,
			struct queue *q)
{
	int allocated_vmid = -1, i;

	for (i = dqm->dev->vm_info.first_vmid_kfd;
			i <= dqm->dev->vm_info.last_vmid_kfd; i++) {
		if (!dqm->vmid_pasid[i]) {
			allocated_vmid = i;
			break;
		}
	}

	if (allocated_vmid < 0) {
		pr_err("no more vmid to allocate\n");
		return -ENOSPC;
	}

	pr_debug("vmid allocated: %d\n", allocated_vmid);

	dqm->vmid_pasid[allocated_vmid] = q->process->pasid;

	set_pasid_vmid_mapping(dqm, q->process->pasid, allocated_vmid);

	qpd->vmid = allocated_vmid;
	q->properties.vmid = allocated_vmid;

	program_sh_mem_settings(dqm, qpd);

	/* qpd->page_table_base is set earlier when register_process()
	 * is called, i.e. when the first queue is created.
	 */
	dqm->dev->kfd2kgd->set_vm_context_page_table_base(dqm->dev->kgd,
			qpd->vmid,
			qpd->page_table_base);
	/* invalidate the VM context after pasid and vmid mapping is set up */
	kfd_flush_tlb(qpd_to_pdd(qpd));

	if (dqm->dev->kfd2kgd->set_scratch_backing_va)
		dqm->dev->kfd2kgd->set_scratch_backing_va(dqm->dev->kgd,
				qpd->sh_hidden_private_base, qpd->vmid);

	return 0;
}
static int flush_texture_cache_nocpsch(struct kfd_dev *kdev,
				struct qcm_process_device *qpd)
{
	const struct packet_manager_funcs *pmf = qpd->dqm->packets.pmf;
	int ret;

	ret = pmf->release_mem(qpd->ib_base, (uint32_t *)qpd->ib_kaddr);
	if (ret)
		return ret;

	return amdgpu_amdkfd_submit_ib(kdev->kgd, KGD_ENGINE_MEC1, qpd->vmid,
				qpd->ib_base, (uint32_t *)qpd->ib_kaddr,
				pmf->release_mem_size / sizeof(uint32_t));
}
static void deallocate_vmid(struct device_queue_manager *dqm,
				struct qcm_process_device *qpd,
				struct queue *q)
{
	/* On GFX v7, CP doesn't flush TC at dequeue */
	if (q->device->device_info->asic_family == CHIP_HAWAII)
		if (flush_texture_cache_nocpsch(q->device, qpd))
			pr_err("Failed to flush TC\n");

	kfd_flush_tlb(qpd_to_pdd(qpd));

	/* Release the vmid mapping */
	set_pasid_vmid_mapping(dqm, 0, qpd->vmid);
	dqm->vmid_pasid[qpd->vmid] = 0;

	qpd->vmid = 0;
	q->properties.vmid = 0;
}
static int create_queue_nocpsch(struct device_queue_manager *dqm,
				struct queue *q,
				struct qcm_process_device *qpd)
{
	struct mqd_manager *mqd_mgr;
	int retval;

	dqm_lock(dqm);

	if (dqm->total_queue_count >= max_num_of_queues_per_device) {
		pr_warn("Can't create new usermode queue because %d queues were already created\n",
				dqm->total_queue_count);
		retval = -EPERM;
		goto out_unlock;
	}

	if (list_empty(&qpd->queues_list)) {
		retval = allocate_vmid(dqm, qpd, q);
		if (retval)
			goto out_unlock;
	}
	q->properties.vmid = qpd->vmid;
	/*
	 * Eviction state logic: mark all queues as evicted, even ones
	 * not currently active. Restoring inactive queues later only
	 * updates the is_evicted flag but is a no-op otherwise.
	 */
	q->properties.is_evicted = !!qpd->evicted;

	q->properties.tba_addr = qpd->tba_addr;
	q->properties.tma_addr = qpd->tma_addr;

	mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
			q->properties.type)];
	if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE) {
		retval = allocate_hqd(dqm, q);
		if (retval)
			goto deallocate_vmid;
		pr_debug("Loading mqd to hqd on pipe %d, queue %d\n",
			q->pipe, q->queue);
	} else if (q->properties.type == KFD_QUEUE_TYPE_SDMA ||
		q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
		retval = allocate_sdma_queue(dqm, q);
		if (retval)
			goto deallocate_vmid;
		dqm->asic_ops.init_sdma_vm(dqm, q, qpd);
	}

	retval = allocate_doorbell(qpd, q);
	if (retval)
		goto out_deallocate_hqd;

	/* Temporarily release dqm lock to avoid a circular lock dependency */
	dqm_unlock(dqm);
	q->mqd_mem_obj = mqd_mgr->allocate_mqd(mqd_mgr->dev, &q->properties);
	dqm_lock(dqm);

	if (!q->mqd_mem_obj) {
		retval = -ENOMEM;
		goto out_deallocate_doorbell;
	}
	mqd_mgr->init_mqd(mqd_mgr, &q->mqd, q->mqd_mem_obj,
				&q->gart_mqd_addr, &q->properties);
	if (q->properties.is_active) {
		if (!dqm->sched_running) {
			WARN_ONCE(1, "Load non-HWS mqd while stopped\n");
			goto add_queue_to_list;
		}

		if (WARN(q->process->mm != current->mm,
					"should only run in user thread"))
			retval = -EFAULT;
		else
			retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd, q->pipe,
					q->queue, &q->properties, current->mm);
		if (retval)
			goto out_free_mqd;
	}

add_queue_to_list:
	list_add(&q->list, &qpd->queues_list);

	if (q->properties.is_active)
		dqm->queue_count++;

	if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
		dqm->sdma_queue_count++;
	else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
		dqm->xgmi_sdma_queue_count++;

	/*
	 * Unconditionally increment this counter, regardless of the queue's
	 * type or whether the queue is active.
	 */
	dqm->total_queue_count++;
	pr_debug("Total of %d queues are accountable so far\n",
			dqm->total_queue_count);
	goto out_unlock;

out_free_mqd:
	mqd_mgr->free_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);
out_deallocate_doorbell:
	deallocate_doorbell(qpd, q);
out_deallocate_hqd:
	if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE)
		deallocate_hqd(dqm, q);
	else if (q->properties.type == KFD_QUEUE_TYPE_SDMA ||
		q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
		deallocate_sdma_queue(dqm, q);
deallocate_vmid:
	if (list_empty(&qpd->queues_list))
		deallocate_vmid(dqm, qpd, q);
out_unlock:
	dqm_unlock(dqm);
	return retval;
}
static int allocate_hqd(struct device_queue_manager *dqm, struct queue *q)
{
	bool set;
	int pipe, bit, i;

	set = false;

	for (pipe = dqm->next_pipe_to_allocate, i = 0;
			i < get_pipes_per_mec(dqm);
			pipe = ((pipe + 1) % get_pipes_per_mec(dqm)), ++i) {

		if (!is_pipe_enabled(dqm, 0, pipe))
			continue;

		if (dqm->allocated_queues[pipe] != 0) {
			bit = ffs(dqm->allocated_queues[pipe]) - 1;
			dqm->allocated_queues[pipe] &= ~(1 << bit);
			q->pipe = pipe;
			q->queue = bit;
			set = true;
			break;
		}
	}

	if (!set)
		return -EBUSY;

	pr_debug("hqd slot - pipe %d, queue %d\n", q->pipe, q->queue);
	/* horizontal hqd allocation */
	dqm->next_pipe_to_allocate = (pipe + 1) % get_pipes_per_mec(dqm);

	return 0;
}

static inline void deallocate_hqd(struct device_queue_manager *dqm,
				struct queue *q)
{
	dqm->allocated_queues[q->pipe] |= (1 << q->queue);
}
/* Access to DQM has to be locked before calling destroy_queue_nocpsch_locked
 * to avoid unsynchronized access
 */
static int destroy_queue_nocpsch_locked(struct device_queue_manager *dqm,
				struct qcm_process_device *qpd,
				struct queue *q)
{
	int retval;
	struct mqd_manager *mqd_mgr;

	mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
			q->properties.type)];

	if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE) {
		deallocate_hqd(dqm, q);
	} else if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
		dqm->sdma_queue_count--;
		deallocate_sdma_queue(dqm, q);
	} else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
		dqm->xgmi_sdma_queue_count--;
		deallocate_sdma_queue(dqm, q);
	} else {
		pr_debug("q->properties.type %d is invalid\n",
				q->properties.type);
		return -EINVAL;
	}
	dqm->total_queue_count--;

	deallocate_doorbell(qpd, q);

	if (!dqm->sched_running) {
		WARN_ONCE(1, "Destroy non-HWS queue while stopped\n");
		return 0;
	}

	retval = mqd_mgr->destroy_mqd(mqd_mgr, q->mqd,
				KFD_PREEMPT_TYPE_WAVEFRONT_RESET,
				KFD_UNMAP_LATENCY_MS,
				q->pipe, q->queue);
	if (retval == -ETIME)
		qpd->reset_wavefronts = true;

	mqd_mgr->free_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);

	list_del(&q->list);
	if (list_empty(&qpd->queues_list)) {
		if (qpd->reset_wavefronts) {
			pr_warn("Resetting wave fronts (nocpsch) on dev %p\n",
					dqm->dev);
			/* dbgdev_wave_reset_wavefronts has to be called before
			 * deallocate_vmid(), i.e. when vmid is still in use.
			 */
			dbgdev_wave_reset_wavefronts(dqm->dev,
					qpd->pqm->process);
			qpd->reset_wavefronts = false;
		}

		deallocate_vmid(dqm, qpd, q);
	}

	if (q->properties.is_active)
		dqm->queue_count--;

	return retval;
}
static int destroy_queue_nocpsch(struct device_queue_manager *dqm,
				struct qcm_process_device *qpd,
				struct queue *q)
{
	int retval;

	dqm_lock(dqm);
	retval = destroy_queue_nocpsch_locked(dqm, qpd, q);
	dqm_unlock(dqm);

	return retval;
}
static int update_queue(struct device_queue_manager *dqm, struct queue *q)
{
	int retval = 0;
	struct mqd_manager *mqd_mgr;
	struct kfd_process_device *pdd;
	bool prev_active = false;

	dqm_lock(dqm);
	pdd = kfd_get_process_device_data(q->device, q->process);
	if (!pdd) {
		retval = -ENODEV;
		goto out_unlock;
	}
	mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
			q->properties.type)];

	/* Save previous activity state for counters */
	prev_active = q->properties.is_active;

	/* Make sure the queue is unmapped before updating the MQD */
	if (dqm->sched_policy != KFD_SCHED_POLICY_NO_HWS) {
		retval = unmap_queues_cpsch(dqm,
				KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
		if (retval) {
			pr_err("unmap queue failed\n");
			goto out_unlock;
		}
	} else if (prev_active &&
		   (q->properties.type == KFD_QUEUE_TYPE_COMPUTE ||
		    q->properties.type == KFD_QUEUE_TYPE_SDMA ||
		    q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)) {

		if (!dqm->sched_running) {
			WARN_ONCE(1, "Update non-HWS queue while stopped\n");
			goto out_unlock;
		}

		retval = mqd_mgr->destroy_mqd(mqd_mgr, q->mqd,
				KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN,
				KFD_UNMAP_LATENCY_MS, q->pipe, q->queue);
		if (retval) {
			pr_err("destroy mqd failed\n");
			goto out_unlock;
		}
	}

	mqd_mgr->update_mqd(mqd_mgr, q->mqd, &q->properties);

	/*
	 * Check the active state vs. the previous state and modify the
	 * counter accordingly. map_queues_cpsch uses dqm->queue_count to
	 * determine whether a new runlist must be uploaded.
	 */
	if (q->properties.is_active && !prev_active)
		dqm->queue_count++;
	else if (!q->properties.is_active && prev_active)
		dqm->queue_count--;

	if (dqm->sched_policy != KFD_SCHED_POLICY_NO_HWS)
		retval = map_queues_cpsch(dqm);
	else if (q->properties.is_active &&
		 (q->properties.type == KFD_QUEUE_TYPE_COMPUTE ||
		  q->properties.type == KFD_QUEUE_TYPE_SDMA ||
		  q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)) {
		if (WARN(q->process->mm != current->mm,
			 "should only run in user thread"))
			retval = -EFAULT;
		else
			retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd,
						   q->pipe, q->queue,
						   &q->properties, current->mm);
	}

out_unlock:
	dqm_unlock(dqm);
	return retval;
}
static int evict_process_queues_nocpsch(struct device_queue_manager *dqm,
					struct qcm_process_device *qpd)
{
	struct queue *q;
	struct mqd_manager *mqd_mgr;
	struct kfd_process_device *pdd;
	int retval, ret = 0;

	dqm_lock(dqm);
	if (qpd->evicted++ > 0) /* already evicted, do nothing */
		goto out;

	pdd = qpd_to_pdd(qpd);
	pr_info_ratelimited("Evicting PASID 0x%x queues\n",
			    pdd->process->pasid);

	/* Mark all queues as evicted. Deactivate all active queues on
	 * the qpd.
	 */
	list_for_each_entry(q, &qpd->queues_list, list) {
		q->properties.is_evicted = true;
		if (!q->properties.is_active)
			continue;

		mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
				q->properties.type)];
		q->properties.is_active = false;
		dqm->queue_count--;

		if (WARN_ONCE(!dqm->sched_running, "Evict when stopped\n"))
			continue;

		retval = mqd_mgr->destroy_mqd(mqd_mgr, q->mqd,
				KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN,
				KFD_UNMAP_LATENCY_MS, q->pipe, q->queue);
		if (retval && !ret)
			/* Return the first error, but keep going to
			 * maintain a consistent eviction state
			 */
			ret = retval;
	}

out:
	dqm_unlock(dqm);
	return ret;
}
static int evict_process_queues_cpsch(struct device_queue_manager *dqm,
				      struct qcm_process_device *qpd)
{
	struct queue *q;
	struct kfd_process_device *pdd;
	int retval = 0;

	dqm_lock(dqm);
	if (qpd->evicted++ > 0) /* already evicted, do nothing */
		goto out;

	pdd = qpd_to_pdd(qpd);
	pr_info_ratelimited("Evicting PASID 0x%x queues\n",
			    pdd->process->pasid);

	/* Mark all queues as evicted. Deactivate all active queues on
	 * the qpd.
	 */
	list_for_each_entry(q, &qpd->queues_list, list) {
		q->properties.is_evicted = true;
		if (!q->properties.is_active)
			continue;

		q->properties.is_active = false;
		dqm->queue_count--;
	}
	retval = execute_queues_cpsch(dqm,
				qpd->is_debug ?
				KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES :
				KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);

out:
	dqm_unlock(dqm);
	return retval;
}
static int restore_process_queues_nocpsch(struct device_queue_manager *dqm,
					  struct qcm_process_device *qpd)
{
	struct mm_struct *mm = NULL;
	struct queue *q;
	struct mqd_manager *mqd_mgr;
	struct kfd_process_device *pdd;
	uint64_t pd_base;
	int retval, ret = 0;

	pdd = qpd_to_pdd(qpd);
	/* Retrieve PD base */
	pd_base = amdgpu_amdkfd_gpuvm_get_process_page_dir(pdd->vm);

	dqm_lock(dqm);
	if (WARN_ON_ONCE(!qpd->evicted)) /* already restored, do nothing */
		goto out;
	if (qpd->evicted > 1) { /* ref count still > 0, decrement & quit */
		qpd->evicted--;
		goto out;
	}

	pr_info_ratelimited("Restoring PASID 0x%x queues\n",
			    pdd->process->pasid);

	/* Update PD Base in QPD */
	qpd->page_table_base = pd_base;
	pr_debug("Updated PD address to 0x%llx\n", pd_base);

	if (!list_empty(&qpd->queues_list)) {
		dqm->dev->kfd2kgd->set_vm_context_page_table_base(
				dqm->dev->kgd,
				qpd->vmid,
				qpd->page_table_base);
		kfd_flush_tlb(pdd);
	}

	/* Take a safe reference to the mm_struct, which may otherwise
	 * disappear even while the kfd_process is still referenced.
	 */
	mm = get_task_mm(pdd->process->lead_thread);
	if (!mm) {
		ret = -EFAULT;
		goto out;
	}

	/* Remove the eviction flags. Activate queues that are not
	 * inactive for other reasons.
	 */
	list_for_each_entry(q, &qpd->queues_list, list) {
		q->properties.is_evicted = false;
		if (!QUEUE_IS_ACTIVE(q->properties))
			continue;

		mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
				q->properties.type)];
		q->properties.is_active = true;
		dqm->queue_count++;

		if (WARN_ONCE(!dqm->sched_running, "Restore when stopped\n"))
			continue;

		retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd, q->pipe,
				       q->queue, &q->properties, mm);
		if (retval && !ret)
			/* Return the first error, but keep going to
			 * maintain a consistent eviction state
			 */
			ret = retval;
	}
	qpd->evicted = 0;
out:
	if (mm)
		mmput(mm);
	dqm_unlock(dqm);
	return ret;
}
static int restore_process_queues_cpsch(struct device_queue_manager *dqm,
					struct qcm_process_device *qpd)
{
	struct queue *q;
	struct kfd_process_device *pdd;
	uint64_t pd_base;
	int retval = 0;

	pdd = qpd_to_pdd(qpd);
	/* Retrieve PD base */
	pd_base = amdgpu_amdkfd_gpuvm_get_process_page_dir(pdd->vm);

	dqm_lock(dqm);
	if (WARN_ON_ONCE(!qpd->evicted)) /* already restored, do nothing */
		goto out;
	if (qpd->evicted > 1) { /* ref count still > 0, decrement & quit */
		qpd->evicted--;
		goto out;
	}

	pr_info_ratelimited("Restoring PASID 0x%x queues\n",
			    pdd->process->pasid);

	/* Update PD Base in QPD */
	qpd->page_table_base = pd_base;
	pr_debug("Updated PD address to 0x%llx\n", pd_base);

	/* activate all active queues on the qpd */
	list_for_each_entry(q, &qpd->queues_list, list) {
		q->properties.is_evicted = false;
		if (!QUEUE_IS_ACTIVE(q->properties))
			continue;

		q->properties.is_active = true;
		dqm->queue_count++;
	}
	retval = execute_queues_cpsch(dqm,
				KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
	qpd->evicted = 0;
out:
	dqm_unlock(dqm);
	return retval;
}
static int register_process(struct device_queue_manager *dqm,
					struct qcm_process_device *qpd)
{
	struct device_process_node *n;
	struct kfd_process_device *pdd;
	uint64_t pd_base;
	int retval;

	n = kzalloc(sizeof(*n), GFP_KERNEL);
	if (!n)
		return -ENOMEM;

	n->qpd = qpd;

	pdd = qpd_to_pdd(qpd);
	/* Retrieve PD base */
	pd_base = amdgpu_amdkfd_gpuvm_get_process_page_dir(pdd->vm);

	dqm_lock(dqm);
	list_add(&n->list, &dqm->queues);

	/* Update PD Base in QPD */
	qpd->page_table_base = pd_base;
	pr_debug("Updated PD address to 0x%llx\n", pd_base);

	retval = dqm->asic_ops.update_qpd(dqm, qpd);

	dqm->processes_count++;

	dqm_unlock(dqm);

	/* Outside the DQM lock because under the DQM lock we can't do
	 * reclaim or take other locks that others hold while reclaiming.
	 */
	kfd_inc_compute_active(dqm->dev);

	return retval;
}
static int unregister_process(struct device_queue_manager *dqm,
					struct qcm_process_device *qpd)
{
	int retval;
	struct device_process_node *cur, *next;

	pr_debug("qpd->queues_list is %s\n",
			list_empty(&qpd->queues_list) ? "empty" : "not empty");

	retval = 0;
	dqm_lock(dqm);

	list_for_each_entry_safe(cur, next, &dqm->queues, list) {
		if (qpd == cur->qpd) {
			list_del(&cur->list);
			kfree(cur);
			dqm->processes_count--;
			goto out;
		}
	}
	/* qpd not found in dqm list */
	retval = 1;
out:
	dqm_unlock(dqm);

	/* Outside the DQM lock because under the DQM lock we can't do
	 * reclaim or take other locks that others hold while reclaiming.
	 */
	if (!retval)
		kfd_dec_compute_active(dqm->dev);

	return retval;
}
static int
set_pasid_vmid_mapping(struct device_queue_manager *dqm, unsigned int pasid,
			unsigned int vmid)
{
	return dqm->dev->kfd2kgd->set_pasid_vmid_mapping(
						dqm->dev->kgd, pasid, vmid);
}

static void init_interrupts(struct device_queue_manager *dqm)
{
	unsigned int i;

	for (i = 0 ; i < get_pipes_per_mec(dqm) ; i++)
		if (is_pipe_enabled(dqm, 0, i))
			dqm->dev->kfd2kgd->init_interrupts(dqm->dev->kgd, i);
}
static int initialize_nocpsch(struct device_queue_manager *dqm)
{
	int pipe, queue;

	pr_debug("num of pipes: %d\n", get_pipes_per_mec(dqm));

	dqm->allocated_queues = kcalloc(get_pipes_per_mec(dqm),
					sizeof(unsigned int), GFP_KERNEL);
	if (!dqm->allocated_queues)
		return -ENOMEM;

	mutex_init(&dqm->lock_hidden);
	INIT_LIST_HEAD(&dqm->queues);
	dqm->queue_count = dqm->next_pipe_to_allocate = 0;
	dqm->sdma_queue_count = 0;
	dqm->xgmi_sdma_queue_count = 0;

	for (pipe = 0; pipe < get_pipes_per_mec(dqm); pipe++) {
		int pipe_offset = pipe * get_queues_per_pipe(dqm);

		for (queue = 0; queue < get_queues_per_pipe(dqm); queue++)
			if (test_bit(pipe_offset + queue,
				     dqm->dev->shared_resources.queue_bitmap))
				dqm->allocated_queues[pipe] |= 1 << queue;
	}

	memset(dqm->vmid_pasid, 0, sizeof(dqm->vmid_pasid));

	dqm->sdma_bitmap = ~0ULL >> (64 - get_num_sdma_queues(dqm));
	dqm->xgmi_sdma_bitmap = ~0ULL >> (64 - get_num_xgmi_sdma_queues(dqm));

	return 0;
}
static void uninitialize(struct device_queue_manager *dqm)
{
	int i;

	WARN_ON(dqm->queue_count > 0 || dqm->processes_count > 0);

	kfree(dqm->allocated_queues);
	for (i = 0 ; i < KFD_MQD_TYPE_MAX ; i++)
		kfree(dqm->mqd_mgrs[i]);
	mutex_destroy(&dqm->lock_hidden);
}

static int start_nocpsch(struct device_queue_manager *dqm)
{
	pr_info("SW scheduler is used");
	init_interrupts(dqm);

	if (dqm->dev->device_info->asic_family == CHIP_HAWAII)
		return pm_init(&dqm->packets, dqm);
	dqm->sched_running = true;

	return 0;
}

static int stop_nocpsch(struct device_queue_manager *dqm)
{
	if (dqm->dev->device_info->asic_family == CHIP_HAWAII)
		pm_uninit(&dqm->packets, false);
	dqm->sched_running = false;

	return 0;
}

static void pre_reset(struct device_queue_manager *dqm)
{
	dqm_lock(dqm);
	dqm->is_resetting = true;
	dqm_unlock(dqm);
}
static int allocate_sdma_queue(struct device_queue_manager *dqm,
				struct queue *q)
{
	int bit;

	if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
		if (dqm->sdma_bitmap == 0)
			return -ENOMEM;
		bit = __ffs64(dqm->sdma_bitmap);
		dqm->sdma_bitmap &= ~(1ULL << bit);
		q->sdma_id = bit;
		q->properties.sdma_engine_id = q->sdma_id %
				get_num_sdma_engines(dqm);
		q->properties.sdma_queue_id = q->sdma_id /
				get_num_sdma_engines(dqm);
	} else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
		if (dqm->xgmi_sdma_bitmap == 0)
			return -ENOMEM;
		bit = __ffs64(dqm->xgmi_sdma_bitmap);
		dqm->xgmi_sdma_bitmap &= ~(1ULL << bit);
		q->sdma_id = bit;
		/* sdma_engine_id is an sdma id that covers both
		 * PCIe-optimized SDMAs and XGMI-optimized SDMAs. The
		 * calculation below assumes the first N engines are
		 * always the PCIe-optimized ones.
		 */
		q->properties.sdma_engine_id = get_num_sdma_engines(dqm) +
				q->sdma_id % get_num_xgmi_sdma_engines(dqm);
		q->properties.sdma_queue_id = q->sdma_id /
				get_num_xgmi_sdma_engines(dqm);
	}
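	/*
	 * Illustrative example (engine counts are hypothetical): with 2
	 * PCIe-optimized engines and 6 XGMI-optimized engines, an XGMI queue
	 * with sdma_id = 7 gets sdma_engine_id = 2 + 7 % 6 = 3 and
	 * sdma_queue_id = 7 / 6 = 1, i.e. the second queue on XGMI engine 1.
	 */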
	pr_debug("SDMA engine id: %d\n", q->properties.sdma_engine_id);
	pr_debug("SDMA queue id: %d\n", q->properties.sdma_queue_id);

	return 0;
}

static void deallocate_sdma_queue(struct device_queue_manager *dqm,
				struct queue *q)
{
	if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
		if (q->sdma_id >= get_num_sdma_queues(dqm))
			return;
		dqm->sdma_bitmap |= (1ULL << q->sdma_id);
	} else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
		if (q->sdma_id >= get_num_xgmi_sdma_queues(dqm))
			return;
		dqm->xgmi_sdma_bitmap |= (1ULL << q->sdma_id);
	}
}
/*
 * Device Queue Manager implementation for cp scheduler
 */

static int set_sched_resources(struct device_queue_manager *dqm)
{
	int i, mec;
	struct scheduling_resources res;

	res.vmid_mask = dqm->dev->shared_resources.compute_vmid_bitmap;

	res.queue_mask = 0;
	for (i = 0; i < KGD_MAX_QUEUES; ++i) {
		mec = (i / dqm->dev->shared_resources.num_queue_per_pipe)
			/ dqm->dev->shared_resources.num_pipe_per_mec;

		if (!test_bit(i, dqm->dev->shared_resources.queue_bitmap))
			continue;

		/* only acquire queues from the first MEC */
		if (mec > 0)
			continue;

		/* This situation may be hit in the future if a new HW
		 * generation exposes more than 64 queues. If so, the
		 * definition of res.queue_mask needs updating
		 */
		if (WARN_ON(i >= (sizeof(res.queue_mask)*8))) {
			pr_err("Invalid queue enabled by amdgpu: %d\n", i);
			break;
		}

		res.queue_mask |= (1ull << i);
	}
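	/*
	 * Example of the resulting mask (bitmap contents are hypothetical):
	 * if only queues 8..15 of MEC 0 are left to KFD in queue_bitmap, the
	 * loop above yields res.queue_mask = 0xff00, i.e. one bit per HQD
	 * slot handed to the HW scheduler.
	 */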
	res.gws_mask = ~0ull;
	res.oac_mask = res.gds_heap_base = res.gds_heap_size = 0;

	pr_debug("Scheduling resources:\n"
			"vmid mask: 0x%8X\n"
			"queue mask: 0x%8llX\n",
			res.vmid_mask, res.queue_mask);

	return pm_send_set_resources(&dqm->packets, &res);
}
static int initialize_cpsch(struct device_queue_manager *dqm)
{
	pr_debug("num of pipes: %d\n", get_pipes_per_mec(dqm));

	mutex_init(&dqm->lock_hidden);
	INIT_LIST_HEAD(&dqm->queues);
	dqm->queue_count = dqm->processes_count = 0;
	dqm->sdma_queue_count = 0;
	dqm->xgmi_sdma_queue_count = 0;
	dqm->active_runlist = false;
	dqm->sdma_bitmap = ~0ULL >> (64 - get_num_sdma_queues(dqm));
	dqm->xgmi_sdma_bitmap = ~0ULL >> (64 - get_num_xgmi_sdma_queues(dqm));
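	/*
	 * Illustrative example (queue counts are hypothetical): with 8 SDMA
	 * queues, ~0ULL >> (64 - 8) = 0xff, i.e. bits 0..7 start out free.
	 * allocate_sdma_queue() clears a bit on allocation and
	 * deallocate_sdma_queue() sets it again on release.
	 */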
	INIT_WORK(&dqm->hw_exception_work, kfd_process_hw_exception);

	return 0;
}

static int start_cpsch(struct device_queue_manager *dqm)
{
	int retval;

	retval = pm_init(&dqm->packets, dqm);
	if (retval)
		goto fail_packet_manager_init;

	retval = set_sched_resources(dqm);
	if (retval)
		goto fail_set_sched_resources;

	pr_debug("Allocating fence memory\n");

	/* allocate fence memory on the gart */
	retval = kfd_gtt_sa_allocate(dqm->dev, sizeof(*dqm->fence_addr),
					&dqm->fence_mem);
	if (retval)
		goto fail_allocate_vidmem;

	dqm->fence_addr = dqm->fence_mem->cpu_ptr;
	dqm->fence_gpu_addr = dqm->fence_mem->gpu_addr;

	init_interrupts(dqm);

	dqm_lock(dqm);
	/* clear the hang status when the driver tries to start the hw scheduler */
	dqm->is_hws_hang = false;
	dqm->is_resetting = false;
	dqm->sched_running = true;
	execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
	dqm_unlock(dqm);

	return 0;

fail_allocate_vidmem:
fail_set_sched_resources:
	pm_uninit(&dqm->packets, false);
fail_packet_manager_init:
	return retval;
}
static int stop_cpsch(struct device_queue_manager *dqm)
{
	bool hanging;

	dqm_lock(dqm);
	if (!dqm->is_hws_hang)
		unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0);
	hanging = dqm->is_hws_hang || dqm->is_resetting;
	dqm->sched_running = false;
	dqm_unlock(dqm);

	kfd_gtt_sa_free(dqm->dev, dqm->fence_mem);
	pm_uninit(&dqm->packets, hanging);

	return 0;
}
static int create_kernel_queue_cpsch(struct device_queue_manager *dqm,
					struct kernel_queue *kq,
					struct qcm_process_device *qpd)
{
	dqm_lock(dqm);
	if (dqm->total_queue_count >= max_num_of_queues_per_device) {
		pr_warn("Can't create new kernel queue because %d queues were already created\n",
				dqm->total_queue_count);
		dqm_unlock(dqm);
		return -EPERM;
	}

	/*
	 * Unconditionally increment this counter, regardless of the queue's
	 * type or whether the queue is active.
	 */
	dqm->total_queue_count++;
	pr_debug("Total of %d queues are accountable so far\n",
			dqm->total_queue_count);

	list_add(&kq->list, &qpd->priv_queue_list);
	dqm->queue_count++;
	qpd->is_debug = true;
	execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
	dqm_unlock(dqm);

	return 0;
}

static void destroy_kernel_queue_cpsch(struct device_queue_manager *dqm,
					struct kernel_queue *kq,
					struct qcm_process_device *qpd)
{
	dqm_lock(dqm);
	list_del(&kq->list);
	dqm->queue_count--;
	qpd->is_debug = false;
	execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0);
	/*
	 * Unconditionally decrement this counter, regardless of the queue's
	 * type.
	 */
	dqm->total_queue_count--;
	pr_debug("Total of %d queues are accountable so far\n",
			dqm->total_queue_count);
	dqm_unlock(dqm);
}
static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
			struct qcm_process_device *qpd)
{
	int retval;
	struct mqd_manager *mqd_mgr;

	if (dqm->total_queue_count >= max_num_of_queues_per_device) {
		pr_warn("Can't create new usermode queue because %d queues were already created\n",
				dqm->total_queue_count);
		retval = -EPERM;
		goto out;
	}

	if (q->properties.type == KFD_QUEUE_TYPE_SDMA ||
		q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
		dqm_lock(dqm);
		retval = allocate_sdma_queue(dqm, q);
		dqm_unlock(dqm);
		if (retval)
			goto out;
	}

	retval = allocate_doorbell(qpd, q);
	if (retval)
		goto out_deallocate_sdma_queue;

	mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
			q->properties.type)];

	if (q->properties.type == KFD_QUEUE_TYPE_SDMA ||
		q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
		dqm->asic_ops.init_sdma_vm(dqm, q, qpd);
	q->properties.tba_addr = qpd->tba_addr;
	q->properties.tma_addr = qpd->tma_addr;
	q->mqd_mem_obj = mqd_mgr->allocate_mqd(mqd_mgr->dev, &q->properties);
	if (!q->mqd_mem_obj) {
		retval = -ENOMEM;
		goto out_deallocate_doorbell;
	}

	dqm_lock(dqm);
	/*
	 * Eviction state logic: mark all queues as evicted, even ones
	 * not currently active. Restoring inactive queues later only
	 * updates the is_evicted flag but is a no-op otherwise.
	 */
	q->properties.is_evicted = !!qpd->evicted;
	mqd_mgr->init_mqd(mqd_mgr, &q->mqd, q->mqd_mem_obj,
				&q->gart_mqd_addr, &q->properties);

	list_add(&q->list, &qpd->queues_list);

	if (q->properties.is_active) {
		dqm->queue_count++;
		retval = execute_queues_cpsch(dqm,
				KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
	}

	if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
		dqm->sdma_queue_count++;
	else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
		dqm->xgmi_sdma_queue_count++;
	/*
	 * Unconditionally increment this counter, regardless of the queue's
	 * type or whether the queue is active.
	 */
	dqm->total_queue_count++;

	pr_debug("Total of %d queues are accountable so far\n",
			dqm->total_queue_count);

	dqm_unlock(dqm);
	return retval;

out_deallocate_doorbell:
	deallocate_doorbell(qpd, q);
out_deallocate_sdma_queue:
	if (q->properties.type == KFD_QUEUE_TYPE_SDMA ||
		q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
		dqm_lock(dqm);
		deallocate_sdma_queue(dqm, q);
		dqm_unlock(dqm);
	}
out:
	return retval;
}
int amdkfd_fence_wait_timeout(unsigned int *fence_addr,
				unsigned int fence_value,
				unsigned int timeout_ms)
{
	unsigned long end_jiffies = msecs_to_jiffies(timeout_ms) + jiffies;

	while (*fence_addr != fence_value) {
		if (time_after(jiffies, end_jiffies)) {
			pr_err("qcm fence wait loop timeout expired\n");
			/* In the HWS case, this is used to halt the driver
			 * thread in order not to mess up CP states before
			 * doing scandumps for FW debugging.
			 */
			while (halt_if_hws_hang)
				schedule();

			return -ETIME;
		}
		schedule();
	}

	return 0;
}

static int unmap_sdma_queues(struct device_queue_manager *dqm)
{
	int i, retval = 0;

	for (i = 0; i < dqm->dev->device_info->num_sdma_engines +
		dqm->dev->device_info->num_xgmi_sdma_engines; i++) {
		retval = pm_send_unmap_queue(&dqm->packets, KFD_QUEUE_TYPE_SDMA,
			KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0, false, i);
		if (retval)
			return retval;
	}

	return retval;
}
/* dqm->lock mutex has to be locked before calling this function */
static int map_queues_cpsch(struct device_queue_manager *dqm)
{
	int retval;

	if (!dqm->sched_running)
		return 0;
	if (dqm->queue_count <= 0 || dqm->processes_count <= 0)
		return 0;
	if (dqm->active_runlist)
		return 0;

	retval = pm_send_runlist(&dqm->packets, &dqm->queues);
	pr_debug("%s sent runlist\n", __func__);
	if (retval) {
		pr_err("failed to execute runlist\n");
		return retval;
	}
	dqm->active_runlist = true;

	return retval;
}
/* dqm->lock mutex has to be locked before calling this function */
static int unmap_queues_cpsch(struct device_queue_manager *dqm,
				enum kfd_unmap_queues_filter filter,
				uint32_t filter_param)
{
	int retval = 0;

	if (!dqm->sched_running)
		return 0;
	if (dqm->is_hws_hang)
		return -EIO;
	if (!dqm->active_runlist)
		return retval;

	pr_debug("Before destroying queues, sdma queue count is : %u, xgmi sdma queue count is : %u\n",
		dqm->sdma_queue_count, dqm->xgmi_sdma_queue_count);

	if (dqm->sdma_queue_count > 0 || dqm->xgmi_sdma_queue_count)
		unmap_sdma_queues(dqm);

	retval = pm_send_unmap_queue(&dqm->packets, KFD_QUEUE_TYPE_COMPUTE,
			filter, filter_param, false, 0);
	if (retval)
		return retval;

	*dqm->fence_addr = KFD_FENCE_INIT;
	pm_send_query_status(&dqm->packets, dqm->fence_gpu_addr,
				KFD_FENCE_COMPLETED);
	/* should be timed out */
	retval = amdkfd_fence_wait_timeout(dqm->fence_addr, KFD_FENCE_COMPLETED,
				queue_preemption_timeout_ms);
	if (retval) {
		pr_err("The cp might be in an unrecoverable state due to an unsuccessful queues preemption\n");
		dqm->is_hws_hang = true;
		/* It's possible we're detecting a HWS hang in the
		 * middle of a GPU reset. No need to schedule another
		 * reset in this case.
		 */
		if (!dqm->is_resetting)
			schedule_work(&dqm->hw_exception_work);
		return retval;
	}

	pm_release_ib(&dqm->packets);
	dqm->active_runlist = false;

	return retval;
}
/* dqm->lock mutex has to be locked before calling this function */
static int execute_queues_cpsch(struct device_queue_manager *dqm,
				enum kfd_unmap_queues_filter filter,
				uint32_t filter_param)
{
	int retval;

	if (dqm->is_hws_hang)
		return -EIO;
	retval = unmap_queues_cpsch(dqm, filter, filter_param);
	if (retval)
		return retval;

	return map_queues_cpsch(dqm);
}
static int destroy_queue_cpsch(struct device_queue_manager *dqm,
				struct qcm_process_device *qpd,
				struct queue *q)
{
	int retval;
	struct mqd_manager *mqd_mgr;

	retval = 0;

	/* remove queue from list to prevent rescheduling after preemption */
	dqm_lock(dqm);

	if (qpd->is_debug) {
		/*
		 * error, currently we do not allow to destroy a queue
		 * of a currently debugged process
		 */
		retval = -EBUSY;
		goto failed_try_destroy_debugged_queue;
	}

	mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
			q->properties.type)];

	deallocate_doorbell(qpd, q);

	if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
		dqm->sdma_queue_count--;
		deallocate_sdma_queue(dqm, q);
	} else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
		dqm->xgmi_sdma_queue_count--;
		deallocate_sdma_queue(dqm, q);
	}

	list_del(&q->list);
	if (q->properties.is_active) {
		dqm->queue_count--;
		retval = execute_queues_cpsch(dqm,
				KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
		if (retval == -ETIME)
			qpd->reset_wavefronts = true;
	}

	/*
	 * Unconditionally decrement this counter, regardless of the queue's
	 * type.
	 */
	dqm->total_queue_count--;
	pr_debug("Total of %d queues are accountable so far\n",
			dqm->total_queue_count);

	dqm_unlock(dqm);

	/* Do free_mqd after dqm_unlock(dqm) to avoid circular locking */
	mqd_mgr->free_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);

	return retval;

failed_try_destroy_debugged_queue:

	dqm_unlock(dqm);
	return retval;
}
/*
 * Low bits must be 0000/FFFF as required by HW, high bits must be 0 to
 * stay in user mode.
 */
#define APE1_FIXED_BITS_MASK 0xFFFF80000000FFFFULL
/* APE1 limit is inclusive and 64K aligned. */
#define APE1_LIMIT_ALIGNMENT 0xFFFF

static bool set_cache_memory_policy(struct device_queue_manager *dqm,
				   struct qcm_process_device *qpd,
				   enum cache_policy default_policy,
				   enum cache_policy alternate_policy,
				   void __user *alternate_aperture_base,
				   uint64_t alternate_aperture_size)
{
	bool retval = true;

	if (!dqm->asic_ops.set_cache_memory_policy)
		return retval;

	dqm_lock(dqm);

	if (alternate_aperture_size == 0) {
		/* base > limit disables APE1 */
		qpd->sh_mem_ape1_base = 1;
		qpd->sh_mem_ape1_limit = 0;
	} else {
		/*
		 * In FSA64, APE1_Base[63:0] = { 16{SH_MEM_APE1_BASE[31]},
		 *			SH_MEM_APE1_BASE[31:0], 0x0000 }
		 * APE1_Limit[63:0] = { 16{SH_MEM_APE1_LIMIT[31]},
		 *			SH_MEM_APE1_LIMIT[31:0], 0xFFFF }
		 * Verify that the base and size parameters can be
		 * represented in this format and convert them.
		 * Additionally restrict APE1 to user-mode addresses.
		 */
		uint64_t base = (uintptr_t)alternate_aperture_base;
		uint64_t limit = base + alternate_aperture_size - 1;

		if (limit <= base || (base & APE1_FIXED_BITS_MASK) != 0 ||
		   (limit & APE1_FIXED_BITS_MASK) != APE1_LIMIT_ALIGNMENT) {
			retval = false;
			goto out;
		}

		qpd->sh_mem_ape1_base = base >> 16;
		qpd->sh_mem_ape1_limit = limit >> 16;
	}
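	/*
	 * Worked example (addresses are made up): base = 0x1_0000_0000 and
	 * size = 0x10000 give limit = 0x1_0000_FFFF; both pass the
	 * APE1_FIXED_BITS_MASK checks, so sh_mem_ape1_base and
	 * sh_mem_ape1_limit both become 0x10000, matching the 64K-aligned
	 * register encoding described in the comment above.
	 */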
	retval = dqm->asic_ops.set_cache_memory_policy(
			dqm,
			qpd,
			default_policy,
			alternate_policy,
			alternate_aperture_base,
			alternate_aperture_size);

	if ((dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) && (qpd->vmid != 0))
		program_sh_mem_settings(dqm, qpd);

	pr_debug("sh_mem_config: 0x%x, ape1_base: 0x%x, ape1_limit: 0x%x\n",
		qpd->sh_mem_config, qpd->sh_mem_ape1_base,
		qpd->sh_mem_ape1_limit);

out:
	dqm_unlock(dqm);
	return retval;
}
static int set_trap_handler(struct device_queue_manager *dqm,
				struct qcm_process_device *qpd,
				uint64_t tba_addr,
				uint64_t tma_addr)
{
	uint64_t *tma;

	if (dqm->dev->cwsr_enabled) {
		/* Jump from CWSR trap handler to user trap */
		tma = (uint64_t *)(qpd->cwsr_kaddr + KFD_CWSR_TMA_OFFSET);
		tma[0] = tba_addr;
		tma[1] = tma_addr;
	} else {
		qpd->tba_addr = tba_addr;
		qpd->tma_addr = tma_addr;
	}

	return 0;
}
static int process_termination_nocpsch(struct device_queue_manager *dqm,
		struct qcm_process_device *qpd)
{
	struct queue *q, *next;
	struct device_process_node *cur, *next_dpn;
	int retval = 0;
	bool found = false;

	dqm_lock(dqm);

	/* Clear all user mode queues */
	list_for_each_entry_safe(q, next, &qpd->queues_list, list) {
		int ret;

		ret = destroy_queue_nocpsch_locked(dqm, qpd, q);
		if (ret)
			retval = ret;
	}

	/* Unregister process */
	list_for_each_entry_safe(cur, next_dpn, &dqm->queues, list) {
		if (qpd == cur->qpd) {
			list_del(&cur->list);
			kfree(cur);
			dqm->processes_count--;
			found = true;
			break;
		}
	}

	dqm_unlock(dqm);

	/* Outside the DQM lock because under the DQM lock we can't do
	 * reclaim or take other locks that others hold while reclaiming.
	 */
	if (found)
		kfd_dec_compute_active(dqm->dev);

	return retval;
}
static int get_wave_state(struct device_queue_manager *dqm,
			  struct queue *q,
			  void __user *ctl_stack,
			  u32 *ctl_stack_used_size,
			  u32 *save_area_used_size)
{
	struct mqd_manager *mqd_mgr;
	int r;

	dqm_lock(dqm);

	if (q->properties.type != KFD_QUEUE_TYPE_COMPUTE ||
	    q->properties.is_active || !q->device->cwsr_enabled) {
		r = -EINVAL;
		goto dqm_unlock;
	}

	mqd_mgr = dqm->mqd_mgrs[KFD_MQD_TYPE_CP];

	if (!mqd_mgr->get_wave_state) {
		r = -EINVAL;
		goto dqm_unlock;
	}

	r = mqd_mgr->get_wave_state(mqd_mgr, q->mqd, ctl_stack,
			ctl_stack_used_size, save_area_used_size);

dqm_unlock:
	dqm_unlock(dqm);
	return r;
}
static int process_termination_cpsch(struct device_queue_manager *dqm,
		struct qcm_process_device *qpd)
{
	int retval;
	struct queue *q, *next;
	struct kernel_queue *kq, *kq_next;
	struct mqd_manager *mqd_mgr;
	struct device_process_node *cur, *next_dpn;
	enum kfd_unmap_queues_filter filter =
		KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES;
	bool found = false;

	retval = 0;

	dqm_lock(dqm);

	/* Clean all kernel queues */
	list_for_each_entry_safe(kq, kq_next, &qpd->priv_queue_list, list) {
		list_del(&kq->list);
		dqm->queue_count--;
		qpd->is_debug = false;
		dqm->total_queue_count--;
		filter = KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES;
	}

	/* Clear all user mode queues */
	list_for_each_entry(q, &qpd->queues_list, list) {
		if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
			dqm->sdma_queue_count--;
			deallocate_sdma_queue(dqm, q);
		} else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
			dqm->xgmi_sdma_queue_count--;
			deallocate_sdma_queue(dqm, q);
		}

		if (q->properties.is_active)
			dqm->queue_count--;

		dqm->total_queue_count--;
	}

	/* Unregister process */
	list_for_each_entry_safe(cur, next_dpn, &dqm->queues, list) {
		if (qpd == cur->qpd) {
			list_del(&cur->list);
			kfree(cur);
			dqm->processes_count--;
			found = true;
			break;
		}
	}

	retval = execute_queues_cpsch(dqm, filter, 0);
	if ((!dqm->is_hws_hang) && (retval || qpd->reset_wavefronts)) {
		pr_warn("Resetting wave fronts (cpsch) on dev %p\n", dqm->dev);
		dbgdev_wave_reset_wavefronts(dqm->dev, qpd->pqm->process);
		qpd->reset_wavefronts = false;
	}

	dqm_unlock(dqm);

	/* Outside the DQM lock because under the DQM lock we can't do
	 * reclaim or take other locks that others hold while reclaiming.
	 */
	if (found)
		kfd_dec_compute_active(dqm->dev);

	/* Lastly, free mqd resources.
	 * Do free_mqd() after dqm_unlock to avoid circular locking.
	 */
	list_for_each_entry_safe(q, next, &qpd->queues_list, list) {
		mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
				q->properties.type)];
		list_del(&q->list);
		mqd_mgr->free_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);
	}

	return retval;
}
static int init_mqd_managers(struct device_queue_manager *dqm)
{
	int i, j;
	struct mqd_manager *mqd_mgr;

	for (i = 0; i < KFD_MQD_TYPE_MAX; i++) {
		mqd_mgr = dqm->asic_ops.mqd_manager_init(i, dqm->dev);
		if (!mqd_mgr) {
			pr_err("mqd manager [%d] initialization failed\n", i);
			goto out_free;
		}
		dqm->mqd_mgrs[i] = mqd_mgr;
	}

	return 0;

out_free:
	for (j = 0; j < i; j++) {
		kfree(dqm->mqd_mgrs[j]);
		dqm->mqd_mgrs[j] = NULL;
	}

	return -ENOMEM;
}
/* Allocate one hiq mqd (HWS) and all SDMA mqds in one contiguous chunk */
static int allocate_hiq_sdma_mqd(struct device_queue_manager *dqm)
{
	int retval;
	struct kfd_dev *dev = dqm->dev;
	struct kfd_mem_obj *mem_obj = &dqm->hiq_sdma_mqd;
	uint32_t size = dqm->mqd_mgrs[KFD_MQD_TYPE_SDMA]->mqd_size *
		(dev->device_info->num_sdma_engines +
		dev->device_info->num_xgmi_sdma_engines) *
		dev->device_info->num_sdma_queues_per_engine +
		dqm->mqd_mgrs[KFD_MQD_TYPE_HIQ]->mqd_size;
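	/*
	 * Sizing example (counts and sizes are hypothetical): with a 256-byte
	 * SDMA MQD, 2 + 6 = 8 SDMA engines, 8 queues per engine and a
	 * 512-byte HIQ MQD, size = 256 * 8 * 8 + 512 = 16896 bytes for the
	 * whole chunk.
	 */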
	retval = amdgpu_amdkfd_alloc_gtt_mem(dev->kgd, size,
		&(mem_obj->gtt_mem), &(mem_obj->gpu_addr),
		(void *)&(mem_obj->cpu_ptr), true);

	return retval;
}
struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev)
{
	struct device_queue_manager *dqm;

	pr_debug("Loading device queue manager\n");

	dqm = kzalloc(sizeof(*dqm), GFP_KERNEL);
	if (!dqm)
		return NULL;

	dqm->dev = dev;
	switch (dev->device_info->asic_family) {
	/* HWS is not available on Hawaii. */
	case CHIP_HAWAII:
	/* HWS depends on CWSR for timely dequeue. CWSR is not
	 * available on Tonga.
	 *
	 * FIXME: This argument also applies to Kaveri.
	 */
	case CHIP_TONGA:
		dqm->sched_policy = KFD_SCHED_POLICY_NO_HWS;
		break;
	default:
		dqm->sched_policy = sched_policy;
		break;
	}

	switch (dqm->sched_policy) {
	case KFD_SCHED_POLICY_HWS:
	case KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION:
		/* initialize dqm for cp scheduling */
		dqm->ops.create_queue = create_queue_cpsch;
		dqm->ops.initialize = initialize_cpsch;
		dqm->ops.start = start_cpsch;
		dqm->ops.stop = stop_cpsch;
		dqm->ops.pre_reset = pre_reset;
		dqm->ops.destroy_queue = destroy_queue_cpsch;
		dqm->ops.update_queue = update_queue;
		dqm->ops.register_process = register_process;
		dqm->ops.unregister_process = unregister_process;
		dqm->ops.uninitialize = uninitialize;
		dqm->ops.create_kernel_queue = create_kernel_queue_cpsch;
		dqm->ops.destroy_kernel_queue = destroy_kernel_queue_cpsch;
		dqm->ops.set_cache_memory_policy = set_cache_memory_policy;
		dqm->ops.set_trap_handler = set_trap_handler;
		dqm->ops.process_termination = process_termination_cpsch;
		dqm->ops.evict_process_queues = evict_process_queues_cpsch;
		dqm->ops.restore_process_queues = restore_process_queues_cpsch;
		dqm->ops.get_wave_state = get_wave_state;
		break;
	case KFD_SCHED_POLICY_NO_HWS:
		/* initialize dqm for no cp scheduling */
		dqm->ops.start = start_nocpsch;
		dqm->ops.stop = stop_nocpsch;
		dqm->ops.pre_reset = pre_reset;
		dqm->ops.create_queue = create_queue_nocpsch;
		dqm->ops.destroy_queue = destroy_queue_nocpsch;
		dqm->ops.update_queue = update_queue;
		dqm->ops.register_process = register_process;
		dqm->ops.unregister_process = unregister_process;
		dqm->ops.initialize = initialize_nocpsch;
		dqm->ops.uninitialize = uninitialize;
		dqm->ops.set_cache_memory_policy = set_cache_memory_policy;
		dqm->ops.set_trap_handler = set_trap_handler;
		dqm->ops.process_termination = process_termination_nocpsch;
		dqm->ops.evict_process_queues = evict_process_queues_nocpsch;
		dqm->ops.restore_process_queues =
			restore_process_queues_nocpsch;
		dqm->ops.get_wave_state = get_wave_state;
		break;
	default:
		pr_err("Invalid scheduling policy %d\n", dqm->sched_policy);
		goto out_free;
	}

	switch (dev->device_info->asic_family) {
	case CHIP_CARRIZO:
		device_queue_manager_init_vi(&dqm->asic_ops);
		break;

	case CHIP_KAVERI:
		device_queue_manager_init_cik(&dqm->asic_ops);
		break;

	case CHIP_HAWAII:
		device_queue_manager_init_cik_hawaii(&dqm->asic_ops);
		break;

	case CHIP_TONGA:
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
		device_queue_manager_init_vi_tonga(&dqm->asic_ops);
		break;

	case CHIP_VEGA10:
		device_queue_manager_init_v9(&dqm->asic_ops);
		break;

	case CHIP_NAVI10:
		device_queue_manager_init_v10_navi10(&dqm->asic_ops);
		break;

	default:
		WARN(1, "Unexpected ASIC family %u",
		     dev->device_info->asic_family);
		goto out_free;
	}

	if (init_mqd_managers(dqm))
		goto out_free;

	if (allocate_hiq_sdma_mqd(dqm)) {
		pr_err("Failed to allocate hiq sdma mqd trunk buffer\n");
		goto out_free;
	}

	if (!dqm->ops.initialize(dqm))
		return dqm;

out_free:
	kfree(dqm);
	return NULL;
}
static void deallocate_hiq_sdma_mqd(struct kfd_dev *dev,
				struct kfd_mem_obj *mqd)
{
	WARN(!mqd, "No hiq sdma mqd trunk to free");

	amdgpu_amdkfd_free_gtt_mem(dev->kgd, mqd->gtt_mem);
}

void device_queue_manager_uninit(struct device_queue_manager *dqm)
{
	dqm->ops.uninitialize(dqm);
	deallocate_hiq_sdma_mqd(dqm->dev, &dqm->hiq_sdma_mqd);
	kfree(dqm);
}
int kfd_process_vm_fault(struct device_queue_manager *dqm,
			 unsigned int pasid)
{
	struct kfd_process_device *pdd;
	struct kfd_process *p = kfd_lookup_process_by_pasid(pasid);
	int ret = 0;

	if (!p)
		return -EINVAL;
	pdd = kfd_get_process_device_data(dqm->dev, p);
	if (pdd)
		ret = dqm->ops.evict_process_queues(dqm, &pdd->qpd);
	kfd_unref_process(p);

	return ret;
}

static void kfd_process_hw_exception(struct work_struct *work)
{
	struct device_queue_manager *dqm = container_of(work,
			struct device_queue_manager, hw_exception_work);

	amdgpu_amdkfd_gpu_reset(dqm->dev->kgd);
}
#if defined(CONFIG_DEBUG_FS)

static void seq_reg_dump(struct seq_file *m,
			 uint32_t (*dump)[2], uint32_t n_regs)
{
	uint32_t i, count;

	for (i = 0, count = 0; i < n_regs; i++) {
		if (count == 0 ||
		    dump[i-1][0] + sizeof(uint32_t) != dump[i][0]) {
			seq_printf(m, "%s %08x: %08x",
				   i ? "\n" : "",
				   dump[i][0], dump[i][1]);
			count = 7;
		} else {
			seq_printf(m, " %08x", dump[i][1]);
			count--;
		}
	}

	seq_puts(m, "\n");
}
int dqm_debugfs_hqds(struct seq_file *m, void *data)
{
	struct device_queue_manager *dqm = data;
	uint32_t (*dump)[2], n_regs;
	int pipe, queue;
	int r = 0;

	if (!dqm->sched_running) {
		seq_printf(m, " Device is stopped\n");

		return 0;
	}

	r = dqm->dev->kfd2kgd->hqd_dump(dqm->dev->kgd,
					KFD_CIK_HIQ_PIPE, KFD_CIK_HIQ_QUEUE,
					&dump, &n_regs);
	if (!r) {
		seq_printf(m, "  HIQ on MEC %d Pipe %d Queue %d\n",
			   KFD_CIK_HIQ_PIPE/get_pipes_per_mec(dqm)+1,
			   KFD_CIK_HIQ_PIPE%get_pipes_per_mec(dqm),
			   KFD_CIK_HIQ_QUEUE);
		seq_reg_dump(m, dump, n_regs);

		kfree(dump);
	}

	for (pipe = 0; pipe < get_pipes_per_mec(dqm); pipe++) {
		int pipe_offset = pipe * get_queues_per_pipe(dqm);

		for (queue = 0; queue < get_queues_per_pipe(dqm); queue++) {
			if (!test_bit(pipe_offset + queue,
				      dqm->dev->shared_resources.queue_bitmap))
				continue;

			r = dqm->dev->kfd2kgd->hqd_dump(
				dqm->dev->kgd, pipe, queue, &dump, &n_regs);
			if (r)
				break;

			seq_printf(m, "  CP Pipe %d, Queue %d\n",
				   pipe, queue);
			seq_reg_dump(m, dump, n_regs);

			kfree(dump);
		}
	}

	for (pipe = 0; pipe < get_num_sdma_engines(dqm) +
			get_num_xgmi_sdma_engines(dqm); pipe++) {
		for (queue = 0;
		     queue < dqm->dev->device_info->num_sdma_queues_per_engine;
		     queue++) {
			r = dqm->dev->kfd2kgd->hqd_sdma_dump(
				dqm->dev->kgd, pipe, queue, &dump, &n_regs);
			if (r)
				break;

			seq_printf(m, "  SDMA Engine %d, RLC %d\n",
				   pipe, queue);
			seq_reg_dump(m, dump, n_regs);

			kfree(dump);
		}
	}

	return r;
}
int dqm_debugfs_execute_queues(struct device_queue_manager *dqm)
{
	int r = 0;

	dqm_lock(dqm);
	dqm->active_runlist = true;
	r = execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0);
	dqm_unlock(dqm);

	return r;
}

#endif