treewide: remove redundant IS_ERR() before error code check
[linux/fpc-iii.git] / drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
blob 2870553a2ce0e9896453ff858ff49bf08111f434
1 /*
2 * Copyright 2014 Advanced Micro Devices, Inc.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
24 #include <linux/ratelimit.h>
25 #include <linux/printk.h>
26 #include <linux/slab.h>
27 #include <linux/list.h>
28 #include <linux/types.h>
29 #include <linux/bitops.h>
30 #include <linux/sched.h>
31 #include "kfd_priv.h"
32 #include "kfd_device_queue_manager.h"
33 #include "kfd_mqd_manager.h"
34 #include "cik_regs.h"
35 #include "kfd_kernel_queue.h"
36 #include "amdgpu_amdkfd.h"
38 /* Size of the per-pipe EOP queue */
39 #define CIK_HPD_EOP_BYTES_LOG2 11
40 #define CIK_HPD_EOP_BYTES (1U << CIK_HPD_EOP_BYTES_LOG2)
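/* i.e. 1 << 11 = 2048 bytes of EOP buffer per pipe */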
42 static int set_pasid_vmid_mapping(struct device_queue_manager *dqm,
43 unsigned int pasid, unsigned int vmid);
45 static int execute_queues_cpsch(struct device_queue_manager *dqm,
46 enum kfd_unmap_queues_filter filter,
47 uint32_t filter_param);
48 static int unmap_queues_cpsch(struct device_queue_manager *dqm,
49 enum kfd_unmap_queues_filter filter,
50 uint32_t filter_param);
52 static int map_queues_cpsch(struct device_queue_manager *dqm);
54 static void deallocate_sdma_queue(struct device_queue_manager *dqm,
55 struct queue *q);
57 static inline void deallocate_hqd(struct device_queue_manager *dqm,
58 struct queue *q);
59 static int allocate_hqd(struct device_queue_manager *dqm, struct queue *q);
60 static int allocate_sdma_queue(struct device_queue_manager *dqm,
61 struct queue *q);
62 static void kfd_process_hw_exception(struct work_struct *work);
64 static inline
65 enum KFD_MQD_TYPE get_mqd_type_from_queue_type(enum kfd_queue_type type)
67 if (type == KFD_QUEUE_TYPE_SDMA || type == KFD_QUEUE_TYPE_SDMA_XGMI)
68 return KFD_MQD_TYPE_SDMA;
69 return KFD_MQD_TYPE_CP;
72 static bool is_pipe_enabled(struct device_queue_manager *dqm, int mec, int pipe)
74 int i;
75 int pipe_offset = mec * dqm->dev->shared_resources.num_pipe_per_mec
76 + pipe * dqm->dev->shared_resources.num_queue_per_pipe;
78 /* queue is available for KFD usage if bit is 1 */
79 for (i = 0; i < dqm->dev->shared_resources.num_queue_per_pipe; ++i)
80 if (test_bit(pipe_offset + i,
81 dqm->dev->shared_resources.queue_bitmap))
82 return true;
83 return false;
86 unsigned int get_queues_num(struct device_queue_manager *dqm)
88 return bitmap_weight(dqm->dev->shared_resources.queue_bitmap,
89 KGD_MAX_QUEUES);
92 unsigned int get_queues_per_pipe(struct device_queue_manager *dqm)
94 return dqm->dev->shared_resources.num_queue_per_pipe;
97 unsigned int get_pipes_per_mec(struct device_queue_manager *dqm)
99 return dqm->dev->shared_resources.num_pipe_per_mec;
102 static unsigned int get_num_sdma_engines(struct device_queue_manager *dqm)
104 return dqm->dev->device_info->num_sdma_engines;
107 static unsigned int get_num_xgmi_sdma_engines(struct device_queue_manager *dqm)
109 return dqm->dev->device_info->num_xgmi_sdma_engines;
112 unsigned int get_num_sdma_queues(struct device_queue_manager *dqm)
114 return dqm->dev->device_info->num_sdma_engines
115 * dqm->dev->device_info->num_sdma_queues_per_engine;
118 unsigned int get_num_xgmi_sdma_queues(struct device_queue_manager *dqm)
120 return dqm->dev->device_info->num_xgmi_sdma_engines
121 * dqm->dev->device_info->num_sdma_queues_per_engine;
124 void program_sh_mem_settings(struct device_queue_manager *dqm,
125 struct qcm_process_device *qpd)
127 return dqm->dev->kfd2kgd->program_sh_mem_settings(
128 dqm->dev->kgd, qpd->vmid,
129 qpd->sh_mem_config,
130 qpd->sh_mem_ape1_base,
131 qpd->sh_mem_ape1_limit,
132 qpd->sh_mem_bases);
135 static int allocate_doorbell(struct qcm_process_device *qpd, struct queue *q)
137 struct kfd_dev *dev = qpd->dqm->dev;
139 if (!KFD_IS_SOC15(dev->device_info->asic_family)) {
140 /* On pre-SOC15 chips we need to use the queue ID to
141 * preserve the user mode ABI.
143 q->doorbell_id = q->properties.queue_id;
144 } else if (q->properties.type == KFD_QUEUE_TYPE_SDMA ||
145 q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
146 /* For SDMA queues on SOC15 with 8-byte doorbell, use static
147 * doorbell assignments based on the engine and queue id.
148 * The doorbell index distance between RLC (2*i) and (2*i+1)
149 * for an SDMA engine is 512.
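* e.g. if idx_offset[engine] = N, queue 0 maps to doorbell N,
* queue 1 to N + 512, queue 2 to N + 1, queue 3 to N + 513, and so on.
*/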
151 uint32_t *idx_offset =
152 dev->shared_resources.sdma_doorbell_idx;
154 q->doorbell_id = idx_offset[q->properties.sdma_engine_id]
155 + (q->properties.sdma_queue_id & 1)
156 * KFD_QUEUE_DOORBELL_MIRROR_OFFSET
157 + (q->properties.sdma_queue_id >> 1);
158 } else {
159 /* For CP queues on SOC15 reserve a free doorbell ID */
160 unsigned int found;
162 found = find_first_zero_bit(qpd->doorbell_bitmap,
163 KFD_MAX_NUM_OF_QUEUES_PER_PROCESS);
164 if (found >= KFD_MAX_NUM_OF_QUEUES_PER_PROCESS) {
165 pr_debug("No doorbells available");
166 return -EBUSY;
168 set_bit(found, qpd->doorbell_bitmap);
169 q->doorbell_id = found;
172 q->properties.doorbell_off =
173 kfd_get_doorbell_dw_offset_in_bar(dev, q->process,
174 q->doorbell_id);
176 return 0;
179 static void deallocate_doorbell(struct qcm_process_device *qpd,
180 struct queue *q)
182 unsigned int old;
183 struct kfd_dev *dev = qpd->dqm->dev;
185 if (!KFD_IS_SOC15(dev->device_info->asic_family) ||
186 q->properties.type == KFD_QUEUE_TYPE_SDMA ||
187 q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
188 return;
190 old = test_and_clear_bit(q->doorbell_id, qpd->doorbell_bitmap);
191 WARN_ON(!old);
194 static int allocate_vmid(struct device_queue_manager *dqm,
195 struct qcm_process_device *qpd,
196 struct queue *q)
198 int allocated_vmid = -1, i;
200 for (i = dqm->dev->vm_info.first_vmid_kfd;
201 i <= dqm->dev->vm_info.last_vmid_kfd; i++) {
202 if (!dqm->vmid_pasid[i]) {
203 allocated_vmid = i;
204 break;
208 if (allocated_vmid < 0) {
209 pr_err("no more vmid to allocate\n");
210 return -ENOSPC;
213 pr_debug("vmid allocated: %d\n", allocated_vmid);
215 dqm->vmid_pasid[allocated_vmid] = q->process->pasid;
217 set_pasid_vmid_mapping(dqm, q->process->pasid, allocated_vmid);
219 qpd->vmid = allocated_vmid;
220 q->properties.vmid = allocated_vmid;
222 program_sh_mem_settings(dqm, qpd);
224 /* qpd->page_table_base is set earlier when register_process()
225 * is called, i.e. when the first queue is created.
227 dqm->dev->kfd2kgd->set_vm_context_page_table_base(dqm->dev->kgd,
228 qpd->vmid,
229 qpd->page_table_base);
230 /* invalidate the VM context after pasid and vmid mapping is set up */
231 kfd_flush_tlb(qpd_to_pdd(qpd));
233 if (dqm->dev->kfd2kgd->set_scratch_backing_va)
234 dqm->dev->kfd2kgd->set_scratch_backing_va(dqm->dev->kgd,
235 qpd->sh_hidden_private_base, qpd->vmid);
237 return 0;
240 static int flush_texture_cache_nocpsch(struct kfd_dev *kdev,
241 struct qcm_process_device *qpd)
243 const struct packet_manager_funcs *pmf = qpd->dqm->packets.pmf;
244 int ret;
246 if (!qpd->ib_kaddr)
247 return -ENOMEM;
249 ret = pmf->release_mem(qpd->ib_base, (uint32_t *)qpd->ib_kaddr);
250 if (ret)
251 return ret;
253 return amdgpu_amdkfd_submit_ib(kdev->kgd, KGD_ENGINE_MEC1, qpd->vmid,
254 qpd->ib_base, (uint32_t *)qpd->ib_kaddr,
255 pmf->release_mem_size / sizeof(uint32_t));
258 static void deallocate_vmid(struct device_queue_manager *dqm,
259 struct qcm_process_device *qpd,
260 struct queue *q)
262 /* On GFX v7, CP doesn't flush TC at dequeue */
263 if (q->device->device_info->asic_family == CHIP_HAWAII)
264 if (flush_texture_cache_nocpsch(q->device, qpd))
265 pr_err("Failed to flush TC\n");
267 kfd_flush_tlb(qpd_to_pdd(qpd));
269 /* Release the vmid mapping */
270 set_pasid_vmid_mapping(dqm, 0, qpd->vmid);
271 dqm->vmid_pasid[qpd->vmid] = 0;
273 qpd->vmid = 0;
274 q->properties.vmid = 0;
277 static int create_queue_nocpsch(struct device_queue_manager *dqm,
278 struct queue *q,
279 struct qcm_process_device *qpd)
281 struct mqd_manager *mqd_mgr;
282 int retval;
284 print_queue(q);
286 dqm_lock(dqm);
288 if (dqm->total_queue_count >= max_num_of_queues_per_device) {
289 pr_warn("Can't create new usermode queue because %d queues were already created\n",
290 dqm->total_queue_count);
291 retval = -EPERM;
292 goto out_unlock;
295 if (list_empty(&qpd->queues_list)) {
296 retval = allocate_vmid(dqm, qpd, q);
297 if (retval)
298 goto out_unlock;
300 q->properties.vmid = qpd->vmid;
302 * Eviction state logic: mark all queues as evicted, even ones
303 * not currently active. Restoring inactive queues later only
304 * updates the is_evicted flag but is a no-op otherwise.
306 q->properties.is_evicted = !!qpd->evicted;
308 q->properties.tba_addr = qpd->tba_addr;
309 q->properties.tma_addr = qpd->tma_addr;
311 mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
312 q->properties.type)];
313 if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE) {
314 retval = allocate_hqd(dqm, q);
315 if (retval)
316 goto deallocate_vmid;
317 pr_debug("Loading mqd to hqd on pipe %d, queue %d\n",
318 q->pipe, q->queue);
319 } else if (q->properties.type == KFD_QUEUE_TYPE_SDMA ||
320 q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
321 retval = allocate_sdma_queue(dqm, q);
322 if (retval)
323 goto deallocate_vmid;
324 dqm->asic_ops.init_sdma_vm(dqm, q, qpd);
327 retval = allocate_doorbell(qpd, q);
328 if (retval)
329 goto out_deallocate_hqd;
331 /* Temporarily release dqm lock to avoid a circular lock dependency */
332 dqm_unlock(dqm);
333 q->mqd_mem_obj = mqd_mgr->allocate_mqd(mqd_mgr->dev, &q->properties);
334 dqm_lock(dqm);
336 if (!q->mqd_mem_obj) {
337 retval = -ENOMEM;
338 goto out_deallocate_doorbell;
340 mqd_mgr->init_mqd(mqd_mgr, &q->mqd, q->mqd_mem_obj,
341 &q->gart_mqd_addr, &q->properties);
342 if (q->properties.is_active) {
343 if (!dqm->sched_running) {
344 WARN_ONCE(1, "Load non-HWS mqd while stopped\n");
345 goto add_queue_to_list;
348 if (WARN(q->process->mm != current->mm,
349 "should only run in user thread"))
350 retval = -EFAULT;
351 else
352 retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd, q->pipe,
353 q->queue, &q->properties, current->mm);
354 if (retval)
355 goto out_free_mqd;
358 add_queue_to_list:
359 list_add(&q->list, &qpd->queues_list);
360 qpd->queue_count++;
361 if (q->properties.is_active)
362 dqm->queue_count++;
364 if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
365 dqm->sdma_queue_count++;
366 else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
367 dqm->xgmi_sdma_queue_count++;
370 * Unconditionally increment this counter, regardless of the queue's
371 * type or whether the queue is active.
373 dqm->total_queue_count++;
374 pr_debug("Total of %d queues are accountable so far\n",
375 dqm->total_queue_count);
376 goto out_unlock;
378 out_free_mqd:
379 mqd_mgr->free_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);
380 out_deallocate_doorbell:
381 deallocate_doorbell(qpd, q);
382 out_deallocate_hqd:
383 if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE)
384 deallocate_hqd(dqm, q);
385 else if (q->properties.type == KFD_QUEUE_TYPE_SDMA ||
386 q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
387 deallocate_sdma_queue(dqm, q);
388 deallocate_vmid:
389 if (list_empty(&qpd->queues_list))
390 deallocate_vmid(dqm, qpd, q);
391 out_unlock:
392 dqm_unlock(dqm);
393 return retval;
396 static int allocate_hqd(struct device_queue_manager *dqm, struct queue *q)
398 bool set;
399 int pipe, bit, i;
401 set = false;
403 for (pipe = dqm->next_pipe_to_allocate, i = 0;
404 i < get_pipes_per_mec(dqm);
405 pipe = ((pipe + 1) % get_pipes_per_mec(dqm)), ++i) {
407 if (!is_pipe_enabled(dqm, 0, pipe))
408 continue;
410 if (dqm->allocated_queues[pipe] != 0) {
411 bit = ffs(dqm->allocated_queues[pipe]) - 1;
412 dqm->allocated_queues[pipe] &= ~(1 << bit);
413 q->pipe = pipe;
414 q->queue = bit;
415 set = true;
416 break;
420 if (!set)
421 return -EBUSY;
423 pr_debug("hqd slot - pipe %d, queue %d\n", q->pipe, q->queue);
424 /* horizontal hqd allocation: round-robin across enabled pipes */
425 dqm->next_pipe_to_allocate = (pipe + 1) % get_pipes_per_mec(dqm);
427 return 0;
430 static inline void deallocate_hqd(struct device_queue_manager *dqm,
431 struct queue *q)
433 dqm->allocated_queues[q->pipe] |= (1 << q->queue);
436 /* Access to DQM has to be locked before calling destroy_queue_nocpsch_locked
437 * to avoid unsynchronized access
439 static int destroy_queue_nocpsch_locked(struct device_queue_manager *dqm,
440 struct qcm_process_device *qpd,
441 struct queue *q)
443 int retval;
444 struct mqd_manager *mqd_mgr;
446 mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
447 q->properties.type)];
449 if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE) {
450 deallocate_hqd(dqm, q);
451 } else if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
452 dqm->sdma_queue_count--;
453 deallocate_sdma_queue(dqm, q);
454 } else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
455 dqm->xgmi_sdma_queue_count--;
456 deallocate_sdma_queue(dqm, q);
457 } else {
458 pr_debug("q->properties.type %d is invalid\n",
459 q->properties.type);
460 return -EINVAL;
462 dqm->total_queue_count--;
464 deallocate_doorbell(qpd, q);
466 if (!dqm->sched_running) {
467 WARN_ONCE(1, "Destroy non-HWS queue while stopped\n");
468 return 0;
471 retval = mqd_mgr->destroy_mqd(mqd_mgr, q->mqd,
472 KFD_PREEMPT_TYPE_WAVEFRONT_RESET,
473 KFD_UNMAP_LATENCY_MS,
474 q->pipe, q->queue);
475 if (retval == -ETIME)
476 qpd->reset_wavefronts = true;
478 mqd_mgr->free_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);
480 list_del(&q->list);
481 if (list_empty(&qpd->queues_list)) {
482 if (qpd->reset_wavefronts) {
483 pr_warn("Resetting wave fronts (nocpsch) on dev %p\n",
484 dqm->dev);
485 /* dbgdev_wave_reset_wavefronts has to be called before
486 * deallocate_vmid(), i.e. when vmid is still in use.
488 dbgdev_wave_reset_wavefronts(dqm->dev,
489 qpd->pqm->process);
490 qpd->reset_wavefronts = false;
493 deallocate_vmid(dqm, qpd, q);
495 qpd->queue_count--;
496 if (q->properties.is_active)
497 dqm->queue_count--;
499 return retval;
502 static int destroy_queue_nocpsch(struct device_queue_manager *dqm,
503 struct qcm_process_device *qpd,
504 struct queue *q)
506 int retval;
508 dqm_lock(dqm);
509 retval = destroy_queue_nocpsch_locked(dqm, qpd, q);
510 dqm_unlock(dqm);
512 return retval;
515 static int update_queue(struct device_queue_manager *dqm, struct queue *q)
517 int retval = 0;
518 struct mqd_manager *mqd_mgr;
519 struct kfd_process_device *pdd;
520 bool prev_active = false;
522 dqm_lock(dqm);
523 pdd = kfd_get_process_device_data(q->device, q->process);
524 if (!pdd) {
525 retval = -ENODEV;
526 goto out_unlock;
528 mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
529 q->properties.type)];
531 /* Save previous activity state for counters */
532 prev_active = q->properties.is_active;
534 /* Make sure the queue is unmapped before updating the MQD */
535 if (dqm->sched_policy != KFD_SCHED_POLICY_NO_HWS) {
536 retval = unmap_queues_cpsch(dqm,
537 KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
538 if (retval) {
539 pr_err("unmap queue failed\n");
540 goto out_unlock;
542 } else if (prev_active &&
543 (q->properties.type == KFD_QUEUE_TYPE_COMPUTE ||
544 q->properties.type == KFD_QUEUE_TYPE_SDMA ||
545 q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)) {
547 if (!dqm->sched_running) {
548 WARN_ONCE(1, "Update non-HWS queue while stopped\n");
549 goto out_unlock;
552 retval = mqd_mgr->destroy_mqd(mqd_mgr, q->mqd,
553 KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN,
554 KFD_UNMAP_LATENCY_MS, q->pipe, q->queue);
555 if (retval) {
556 pr_err("destroy mqd failed\n");
557 goto out_unlock;
561 mqd_mgr->update_mqd(mqd_mgr, q->mqd, &q->properties);
564 * check active state vs. the previous state and modify
565 * counter accordingly. map_queues_cpsch uses the
566 * dqm->queue_count to determine whether a new runlist must be
567 * uploaded.
569 if (q->properties.is_active && !prev_active)
570 dqm->queue_count++;
571 else if (!q->properties.is_active && prev_active)
572 dqm->queue_count--;
574 if (dqm->sched_policy != KFD_SCHED_POLICY_NO_HWS)
575 retval = map_queues_cpsch(dqm);
576 else if (q->properties.is_active &&
577 (q->properties.type == KFD_QUEUE_TYPE_COMPUTE ||
578 q->properties.type == KFD_QUEUE_TYPE_SDMA ||
579 q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)) {
580 if (WARN(q->process->mm != current->mm,
581 "should only run in user thread"))
582 retval = -EFAULT;
583 else
584 retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd,
585 q->pipe, q->queue,
586 &q->properties, current->mm);
589 out_unlock:
590 dqm_unlock(dqm);
591 return retval;
594 static int evict_process_queues_nocpsch(struct device_queue_manager *dqm,
595 struct qcm_process_device *qpd)
597 struct queue *q;
598 struct mqd_manager *mqd_mgr;
599 struct kfd_process_device *pdd;
600 int retval, ret = 0;
602 dqm_lock(dqm);
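/* qpd->evicted acts as a reference count: every evict call increments it,
* and only the restore call that brings it back down from 1 reactivates
* the queues.
*/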
603 if (qpd->evicted++ > 0) /* already evicted, do nothing */
604 goto out;
606 pdd = qpd_to_pdd(qpd);
607 pr_info_ratelimited("Evicting PASID 0x%x queues\n",
608 pdd->process->pasid);
610 /* Mark all queues as evicted. Deactivate all active queues on
611 * the qpd.
613 list_for_each_entry(q, &qpd->queues_list, list) {
614 q->properties.is_evicted = true;
615 if (!q->properties.is_active)
616 continue;
618 mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
619 q->properties.type)];
620 q->properties.is_active = false;
621 dqm->queue_count--;
623 if (WARN_ONCE(!dqm->sched_running, "Evict when stopped\n"))
624 continue;
626 retval = mqd_mgr->destroy_mqd(mqd_mgr, q->mqd,
627 KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN,
628 KFD_UNMAP_LATENCY_MS, q->pipe, q->queue);
629 if (retval && !ret)
630 /* Return the first error, but keep going to
631 * maintain a consistent eviction state
633 ret = retval;
636 out:
637 dqm_unlock(dqm);
638 return ret;
641 static int evict_process_queues_cpsch(struct device_queue_manager *dqm,
642 struct qcm_process_device *qpd)
644 struct queue *q;
645 struct kfd_process_device *pdd;
646 int retval = 0;
648 dqm_lock(dqm);
649 if (qpd->evicted++ > 0) /* already evicted, do nothing */
650 goto out;
652 pdd = qpd_to_pdd(qpd);
653 pr_info_ratelimited("Evicting PASID 0x%x queues\n",
654 pdd->process->pasid);
656 /* Mark all queues as evicted. Deactivate all active queues on
657 * the qpd.
659 list_for_each_entry(q, &qpd->queues_list, list) {
660 q->properties.is_evicted = true;
661 if (!q->properties.is_active)
662 continue;
664 q->properties.is_active = false;
665 dqm->queue_count--;
667 retval = execute_queues_cpsch(dqm,
668 qpd->is_debug ?
669 KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES :
670 KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
672 out:
673 dqm_unlock(dqm);
674 return retval;
677 static int restore_process_queues_nocpsch(struct device_queue_manager *dqm,
678 struct qcm_process_device *qpd)
680 struct mm_struct *mm = NULL;
681 struct queue *q;
682 struct mqd_manager *mqd_mgr;
683 struct kfd_process_device *pdd;
684 uint64_t pd_base;
685 int retval, ret = 0;
687 pdd = qpd_to_pdd(qpd);
688 /* Retrieve PD base */
689 pd_base = amdgpu_amdkfd_gpuvm_get_process_page_dir(pdd->vm);
691 dqm_lock(dqm);
692 if (WARN_ON_ONCE(!qpd->evicted)) /* already restored, do nothing */
693 goto out;
694 if (qpd->evicted > 1) { /* ref count still > 0, decrement & quit */
695 qpd->evicted--;
696 goto out;
699 pr_info_ratelimited("Restoring PASID 0x%x queues\n",
700 pdd->process->pasid);
702 /* Update PD Base in QPD */
703 qpd->page_table_base = pd_base;
704 pr_debug("Updated PD address to 0x%llx\n", pd_base);
706 if (!list_empty(&qpd->queues_list)) {
707 dqm->dev->kfd2kgd->set_vm_context_page_table_base(
708 dqm->dev->kgd,
709 qpd->vmid,
710 qpd->page_table_base);
711 kfd_flush_tlb(pdd);
714 /* Take a safe reference to the mm_struct, which may otherwise
715 * disappear even while the kfd_process is still referenced.
717 mm = get_task_mm(pdd->process->lead_thread);
718 if (!mm) {
719 ret = -EFAULT;
720 goto out;
723 /* Remove the eviction flags. Activate queues that are not
724 * inactive for other reasons.
726 list_for_each_entry(q, &qpd->queues_list, list) {
727 q->properties.is_evicted = false;
728 if (!QUEUE_IS_ACTIVE(q->properties))
729 continue;
731 mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
732 q->properties.type)];
733 q->properties.is_active = true;
734 dqm->queue_count++;
736 if (WARN_ONCE(!dqm->sched_running, "Restore when stopped\n"))
737 continue;
739 retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd, q->pipe,
740 q->queue, &q->properties, mm);
741 if (retval && !ret)
742 /* Return the first error, but keep going to
743 * maintain a consistent eviction state
745 ret = retval;
747 qpd->evicted = 0;
748 out:
749 if (mm)
750 mmput(mm);
751 dqm_unlock(dqm);
752 return ret;
755 static int restore_process_queues_cpsch(struct device_queue_manager *dqm,
756 struct qcm_process_device *qpd)
758 struct queue *q;
759 struct kfd_process_device *pdd;
760 uint64_t pd_base;
761 int retval = 0;
763 pdd = qpd_to_pdd(qpd);
764 /* Retrieve PD base */
765 pd_base = amdgpu_amdkfd_gpuvm_get_process_page_dir(pdd->vm);
767 dqm_lock(dqm);
768 if (WARN_ON_ONCE(!qpd->evicted)) /* already restored, do nothing */
769 goto out;
770 if (qpd->evicted > 1) { /* ref count still > 0, decrement & quit */
771 qpd->evicted--;
772 goto out;
775 pr_info_ratelimited("Restoring PASID 0x%x queues\n",
776 pdd->process->pasid);
778 /* Update PD Base in QPD */
779 qpd->page_table_base = pd_base;
780 pr_debug("Updated PD address to 0x%llx\n", pd_base);
782 /* activate all active queues on the qpd */
783 list_for_each_entry(q, &qpd->queues_list, list) {
784 q->properties.is_evicted = false;
785 if (!QUEUE_IS_ACTIVE(q->properties))
786 continue;
788 q->properties.is_active = true;
789 dqm->queue_count++;
791 retval = execute_queues_cpsch(dqm,
792 KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
793 qpd->evicted = 0;
794 out:
795 dqm_unlock(dqm);
796 return retval;
799 static int register_process(struct device_queue_manager *dqm,
800 struct qcm_process_device *qpd)
802 struct device_process_node *n;
803 struct kfd_process_device *pdd;
804 uint64_t pd_base;
805 int retval;
807 n = kzalloc(sizeof(*n), GFP_KERNEL);
808 if (!n)
809 return -ENOMEM;
811 n->qpd = qpd;
813 pdd = qpd_to_pdd(qpd);
814 /* Retrieve PD base */
815 pd_base = amdgpu_amdkfd_gpuvm_get_process_page_dir(pdd->vm);
817 dqm_lock(dqm);
818 list_add(&n->list, &dqm->queues);
820 /* Update PD Base in QPD */
821 qpd->page_table_base = pd_base;
822 pr_debug("Updated PD address to 0x%llx\n", pd_base);
824 retval = dqm->asic_ops.update_qpd(dqm, qpd);
826 dqm->processes_count++;
828 dqm_unlock(dqm);
830 /* Outside the DQM lock because under the DQM lock we can't do
831 * reclaim or take other locks that others hold while reclaiming.
833 kfd_inc_compute_active(dqm->dev);
835 return retval;
838 static int unregister_process(struct device_queue_manager *dqm,
839 struct qcm_process_device *qpd)
841 int retval;
842 struct device_process_node *cur, *next;
844 pr_debug("qpd->queues_list is %s\n",
845 list_empty(&qpd->queues_list) ? "empty" : "not empty");
847 retval = 0;
848 dqm_lock(dqm);
850 list_for_each_entry_safe(cur, next, &dqm->queues, list) {
851 if (qpd == cur->qpd) {
852 list_del(&cur->list);
853 kfree(cur);
854 dqm->processes_count--;
855 goto out;
858 /* qpd not found in dqm list */
859 retval = 1;
860 out:
861 dqm_unlock(dqm);
863 /* Outside the DQM lock because under the DQM lock we can't do
864 * reclaim or take other locks that others hold while reclaiming.
866 if (!retval)
867 kfd_dec_compute_active(dqm->dev);
869 return retval;
872 static int
873 set_pasid_vmid_mapping(struct device_queue_manager *dqm, unsigned int pasid,
874 unsigned int vmid)
876 return dqm->dev->kfd2kgd->set_pasid_vmid_mapping(
877 dqm->dev->kgd, pasid, vmid);
880 static void init_interrupts(struct device_queue_manager *dqm)
882 unsigned int i;
884 for (i = 0 ; i < get_pipes_per_mec(dqm) ; i++)
885 if (is_pipe_enabled(dqm, 0, i))
886 dqm->dev->kfd2kgd->init_interrupts(dqm->dev->kgd, i);
889 static int initialize_nocpsch(struct device_queue_manager *dqm)
891 int pipe, queue;
893 pr_debug("num of pipes: %d\n", get_pipes_per_mec(dqm));
895 dqm->allocated_queues = kcalloc(get_pipes_per_mec(dqm),
896 sizeof(unsigned int), GFP_KERNEL);
897 if (!dqm->allocated_queues)
898 return -ENOMEM;
900 mutex_init(&dqm->lock_hidden);
901 INIT_LIST_HEAD(&dqm->queues);
902 dqm->queue_count = dqm->next_pipe_to_allocate = 0;
903 dqm->sdma_queue_count = 0;
904 dqm->xgmi_sdma_queue_count = 0;
906 for (pipe = 0; pipe < get_pipes_per_mec(dqm); pipe++) {
907 int pipe_offset = pipe * get_queues_per_pipe(dqm);
909 for (queue = 0; queue < get_queues_per_pipe(dqm); queue++)
910 if (test_bit(pipe_offset + queue,
911 dqm->dev->shared_resources.queue_bitmap))
912 dqm->allocated_queues[pipe] |= 1 << queue;
915 memset(dqm->vmid_pasid, 0, sizeof(dqm->vmid_pasid));
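/* One bit per SDMA queue, set while the queue is free; e.g. 8 queues
* give a bitmap of 0xFF.
*/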
917 dqm->sdma_bitmap = ~0ULL >> (64 - get_num_sdma_queues(dqm));
918 dqm->xgmi_sdma_bitmap = ~0ULL >> (64 - get_num_xgmi_sdma_queues(dqm));
920 return 0;
923 static void uninitialize(struct device_queue_manager *dqm)
925 int i;
927 WARN_ON(dqm->queue_count > 0 || dqm->processes_count > 0);
929 kfree(dqm->allocated_queues);
930 for (i = 0 ; i < KFD_MQD_TYPE_MAX ; i++)
931 kfree(dqm->mqd_mgrs[i]);
932 mutex_destroy(&dqm->lock_hidden);
935 static int start_nocpsch(struct device_queue_manager *dqm)
937 pr_info("SW scheduler is used");
938 init_interrupts(dqm);
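/* Hawaii needs the packet manager even without HWS: the texture cache
* flush in flush_texture_cache_nocpsch() goes through packets.pmf.
*/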
940 if (dqm->dev->device_info->asic_family == CHIP_HAWAII)
941 return pm_init(&dqm->packets, dqm);
942 dqm->sched_running = true;
944 return 0;
947 static int stop_nocpsch(struct device_queue_manager *dqm)
949 if (dqm->dev->device_info->asic_family == CHIP_HAWAII)
950 pm_uninit(&dqm->packets, false);
951 dqm->sched_running = false;
953 return 0;
956 static void pre_reset(struct device_queue_manager *dqm)
958 dqm_lock(dqm);
959 dqm->is_resetting = true;
960 dqm_unlock(dqm);
963 static int allocate_sdma_queue(struct device_queue_manager *dqm,
964 struct queue *q)
966 int bit;
968 if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
969 if (dqm->sdma_bitmap == 0)
970 return -ENOMEM;
971 bit = __ffs64(dqm->sdma_bitmap);
972 dqm->sdma_bitmap &= ~(1ULL << bit);
973 q->sdma_id = bit;
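/* e.g. with two PCIe-optimized engines, sdma_id 0..3 map to
* (engine 0, queue 0), (engine 1, queue 0), (engine 0, queue 1) and
* (engine 1, queue 1).
*/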
974 q->properties.sdma_engine_id = q->sdma_id %
975 get_num_sdma_engines(dqm);
976 q->properties.sdma_queue_id = q->sdma_id /
977 get_num_sdma_engines(dqm);
978 } else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
979 if (dqm->xgmi_sdma_bitmap == 0)
980 return -ENOMEM;
981 bit = __ffs64(dqm->xgmi_sdma_bitmap);
982 dqm->xgmi_sdma_bitmap &= ~(1ULL << bit);
983 q->sdma_id = bit;
984 /* sdma_engine_id is sdma id including
985 * both PCIe-optimized SDMAs and XGMI-
986 * optimized SDMAs. The calculation below
987 * assumes the first N engines are always
988 * PCIe-optimized ones
990 q->properties.sdma_engine_id = get_num_sdma_engines(dqm) +
991 q->sdma_id % get_num_xgmi_sdma_engines(dqm);
992 q->properties.sdma_queue_id = q->sdma_id /
993 get_num_xgmi_sdma_engines(dqm);
996 pr_debug("SDMA engine id: %d\n", q->properties.sdma_engine_id);
997 pr_debug("SDMA queue id: %d\n", q->properties.sdma_queue_id);
999 return 0;
1002 static void deallocate_sdma_queue(struct device_queue_manager *dqm,
1003 struct queue *q)
1005 if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
1006 if (q->sdma_id >= get_num_sdma_queues(dqm))
1007 return;
1008 dqm->sdma_bitmap |= (1ULL << q->sdma_id);
1009 } else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
1010 if (q->sdma_id >= get_num_xgmi_sdma_queues(dqm))
1011 return;
1012 dqm->xgmi_sdma_bitmap |= (1ULL << q->sdma_id);
1017 * Device Queue Manager implementation for cp scheduler
1020 static int set_sched_resources(struct device_queue_manager *dqm)
1022 int i, mec;
1023 struct scheduling_resources res;
1025 res.vmid_mask = dqm->dev->shared_resources.compute_vmid_bitmap;
1027 res.queue_mask = 0;
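/* Build a mask with one bit per MEC1 queue that amdgpu handed over to
* KFD in queue_bitmap; the mask width limits this to 64 queues.
*/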
1028 for (i = 0; i < KGD_MAX_QUEUES; ++i) {
1029 mec = (i / dqm->dev->shared_resources.num_queue_per_pipe)
1030 / dqm->dev->shared_resources.num_pipe_per_mec;
1032 if (!test_bit(i, dqm->dev->shared_resources.queue_bitmap))
1033 continue;
1035 /* only acquire queues from the first MEC */
1036 if (mec > 0)
1037 continue;
1039 /* This situation may be hit in the future if a new HW
1040 * generation exposes more than 64 queues. If so, the
1041 * definition of res.queue_mask needs updating
1043 if (WARN_ON(i >= (sizeof(res.queue_mask)*8))) {
1044 pr_err("Invalid queue enabled by amdgpu: %d\n", i);
1045 break;
1048 res.queue_mask |= (1ull << i);
1050 res.gws_mask = ~0ull;
1051 res.oac_mask = res.gds_heap_base = res.gds_heap_size = 0;
1053 pr_debug("Scheduling resources:\n"
1054 "vmid mask: 0x%8X\n"
1055 "queue mask: 0x%8llX\n",
1056 res.vmid_mask, res.queue_mask);
1058 return pm_send_set_resources(&dqm->packets, &res);
1061 static int initialize_cpsch(struct device_queue_manager *dqm)
1063 pr_debug("num of pipes: %d\n", get_pipes_per_mec(dqm));
1065 mutex_init(&dqm->lock_hidden);
1066 INIT_LIST_HEAD(&dqm->queues);
1067 dqm->queue_count = dqm->processes_count = 0;
1068 dqm->sdma_queue_count = 0;
1069 dqm->xgmi_sdma_queue_count = 0;
1070 dqm->active_runlist = false;
1071 dqm->sdma_bitmap = ~0ULL >> (64 - get_num_sdma_queues(dqm));
1072 dqm->xgmi_sdma_bitmap = ~0ULL >> (64 - get_num_xgmi_sdma_queues(dqm));
1074 INIT_WORK(&dqm->hw_exception_work, kfd_process_hw_exception);
1076 return 0;
1079 static int start_cpsch(struct device_queue_manager *dqm)
1081 int retval;
1083 retval = 0;
1085 retval = pm_init(&dqm->packets, dqm);
1086 if (retval)
1087 goto fail_packet_manager_init;
1089 retval = set_sched_resources(dqm);
1090 if (retval)
1091 goto fail_set_sched_resources;
1093 pr_debug("Allocating fence memory\n");
1095 /* allocate fence memory on the gart */
1096 retval = kfd_gtt_sa_allocate(dqm->dev, sizeof(*dqm->fence_addr),
1097 &dqm->fence_mem);
1099 if (retval)
1100 goto fail_allocate_vidmem;
1102 dqm->fence_addr = dqm->fence_mem->cpu_ptr;
1103 dqm->fence_gpu_addr = dqm->fence_mem->gpu_addr;
1105 init_interrupts(dqm);
1107 dqm_lock(dqm);
1108 /* clear hang status when the driver tries to start the hw scheduler */
1109 dqm->is_hws_hang = false;
1110 dqm->is_resetting = false;
1111 dqm->sched_running = true;
1112 execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
1113 dqm_unlock(dqm);
1115 return 0;
1116 fail_allocate_vidmem:
1117 fail_set_sched_resources:
1118 pm_uninit(&dqm->packets, false);
1119 fail_packet_manager_init:
1120 return retval;
1123 static int stop_cpsch(struct device_queue_manager *dqm)
1125 bool hanging;
1127 dqm_lock(dqm);
1128 if (!dqm->is_hws_hang)
1129 unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0);
1130 hanging = dqm->is_hws_hang || dqm->is_resetting;
1131 dqm->sched_running = false;
1132 dqm_unlock(dqm);
1134 kfd_gtt_sa_free(dqm->dev, dqm->fence_mem);
1135 pm_uninit(&dqm->packets, hanging);
1137 return 0;
1140 static int create_kernel_queue_cpsch(struct device_queue_manager *dqm,
1141 struct kernel_queue *kq,
1142 struct qcm_process_device *qpd)
1144 dqm_lock(dqm);
1145 if (dqm->total_queue_count >= max_num_of_queues_per_device) {
1146 pr_warn("Can't create new kernel queue because %d queues were already created\n",
1147 dqm->total_queue_count);
1148 dqm_unlock(dqm);
1149 return -EPERM;
1153 * Unconditionally increment this counter, regardless of the queue's
1154 * type or whether the queue is active.
1156 dqm->total_queue_count++;
1157 pr_debug("Total of %d queues are accountable so far\n",
1158 dqm->total_queue_count);
1160 list_add(&kq->list, &qpd->priv_queue_list);
1161 dqm->queue_count++;
1162 qpd->is_debug = true;
1163 execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
1164 dqm_unlock(dqm);
1166 return 0;
1169 static void destroy_kernel_queue_cpsch(struct device_queue_manager *dqm,
1170 struct kernel_queue *kq,
1171 struct qcm_process_device *qpd)
1173 dqm_lock(dqm);
1174 list_del(&kq->list);
1175 dqm->queue_count--;
1176 qpd->is_debug = false;
1177 execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0);
1179 * Unconditionally decrement this counter, regardless of the queue's
1180 * type.
1182 dqm->total_queue_count--;
1183 pr_debug("Total of %d queues are accountable so far\n",
1184 dqm->total_queue_count);
1185 dqm_unlock(dqm);
1188 static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
1189 struct qcm_process_device *qpd)
1191 int retval;
1192 struct mqd_manager *mqd_mgr;
1194 if (dqm->total_queue_count >= max_num_of_queues_per_device) {
1195 pr_warn("Can't create new usermode queue because %d queues were already created\n",
1196 dqm->total_queue_count);
1197 retval = -EPERM;
1198 goto out;
1201 if (q->properties.type == KFD_QUEUE_TYPE_SDMA ||
1202 q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
1203 dqm_lock(dqm);
1204 retval = allocate_sdma_queue(dqm, q);
1205 dqm_unlock(dqm);
1206 if (retval)
1207 goto out;
1210 retval = allocate_doorbell(qpd, q);
1211 if (retval)
1212 goto out_deallocate_sdma_queue;
1214 mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
1215 q->properties.type)];
1217 if (q->properties.type == KFD_QUEUE_TYPE_SDMA ||
1218 q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
1219 dqm->asic_ops.init_sdma_vm(dqm, q, qpd);
1220 q->properties.tba_addr = qpd->tba_addr;
1221 q->properties.tma_addr = qpd->tma_addr;
1222 q->mqd_mem_obj = mqd_mgr->allocate_mqd(mqd_mgr->dev, &q->properties);
1223 if (!q->mqd_mem_obj) {
1224 retval = -ENOMEM;
1225 goto out_deallocate_doorbell;
1228 dqm_lock(dqm);
1230 * Eviction state logic: mark all queues as evicted, even ones
1231 * not currently active. Restoring inactive queues later only
1232 * updates the is_evicted flag but is a no-op otherwise.
1234 q->properties.is_evicted = !!qpd->evicted;
1235 mqd_mgr->init_mqd(mqd_mgr, &q->mqd, q->mqd_mem_obj,
1236 &q->gart_mqd_addr, &q->properties);
1238 list_add(&q->list, &qpd->queues_list);
1239 qpd->queue_count++;
1240 if (q->properties.is_active) {
1241 dqm->queue_count++;
1242 retval = execute_queues_cpsch(dqm,
1243 KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
1246 if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
1247 dqm->sdma_queue_count++;
1248 else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
1249 dqm->xgmi_sdma_queue_count++;
1251 * Unconditionally increment this counter, regardless of the queue's
1252 * type or whether the queue is active.
1254 dqm->total_queue_count++;
1256 pr_debug("Total of %d queues are accountable so far\n",
1257 dqm->total_queue_count);
1259 dqm_unlock(dqm);
1260 return retval;
1262 out_deallocate_doorbell:
1263 deallocate_doorbell(qpd, q);
1264 out_deallocate_sdma_queue:
1265 if (q->properties.type == KFD_QUEUE_TYPE_SDMA ||
1266 q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
1267 dqm_lock(dqm);
1268 deallocate_sdma_queue(dqm, q);
1269 dqm_unlock(dqm);
1271 out:
1272 return retval;
1275 int amdkfd_fence_wait_timeout(unsigned int *fence_addr,
1276 unsigned int fence_value,
1277 unsigned int timeout_ms)
1279 unsigned long end_jiffies = msecs_to_jiffies(timeout_ms) + jiffies;
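/* Poll until fence_value shows up at fence_addr, yielding the CPU between
* checks, and give up with -ETIME after timeout_ms.
*/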
1281 while (*fence_addr != fence_value) {
1282 if (time_after(jiffies, end_jiffies)) {
1283 pr_err("qcm fence wait loop timeout expired\n");
1284 /* In HWS case, this is used to halt the driver thread
1285 * in order not to mess up CP states before doing
1286 * scandumps for FW debugging.
1288 while (halt_if_hws_hang)
1289 schedule();
1291 return -ETIME;
1293 schedule();
1296 return 0;
1299 static int unmap_sdma_queues(struct device_queue_manager *dqm)
1301 int i, retval = 0;
1303 for (i = 0; i < dqm->dev->device_info->num_sdma_engines +
1304 dqm->dev->device_info->num_xgmi_sdma_engines; i++) {
1305 retval = pm_send_unmap_queue(&dqm->packets, KFD_QUEUE_TYPE_SDMA,
1306 KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0, false, i);
1307 if (retval)
1308 return retval;
1310 return retval;
1313 /* dqm->lock mutex has to be locked before calling this function */
1314 static int map_queues_cpsch(struct device_queue_manager *dqm)
1316 int retval;
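/* Nothing to map if the scheduler is stopped, there are no active queues
* or processes, or a runlist is already active on the HW.
*/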
1318 if (!dqm->sched_running)
1319 return 0;
1320 if (dqm->queue_count <= 0 || dqm->processes_count <= 0)
1321 return 0;
1322 if (dqm->active_runlist)
1323 return 0;
1325 retval = pm_send_runlist(&dqm->packets, &dqm->queues);
1326 pr_debug("%s sent runlist\n", __func__);
1327 if (retval) {
1328 pr_err("failed to execute runlist\n");
1329 return retval;
1331 dqm->active_runlist = true;
1333 return retval;
1336 /* dqm->lock mutex has to be locked before calling this function */
1337 static int unmap_queues_cpsch(struct device_queue_manager *dqm,
1338 enum kfd_unmap_queues_filter filter,
1339 uint32_t filter_param)
1341 int retval = 0;
1343 if (!dqm->sched_running)
1344 return 0;
1345 if (dqm->is_hws_hang)
1346 return -EIO;
1347 if (!dqm->active_runlist)
1348 return retval;
1350 pr_debug("Before destroying queues, sdma queue count is : %u, xgmi sdma queue count is : %u\n",
1351 dqm->sdma_queue_count, dqm->xgmi_sdma_queue_count);
1353 if (dqm->sdma_queue_count > 0 || dqm->xgmi_sdma_queue_count)
1354 unmap_sdma_queues(dqm);
1356 retval = pm_send_unmap_queue(&dqm->packets, KFD_QUEUE_TYPE_COMPUTE,
1357 filter, filter_param, false, 0);
1358 if (retval)
1359 return retval;
1361 *dqm->fence_addr = KFD_FENCE_INIT;
1362 pm_send_query_status(&dqm->packets, dqm->fence_gpu_addr,
1363 KFD_FENCE_COMPLETED);
1364 /* wait for the unmap to complete; returns -ETIME if it never does */
1365 retval = amdkfd_fence_wait_timeout(dqm->fence_addr, KFD_FENCE_COMPLETED,
1366 queue_preemption_timeout_ms);
1367 if (retval) {
1368 pr_err("The cp might be in an unrecoverable state due to an unsuccessful queue preemption\n");
1369 dqm->is_hws_hang = true;
1370 /* It's possible we're detecting a HWS hang in the
1371 * middle of a GPU reset. No need to schedule another
1372 * reset in this case.
1374 if (!dqm->is_resetting)
1375 schedule_work(&dqm->hw_exception_work);
1376 return retval;
1379 pm_release_ib(&dqm->packets);
1380 dqm->active_runlist = false;
1382 return retval;
1385 /* dqm->lock mutex has to be locked before calling this function */
1386 static int execute_queues_cpsch(struct device_queue_manager *dqm,
1387 enum kfd_unmap_queues_filter filter,
1388 uint32_t filter_param)
1390 int retval;
1392 if (dqm->is_hws_hang)
1393 return -EIO;
1394 retval = unmap_queues_cpsch(dqm, filter, filter_param);
1395 if (retval)
1396 return retval;
1398 return map_queues_cpsch(dqm);
1401 static int destroy_queue_cpsch(struct device_queue_manager *dqm,
1402 struct qcm_process_device *qpd,
1403 struct queue *q)
1405 int retval;
1406 struct mqd_manager *mqd_mgr;
1408 retval = 0;
1410 /* remove queue from list to prevent rescheduling after preemption */
1411 dqm_lock(dqm);
1413 if (qpd->is_debug) {
1415 * error, currently we do not allow destroying a queue
1416 * of a process that is being debugged
1418 retval = -EBUSY;
1419 goto failed_try_destroy_debugged_queue;
1423 mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
1424 q->properties.type)];
1426 deallocate_doorbell(qpd, q);
1428 if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
1429 dqm->sdma_queue_count--;
1430 deallocate_sdma_queue(dqm, q);
1431 } else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
1432 dqm->xgmi_sdma_queue_count--;
1433 deallocate_sdma_queue(dqm, q);
1436 list_del(&q->list);
1437 qpd->queue_count--;
1438 if (q->properties.is_active) {
1439 dqm->queue_count--;
1440 retval = execute_queues_cpsch(dqm,
1441 KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
1442 if (retval == -ETIME)
1443 qpd->reset_wavefronts = true;
1447 * Unconditionally decrement this counter, regardless of the queue's
1448 * type
1450 dqm->total_queue_count--;
1451 pr_debug("Total of %d queues are accountable so far\n",
1452 dqm->total_queue_count);
1454 dqm_unlock(dqm);
1456 /* Do free_mqd after dqm_unlock(dqm) to avoid circular locking */
1457 mqd_mgr->free_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);
1459 return retval;
1461 failed_try_destroy_debugged_queue:
1463 dqm_unlock(dqm);
1464 return retval;
1468 * Low bits must be 0000/FFFF as required by HW, high bits must be 0 to
1469 * stay in user mode.
1471 #define APE1_FIXED_BITS_MASK 0xFFFF80000000FFFFULL
1472 /* APE1 limit is inclusive and 64K aligned. */
1473 #define APE1_LIMIT_ALIGNMENT 0xFFFF
1475 static bool set_cache_memory_policy(struct device_queue_manager *dqm,
1476 struct qcm_process_device *qpd,
1477 enum cache_policy default_policy,
1478 enum cache_policy alternate_policy,
1479 void __user *alternate_aperture_base,
1480 uint64_t alternate_aperture_size)
1482 bool retval = true;
1484 if (!dqm->asic_ops.set_cache_memory_policy)
1485 return retval;
1487 dqm_lock(dqm);
1489 if (alternate_aperture_size == 0) {
1490 /* base > limit disables APE1 */
1491 qpd->sh_mem_ape1_base = 1;
1492 qpd->sh_mem_ape1_limit = 0;
1493 } else {
1495 * In FSA64, APE1_Base[63:0] = { 16{SH_MEM_APE1_BASE[31]},
1496 * SH_MEM_APE1_BASE[31:0], 0x0000 }
1497 * APE1_Limit[63:0] = { 16{SH_MEM_APE1_LIMIT[31]},
1498 * SH_MEM_APE1_LIMIT[31:0], 0xFFFF }
1499 * Verify that the base and size parameters can be
1500 * represented in this format and convert them.
1501 * Additionally restrict APE1 to user-mode addresses.
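* e.g. base = 0x10000 and size = 0x20000 give limit = 0x2ffff, stored
* below as sh_mem_ape1_base = 1 and sh_mem_ape1_limit = 2.
*/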
1504 uint64_t base = (uintptr_t)alternate_aperture_base;
1505 uint64_t limit = base + alternate_aperture_size - 1;
1507 if (limit <= base || (base & APE1_FIXED_BITS_MASK) != 0 ||
1508 (limit & APE1_FIXED_BITS_MASK) != APE1_LIMIT_ALIGNMENT) {
1509 retval = false;
1510 goto out;
1513 qpd->sh_mem_ape1_base = base >> 16;
1514 qpd->sh_mem_ape1_limit = limit >> 16;
1517 retval = dqm->asic_ops.set_cache_memory_policy(
1518 dqm,
1519 qpd,
1520 default_policy,
1521 alternate_policy,
1522 alternate_aperture_base,
1523 alternate_aperture_size);
1525 if ((dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) && (qpd->vmid != 0))
1526 program_sh_mem_settings(dqm, qpd);
1528 pr_debug("sh_mem_config: 0x%x, ape1_base: 0x%x, ape1_limit: 0x%x\n",
1529 qpd->sh_mem_config, qpd->sh_mem_ape1_base,
1530 qpd->sh_mem_ape1_limit);
1532 out:
1533 dqm_unlock(dqm);
1534 return retval;
1537 static int set_trap_handler(struct device_queue_manager *dqm,
1538 struct qcm_process_device *qpd,
1539 uint64_t tba_addr,
1540 uint64_t tma_addr)
1542 uint64_t *tma;
1544 if (dqm->dev->cwsr_enabled) {
1545 /* Jump from CWSR trap handler to user trap */
1546 tma = (uint64_t *)(qpd->cwsr_kaddr + KFD_CWSR_TMA_OFFSET);
1547 tma[0] = tba_addr;
1548 tma[1] = tma_addr;
1549 } else {
1550 qpd->tba_addr = tba_addr;
1551 qpd->tma_addr = tma_addr;
1554 return 0;
1557 static int process_termination_nocpsch(struct device_queue_manager *dqm,
1558 struct qcm_process_device *qpd)
1560 struct queue *q, *next;
1561 struct device_process_node *cur, *next_dpn;
1562 int retval = 0;
1563 bool found = false;
1565 dqm_lock(dqm);
1567 /* Clear all user mode queues */
1568 list_for_each_entry_safe(q, next, &qpd->queues_list, list) {
1569 int ret;
1571 ret = destroy_queue_nocpsch_locked(dqm, qpd, q);
1572 if (ret)
1573 retval = ret;
1576 /* Unregister process */
1577 list_for_each_entry_safe(cur, next_dpn, &dqm->queues, list) {
1578 if (qpd == cur->qpd) {
1579 list_del(&cur->list);
1580 kfree(cur);
1581 dqm->processes_count--;
1582 found = true;
1583 break;
1587 dqm_unlock(dqm);
1589 /* Outside the DQM lock because under the DQM lock we can't do
1590 * reclaim or take other locks that others hold while reclaiming.
1592 if (found)
1593 kfd_dec_compute_active(dqm->dev);
1595 return retval;
1598 static int get_wave_state(struct device_queue_manager *dqm,
1599 struct queue *q,
1600 void __user *ctl_stack,
1601 u32 *ctl_stack_used_size,
1602 u32 *save_area_used_size)
1604 struct mqd_manager *mqd_mgr;
1605 int r;
1607 dqm_lock(dqm);
1609 if (q->properties.type != KFD_QUEUE_TYPE_COMPUTE ||
1610 q->properties.is_active || !q->device->cwsr_enabled) {
1611 r = -EINVAL;
1612 goto dqm_unlock;
1615 mqd_mgr = dqm->mqd_mgrs[KFD_MQD_TYPE_CP];
1617 if (!mqd_mgr->get_wave_state) {
1618 r = -EINVAL;
1619 goto dqm_unlock;
1622 r = mqd_mgr->get_wave_state(mqd_mgr, q->mqd, ctl_stack,
1623 ctl_stack_used_size, save_area_used_size);
1625 dqm_unlock:
1626 dqm_unlock(dqm);
1627 return r;
1630 static int process_termination_cpsch(struct device_queue_manager *dqm,
1631 struct qcm_process_device *qpd)
1633 int retval;
1634 struct queue *q, *next;
1635 struct kernel_queue *kq, *kq_next;
1636 struct mqd_manager *mqd_mgr;
1637 struct device_process_node *cur, *next_dpn;
1638 enum kfd_unmap_queues_filter filter =
1639 KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES;
1640 bool found = false;
1642 retval = 0;
1644 dqm_lock(dqm);
1646 /* Clean all kernel queues */
1647 list_for_each_entry_safe(kq, kq_next, &qpd->priv_queue_list, list) {
1648 list_del(&kq->list);
1649 dqm->queue_count--;
1650 qpd->is_debug = false;
1651 dqm->total_queue_count--;
1652 filter = KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES;
1655 /* Clear all user mode queues */
1656 list_for_each_entry(q, &qpd->queues_list, list) {
1657 if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
1658 dqm->sdma_queue_count--;
1659 deallocate_sdma_queue(dqm, q);
1660 } else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
1661 dqm->xgmi_sdma_queue_count--;
1662 deallocate_sdma_queue(dqm, q);
1665 if (q->properties.is_active)
1666 dqm->queue_count--;
1668 dqm->total_queue_count--;
1671 /* Unregister process */
1672 list_for_each_entry_safe(cur, next_dpn, &dqm->queues, list) {
1673 if (qpd == cur->qpd) {
1674 list_del(&cur->list);
1675 kfree(cur);
1676 dqm->processes_count--;
1677 found = true;
1678 break;
1682 retval = execute_queues_cpsch(dqm, filter, 0);
1683 if ((!dqm->is_hws_hang) && (retval || qpd->reset_wavefronts)) {
1684 pr_warn("Resetting wave fronts (cpsch) on dev %p\n", dqm->dev);
1685 dbgdev_wave_reset_wavefronts(dqm->dev, qpd->pqm->process);
1686 qpd->reset_wavefronts = false;
1689 dqm_unlock(dqm);
1691 /* Outside the DQM lock because under the DQM lock we can't do
1692 * reclaim or take other locks that others hold while reclaiming.
1694 if (found)
1695 kfd_dec_compute_active(dqm->dev);
1697 /* Lastly, free mqd resources.
1698 * Do free_mqd() after dqm_unlock to avoid circular locking.
1700 list_for_each_entry_safe(q, next, &qpd->queues_list, list) {
1701 mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
1702 q->properties.type)];
1703 list_del(&q->list);
1704 qpd->queue_count--;
1705 mqd_mgr->free_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);
1708 return retval;
1711 static int init_mqd_managers(struct device_queue_manager *dqm)
1713 int i, j;
1714 struct mqd_manager *mqd_mgr;
1716 for (i = 0; i < KFD_MQD_TYPE_MAX; i++) {
1717 mqd_mgr = dqm->asic_ops.mqd_manager_init(i, dqm->dev);
1718 if (!mqd_mgr) {
1719 pr_err("mqd manager [%d] initialization failed\n", i);
1720 goto out_free;
1722 dqm->mqd_mgrs[i] = mqd_mgr;
1725 return 0;
1727 out_free:
1728 for (j = 0; j < i; j++) {
1729 kfree(dqm->mqd_mgrs[j]);
1730 dqm->mqd_mgrs[j] = NULL;
1733 return -ENOMEM;
1736 /* Allocate one HIQ MQD (HWS) and all SDMA MQDs in one contiguous chunk */
1737 static int allocate_hiq_sdma_mqd(struct device_queue_manager *dqm)
1739 int retval;
1740 struct kfd_dev *dev = dqm->dev;
1741 struct kfd_mem_obj *mem_obj = &dqm->hiq_sdma_mqd;
1742 uint32_t size = dqm->mqd_mgrs[KFD_MQD_TYPE_SDMA]->mqd_size *
1743 (dev->device_info->num_sdma_engines +
1744 dev->device_info->num_xgmi_sdma_engines) *
1745 dev->device_info->num_sdma_queues_per_engine +
1746 dqm->mqd_mgrs[KFD_MQD_TYPE_HIQ]->mqd_size;
1748 retval = amdgpu_amdkfd_alloc_gtt_mem(dev->kgd, size,
1749 &(mem_obj->gtt_mem), &(mem_obj->gpu_addr),
1750 (void *)&(mem_obj->cpu_ptr), true);
1752 return retval;
1755 struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev)
1757 struct device_queue_manager *dqm;
1759 pr_debug("Loading device queue manager\n");
1761 dqm = kzalloc(sizeof(*dqm), GFP_KERNEL);
1762 if (!dqm)
1763 return NULL;
1765 switch (dev->device_info->asic_family) {
1766 /* HWS is not available on Hawaii. */
1767 case CHIP_HAWAII:
1768 /* HWS depends on CWSR for timely dequeue. CWSR is not
1769 * available on Tonga.
1771 * FIXME: This argument also applies to Kaveri.
1773 case CHIP_TONGA:
1774 dqm->sched_policy = KFD_SCHED_POLICY_NO_HWS;
1775 break;
1776 default:
1777 dqm->sched_policy = sched_policy;
1778 break;
1781 dqm->dev = dev;
1782 switch (dqm->sched_policy) {
1783 case KFD_SCHED_POLICY_HWS:
1784 case KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION:
1785 /* initialize dqm for cp scheduling */
1786 dqm->ops.create_queue = create_queue_cpsch;
1787 dqm->ops.initialize = initialize_cpsch;
1788 dqm->ops.start = start_cpsch;
1789 dqm->ops.stop = stop_cpsch;
1790 dqm->ops.pre_reset = pre_reset;
1791 dqm->ops.destroy_queue = destroy_queue_cpsch;
1792 dqm->ops.update_queue = update_queue;
1793 dqm->ops.register_process = register_process;
1794 dqm->ops.unregister_process = unregister_process;
1795 dqm->ops.uninitialize = uninitialize;
1796 dqm->ops.create_kernel_queue = create_kernel_queue_cpsch;
1797 dqm->ops.destroy_kernel_queue = destroy_kernel_queue_cpsch;
1798 dqm->ops.set_cache_memory_policy = set_cache_memory_policy;
1799 dqm->ops.set_trap_handler = set_trap_handler;
1800 dqm->ops.process_termination = process_termination_cpsch;
1801 dqm->ops.evict_process_queues = evict_process_queues_cpsch;
1802 dqm->ops.restore_process_queues = restore_process_queues_cpsch;
1803 dqm->ops.get_wave_state = get_wave_state;
1804 break;
1805 case KFD_SCHED_POLICY_NO_HWS:
1806 /* initialize dqm for no cp scheduling */
1807 dqm->ops.start = start_nocpsch;
1808 dqm->ops.stop = stop_nocpsch;
1809 dqm->ops.pre_reset = pre_reset;
1810 dqm->ops.create_queue = create_queue_nocpsch;
1811 dqm->ops.destroy_queue = destroy_queue_nocpsch;
1812 dqm->ops.update_queue = update_queue;
1813 dqm->ops.register_process = register_process;
1814 dqm->ops.unregister_process = unregister_process;
1815 dqm->ops.initialize = initialize_nocpsch;
1816 dqm->ops.uninitialize = uninitialize;
1817 dqm->ops.set_cache_memory_policy = set_cache_memory_policy;
1818 dqm->ops.set_trap_handler = set_trap_handler;
1819 dqm->ops.process_termination = process_termination_nocpsch;
1820 dqm->ops.evict_process_queues = evict_process_queues_nocpsch;
1821 dqm->ops.restore_process_queues =
1822 restore_process_queues_nocpsch;
1823 dqm->ops.get_wave_state = get_wave_state;
1824 break;
1825 default:
1826 pr_err("Invalid scheduling policy %d\n", dqm->sched_policy);
1827 goto out_free;
1830 switch (dev->device_info->asic_family) {
1831 case CHIP_CARRIZO:
1832 device_queue_manager_init_vi(&dqm->asic_ops);
1833 break;
1835 case CHIP_KAVERI:
1836 device_queue_manager_init_cik(&dqm->asic_ops);
1837 break;
1839 case CHIP_HAWAII:
1840 device_queue_manager_init_cik_hawaii(&dqm->asic_ops);
1841 break;
1843 case CHIP_TONGA:
1844 case CHIP_FIJI:
1845 case CHIP_POLARIS10:
1846 case CHIP_POLARIS11:
1847 case CHIP_POLARIS12:
1848 case CHIP_VEGAM:
1849 device_queue_manager_init_vi_tonga(&dqm->asic_ops);
1850 break;
1852 case CHIP_VEGA10:
1853 case CHIP_VEGA12:
1854 case CHIP_VEGA20:
1855 case CHIP_RAVEN:
1856 case CHIP_RENOIR:
1857 case CHIP_ARCTURUS:
1858 device_queue_manager_init_v9(&dqm->asic_ops);
1859 break;
1860 case CHIP_NAVI10:
1861 case CHIP_NAVI12:
1862 case CHIP_NAVI14:
1863 device_queue_manager_init_v10_navi10(&dqm->asic_ops);
1864 break;
1865 default:
1866 WARN(1, "Unexpected ASIC family %u",
1867 dev->device_info->asic_family);
1868 goto out_free;
1871 if (init_mqd_managers(dqm))
1872 goto out_free;
1874 if (allocate_hiq_sdma_mqd(dqm)) {
1875 pr_err("Failed to allocate hiq sdma mqd trunk buffer\n");
1876 goto out_free;
1879 if (!dqm->ops.initialize(dqm))
1880 return dqm;
1882 out_free:
1883 kfree(dqm);
1884 return NULL;
1887 static void deallocate_hiq_sdma_mqd(struct kfd_dev *dev,
1888 struct kfd_mem_obj *mqd)
1890 WARN(!mqd, "No hiq sdma mqd trunk to free");
1892 amdgpu_amdkfd_free_gtt_mem(dev->kgd, mqd->gtt_mem);
1895 void device_queue_manager_uninit(struct device_queue_manager *dqm)
1897 dqm->ops.uninitialize(dqm);
1898 deallocate_hiq_sdma_mqd(dqm->dev, &dqm->hiq_sdma_mqd);
1899 kfree(dqm);
1902 int kfd_process_vm_fault(struct device_queue_manager *dqm,
1903 unsigned int pasid)
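/* Evict every queue of the process that owns the faulting PASID; the
* process is looked up by PASID and unreferenced afterwards.
*/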
1905 struct kfd_process_device *pdd;
1906 struct kfd_process *p = kfd_lookup_process_by_pasid(pasid);
1907 int ret = 0;
1909 if (!p)
1910 return -EINVAL;
1911 pdd = kfd_get_process_device_data(dqm->dev, p);
1912 if (pdd)
1913 ret = dqm->ops.evict_process_queues(dqm, &pdd->qpd);
1914 kfd_unref_process(p);
1916 return ret;
1919 static void kfd_process_hw_exception(struct work_struct *work)
1921 struct device_queue_manager *dqm = container_of(work,
1922 struct device_queue_manager, hw_exception_work);
1923 amdgpu_amdkfd_gpu_reset(dqm->dev->kgd);
1926 #if defined(CONFIG_DEBUG_FS)
1928 static void seq_reg_dump(struct seq_file *m,
1929 uint32_t (*dump)[2], uint32_t n_regs)
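/* Print the dump as "address: value value ..." rows, starting a new row
* whenever register addresses stop being consecutive or after eight
* values on a row.
*/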
1931 uint32_t i, count;
1933 for (i = 0, count = 0; i < n_regs; i++) {
1934 if (count == 0 ||
1935 dump[i-1][0] + sizeof(uint32_t) != dump[i][0]) {
1936 seq_printf(m, "%s %08x: %08x",
1937 i ? "\n" : "",
1938 dump[i][0], dump[i][1]);
1939 count = 7;
1940 } else {
1941 seq_printf(m, " %08x", dump[i][1]);
1942 count--;
1946 seq_puts(m, "\n");
1949 int dqm_debugfs_hqds(struct seq_file *m, void *data)
1951 struct device_queue_manager *dqm = data;
1952 uint32_t (*dump)[2], n_regs;
1953 int pipe, queue;
1954 int r = 0;
1956 if (!dqm->sched_running) {
1957 seq_printf(m, " Device is stopped\n");
1959 return 0;
1962 r = dqm->dev->kfd2kgd->hqd_dump(dqm->dev->kgd,
1963 KFD_CIK_HIQ_PIPE, KFD_CIK_HIQ_QUEUE,
1964 &dump, &n_regs);
1965 if (!r) {
1966 seq_printf(m, " HIQ on MEC %d Pipe %d Queue %d\n",
1967 KFD_CIK_HIQ_PIPE/get_pipes_per_mec(dqm)+1,
1968 KFD_CIK_HIQ_PIPE%get_pipes_per_mec(dqm),
1969 KFD_CIK_HIQ_QUEUE);
1970 seq_reg_dump(m, dump, n_regs);
1972 kfree(dump);
1975 for (pipe = 0; pipe < get_pipes_per_mec(dqm); pipe++) {
1976 int pipe_offset = pipe * get_queues_per_pipe(dqm);
1978 for (queue = 0; queue < get_queues_per_pipe(dqm); queue++) {
1979 if (!test_bit(pipe_offset + queue,
1980 dqm->dev->shared_resources.queue_bitmap))
1981 continue;
1983 r = dqm->dev->kfd2kgd->hqd_dump(
1984 dqm->dev->kgd, pipe, queue, &dump, &n_regs);
1985 if (r)
1986 break;
1988 seq_printf(m, " CP Pipe %d, Queue %d\n",
1989 pipe, queue);
1990 seq_reg_dump(m, dump, n_regs);
1992 kfree(dump);
1996 for (pipe = 0; pipe < get_num_sdma_engines(dqm) +
1997 get_num_xgmi_sdma_engines(dqm); pipe++) {
1998 for (queue = 0;
1999 queue < dqm->dev->device_info->num_sdma_queues_per_engine;
2000 queue++) {
2001 r = dqm->dev->kfd2kgd->hqd_sdma_dump(
2002 dqm->dev->kgd, pipe, queue, &dump, &n_regs);
2003 if (r)
2004 break;
2006 seq_printf(m, " SDMA Engine %d, RLC %d\n",
2007 pipe, queue);
2008 seq_reg_dump(m, dump, n_regs);
2010 kfree(dump);
2014 return r;
2017 int dqm_debugfs_execute_queues(struct device_queue_manager *dqm)
2019 int r = 0;
2021 dqm_lock(dqm);
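/* Setting active_runlist forces execute_queues_cpsch() to really unmap
* and re-upload the runlist instead of returning early.
*/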
2022 dqm->active_runlist = true;
2023 r = execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0);
2024 dqm_unlock(dqm);
2026 return r;
2029 #endif