/* drivers/misc/habanalabs/hw_queue.c */
// SPDX-License-Identifier: GPL-2.0

/*
 * Copyright 2016-2019 HabanaLabs, Ltd.
 * All Rights Reserved.
 */

#include "habanalabs.h"

#include <linux/slab.h>
/*
 * hl_hw_queue_add_ptr - add to pi or ci and check if it wraps around
 *
 * @ptr: the current pi/ci value
 * @val: the amount to add
 *
 * Add val to ptr. It can go until twice the queue length.
 */
inline u32 hl_hw_queue_add_ptr(u32 ptr, u16 val)
{
	ptr += val;
	ptr &= ((HL_QUEUE_LENGTH << 1) - 1);
	return ptr;
}
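/*
 * Note: pi and ci are deliberately kept in the range [0, 2 * queue length)
 * rather than [0, queue length), so that a completely full queue
 * (pi - ci == queue length) can be told apart from a completely empty one
 * (pi == ci). queue_free_slots() below relies on this invariant.
 */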
static inline int queue_free_slots(struct hl_hw_queue *q, u32 queue_len)
{
	int delta = (q->pi - q->ci);

	if (delta >= 0)
		return (queue_len - delta);
	else
		return (abs(delta) - queue_len);
}
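/*
 * Worked example with illustrative numbers: for queue_len = 8, pi/ci wrap
 * at 16. If pi = 9 and ci = 1, delta = 8, so 8 - 8 = 0 slots are free (the
 * queue is full). If pi has wrapped around to 2 while ci = 12, delta = -10
 * and abs(delta) - queue_len = 10 - 8 = 2 slots are free.
 */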
void hl_int_hw_queue_update_ci(struct hl_cs *cs)
{
	struct hl_device *hdev = cs->ctx->hdev;
	struct hl_hw_queue *q;
	int i;

	hdev->asic_funcs->hw_queues_lock(hdev);

	if (hdev->disabled)
		goto out;

	q = &hdev->kernel_queues[0];
	for (i = 0 ; i < HL_MAX_QUEUES ; i++, q++) {
		if (q->queue_type == QUEUE_TYPE_INT) {
			q->ci += cs->jobs_in_queue_cnt[i];
			q->ci &= ((q->int_queue_len << 1) - 1);
		}
	}

out:
	hdev->asic_funcs->hw_queues_unlock(hdev);
}
/*
 * ext_and_hw_queue_submit_bd() - Submit a buffer descriptor to an external or a
 *                                H/W queue.
 * @hdev: pointer to habanalabs device structure
 * @q: pointer to habanalabs queue structure
 * @ctl: BD's control word
 * @len: BD's length
 * @ptr: BD's pointer
 *
 * This function assumes there is enough space on the queue to submit a new
 * BD to it. It initializes the next BD and calls the device specific
 * function to set the pi (and doorbell).
 *
 * This function must be called when the scheduler mutex is taken.
 */
static void ext_and_hw_queue_submit_bd(struct hl_device *hdev,
			struct hl_hw_queue *q, u32 ctl, u32 len, u64 ptr)
{
	struct hl_bd *bd;

	bd = (struct hl_bd *) (uintptr_t) q->kernel_address;
	bd += hl_pi_2_offset(q->pi);
	bd->ctl = cpu_to_le32(ctl);
	bd->len = cpu_to_le32(len);
	bd->ptr = cpu_to_le64(ptr);

	q->pi = hl_queue_inc_ptr(q->pi);
	hdev->asic_funcs->ring_doorbell(hdev, q->hw_queue_id, q->pi);
}
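/*
 * The BD fields above are converted to little-endian because the device
 * parses the descriptor directly from the queue memory, regardless of host
 * endianness. The doorbell, which publishes the new pi to the device, is
 * rung only after the BD has been fully written.
 */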
/*
 * ext_queue_sanity_checks - perform some sanity checks on external queue
 *
 * @hdev: pointer to hl_device structure
 * @q: pointer to hl_hw_queue structure
 * @num_of_entries: how many entries to check for space
 * @reserve_cq_entry: whether to reserve an entry in the cq
 *
 * H/W queues spinlock should be taken before calling this function
 *
 * Perform the following:
 * - Make sure we have enough space in the h/w queue
 * - Make sure we have enough space in the completion queue
 * - Reserve space in the completion queue (needs to be reversed if there
 *   is a failure down the road before the actual submission of work). Only
 *   do this action if reserve_cq_entry is true
 */
static int ext_queue_sanity_checks(struct hl_device *hdev,
				struct hl_hw_queue *q, int num_of_entries,
				bool reserve_cq_entry)
{
	atomic_t *free_slots =
			&hdev->completion_queue[q->hw_queue_id].free_slots_cnt;
	int free_slots_cnt;

	/* Check we have enough space in the queue */
	free_slots_cnt = queue_free_slots(q, HL_QUEUE_LENGTH);

	if (free_slots_cnt < num_of_entries) {
		dev_dbg(hdev->dev, "Queue %d doesn't have room for %d CBs\n",
			q->hw_queue_id, num_of_entries);
		return -EAGAIN;
	}

	if (reserve_cq_entry) {
		/*
		 * Check we have enough space in the completion queue by
		 * subtracting num_of_entries from the free-slots counter.
		 * atomic_add_negative() returns true if the result is
		 * negative, meaning the CQ is full and we can't submit a
		 * new CB because we won't get an ack on its completion.
		 * In that case, restore the counter and bail out.
		 */
		if (atomic_add_negative(num_of_entries * -1, free_slots)) {
			dev_dbg(hdev->dev, "No space for %d on CQ %d\n",
				num_of_entries, q->hw_queue_id);
			atomic_add(num_of_entries, free_slots);
			return -EAGAIN;
		}
	}

	return 0;
}
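/*
 * Note: a reservation made here with reserve_cq_entry == true must be
 * returned with atomic_add() on any later failure; see the unroll_cq_resv
 * path in hl_hw_queue_schedule_cs() below.
 */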
/*
 * int_queue_sanity_checks - perform some sanity checks on internal queue
 *
 * @hdev: pointer to hl_device structure
 * @q: pointer to hl_hw_queue structure
 * @num_of_entries: how many entries to check for space
 *
 * H/W queues spinlock should be taken before calling this function
 *
 * Perform the following:
 * - Make sure we have enough space in the h/w queue
 */
static int int_queue_sanity_checks(struct hl_device *hdev,
					struct hl_hw_queue *q,
					int num_of_entries)
{
	int free_slots_cnt;

	/* Check we have enough space in the queue */
	free_slots_cnt = queue_free_slots(q, q->int_queue_len);

	if (free_slots_cnt < num_of_entries) {
		dev_dbg(hdev->dev, "Queue %d doesn't have room for %d CBs\n",
			q->hw_queue_id, num_of_entries);
		return -EAGAIN;
	}

	return 0;
}
/*
 * hw_queue_sanity_checks() - Perform some sanity checks on a H/W queue.
 * @hdev: Pointer to hl_device structure.
 * @q: Pointer to hl_hw_queue structure.
 * @num_of_entries: How many entries to check for space.
 *
 * Perform the following:
 * - Make sure we have enough space in the completion queue.
 *   This check also ensures that there is enough space in the h/w queue, as
 *   both queues are of the same size.
 * - Reserve space in the completion queue (needs to be reversed if there
 *   is a failure down the road before the actual submission of work).
 *
 * Both operations are done using the "free_slots_cnt" field of the completion
 * queue. The CI counters of the queue and the completion queue are not
 * needed/used for the H/W queue type.
 */
static int hw_queue_sanity_checks(struct hl_device *hdev, struct hl_hw_queue *q,
					int num_of_entries)
{
	atomic_t *free_slots =
			&hdev->completion_queue[q->hw_queue_id].free_slots_cnt;

	/*
	 * Check we have enough space in the completion queue by subtracting
	 * num_of_entries from the free-slots counter. atomic_add_negative()
	 * returns true if the result is negative, meaning the CQ is full and
	 * we can't submit a new CB. In that case, restore the counter and
	 * bail out.
	 */
	if (atomic_add_negative(num_of_entries * -1, free_slots)) {
		dev_dbg(hdev->dev, "No space for %d entries on CQ %d\n",
			num_of_entries, q->hw_queue_id);
		atomic_add(num_of_entries, free_slots);
		return -EAGAIN;
	}

	return 0;
}
/*
 * hl_hw_queue_send_cb_no_cmpl - send a single CB (not a JOB) without completion
 *
 * @hdev: pointer to hl_device structure
 * @hw_queue_id: destination queue's ID
 * @cb_size: size of CB
 * @cb_ptr: pointer to CB location
 *
 * This function sends a single CB, which must NOT generate a completion entry.
 */
int hl_hw_queue_send_cb_no_cmpl(struct hl_device *hdev, u32 hw_queue_id,
				u32 cb_size, u64 cb_ptr)
{
	struct hl_hw_queue *q = &hdev->kernel_queues[hw_queue_id];
	int rc = 0;

	/*
	 * The CPU queue is a synchronous queue with an effective depth of
	 * a single entry (although it is allocated with room for multiple
	 * entries). Therefore, there is a different lock, called
	 * send_cpu_message_lock, that serializes accesses to the CPU queue.
	 * As a result, we don't need to lock the access to the entire H/W
	 * queues module when submitting a JOB to the CPU queue.
	 */
	if (q->queue_type != QUEUE_TYPE_CPU)
		hdev->asic_funcs->hw_queues_lock(hdev);

	if (hdev->disabled) {
		rc = -EPERM;
		goto out;
	}

	/*
	 * hl_hw_queue_send_cb_no_cmpl() is called for queues of a H/W queue
	 * type only on init phase, when the queues are empty and being tested,
	 * so there is no need for sanity checks.
	 */
	if (q->queue_type != QUEUE_TYPE_HW) {
		rc = ext_queue_sanity_checks(hdev, q, 1, false);
		if (rc)
			goto out;
	}

	ext_and_hw_queue_submit_bd(hdev, q, 0, cb_size, cb_ptr);

out:
	if (q->queue_type != QUEUE_TYPE_CPU)
		hdev->asic_funcs->hw_queues_unlock(hdev);

	return rc;
}
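/*
 * Illustrative use (hypothetical caller): an ASIC's init/test code can push
 * a single CB through this function, e.g.:
 *
 *	rc = hl_hw_queue_send_cb_no_cmpl(hdev, hw_queue_id, cb_size,
 *					cb->bus_address);
 *	if (rc)
 *		dev_err(hdev->dev, "failed to send CB to queue %d\n",
 *			hw_queue_id);
 *
 * The caller is responsible for ensuring the CB's contents do not generate
 * a completion entry.
 */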
/*
 * ext_queue_schedule_job - submit a JOB to an external queue
 *
 * @job: pointer to the job that needs to be submitted to the queue
 *
 * This function must be called when the scheduler mutex is taken
 */
static void ext_queue_schedule_job(struct hl_cs_job *job)
{
	struct hl_device *hdev = job->cs->ctx->hdev;
	struct hl_hw_queue *q = &hdev->kernel_queues[job->hw_queue_id];
	struct hl_cq_entry cq_pkt;
	struct hl_cq *cq;
	u64 cq_addr;
	struct hl_cb *cb;
	u32 ctl;
	u32 len;
	u64 ptr;

	/*
	 * Update the JOB ID inside the BD CTL so the device would know what
	 * to write in the completion queue
	 */
	ctl = ((q->pi << BD_CTL_SHADOW_INDEX_SHIFT) & BD_CTL_SHADOW_INDEX_MASK);

	cb = job->patched_cb;
	len = job->job_cb_size;
	ptr = cb->bus_address;

	cq_pkt.data = cpu_to_le32(
				((q->pi << CQ_ENTRY_SHADOW_INDEX_SHIFT)
					& CQ_ENTRY_SHADOW_INDEX_MASK) |
				(1 << CQ_ENTRY_SHADOW_INDEX_VALID_SHIFT) |
				(1 << CQ_ENTRY_READY_SHIFT));

	/*
	 * No need to protect pi_offset because scheduling to the
	 * H/W queues is done under the scheduler mutex
	 *
	 * No need to check if CQ is full because it was already
	 * checked in ext_queue_sanity_checks
	 */
	cq = &hdev->completion_queue[q->hw_queue_id];
	cq_addr = cq->bus_address + cq->pi * sizeof(struct hl_cq_entry);

	hdev->asic_funcs->add_end_of_cb_packets(hdev, cb->kernel_address, len,
						cq_addr,
						le32_to_cpu(cq_pkt.data),
						q->hw_queue_id);

	q->shadow_queue[hl_pi_2_offset(q->pi)] = job;

	cq->pi = hl_cq_inc_ptr(cq->pi);

	ext_and_hw_queue_submit_bd(hdev, q, ctl, len, ptr);
}
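/*
 * The shadow queue entry written above lets the completion path translate
 * the shadow index that the device reports back into the hl_cs_job that
 * was submitted at that pi.
 */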
/*
 * int_queue_schedule_job - submit a JOB to an internal queue
 *
 * @job: pointer to the job that needs to be submitted to the queue
 *
 * This function must be called when the scheduler mutex is taken
 */
static void int_queue_schedule_job(struct hl_cs_job *job)
{
	struct hl_device *hdev = job->cs->ctx->hdev;
	struct hl_hw_queue *q = &hdev->kernel_queues[job->hw_queue_id];
	struct hl_bd bd;
	__le64 *pi;

	bd.ctl = 0;
	bd.len = cpu_to_le32(job->job_cb_size);
	bd.ptr = cpu_to_le64((u64) (uintptr_t) job->user_cb);

	pi = (__le64 *) (uintptr_t) (q->kernel_address +
		((q->pi & (q->int_queue_len - 1)) * sizeof(bd)));

	q->pi++;
	q->pi &= ((q->int_queue_len << 1) - 1);

	hdev->asic_funcs->pqe_write(hdev, pi, &bd);

	hdev->asic_funcs->ring_doorbell(hdev, q->hw_queue_id, q->pi);
}
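/*
 * For internal queues the BD is staged on the stack and handed to the
 * ASIC-specific pqe_write() callback, which controls how the PQE is
 * actually copied into the queue memory.
 */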
/*
 * hw_queue_schedule_job - submit a JOB to a H/W queue
 *
 * @job: pointer to the job that needs to be submitted to the queue
 *
 * This function must be called when the scheduler mutex is taken
 */
static void hw_queue_schedule_job(struct hl_cs_job *job)
{
	struct hl_device *hdev = job->cs->ctx->hdev;
	struct hl_hw_queue *q = &hdev->kernel_queues[job->hw_queue_id];
	struct hl_cq *cq;
	u64 ptr;
	u32 offset, ctl, len;

	/*
	 * Upon PQE completion, COMP_DATA is used as the write data to the
	 * completion queue (QMAN HBW message), and COMP_OFFSET is used as the
	 * write address offset in the SM block (QMAN LBW message).
	 * The write address offset is calculated as "COMP_OFFSET << 2".
	 */
	offset = job->cs->sequence & (HL_MAX_PENDING_CS - 1);
	ctl = ((offset << BD_CTL_COMP_OFFSET_SHIFT) & BD_CTL_COMP_OFFSET_MASK) |
		((q->pi << BD_CTL_COMP_DATA_SHIFT) & BD_CTL_COMP_DATA_MASK);

	len = job->job_cb_size;

	/*
	 * A patched CB is created only if a user CB was allocated by driver
	 * and MMU is disabled. If MMU is enabled, the user CB should be used
	 * instead. If the user CB wasn't allocated by driver, assume that it
	 * holds an address.
	 */
	if (job->patched_cb)
		ptr = job->patched_cb->bus_address;
	else if (job->is_kernel_allocated_cb)
		ptr = job->user_cb->bus_address;
	else
		ptr = (u64) (uintptr_t) job->user_cb;

	/*
	 * No need to protect pi_offset because scheduling to the
	 * H/W queues is done under the scheduler mutex
	 *
	 * No need to check if CQ is full because it was already
	 * checked in hw_queue_sanity_checks
	 */
	cq = &hdev->completion_queue[q->hw_queue_id];
	cq->pi = hl_cq_inc_ptr(cq->pi);

	ext_and_hw_queue_submit_bd(hdev, q, ctl, len, ptr);
}
/*
 * hl_hw_queue_schedule_cs - schedule a command submission
 *
 * @cs: pointer to the CS
 */
int hl_hw_queue_schedule_cs(struct hl_cs *cs)
{
	struct hl_device *hdev = cs->ctx->hdev;
	struct hl_cs_job *job, *tmp;
	struct hl_hw_queue *q;
	int rc = 0, i, cq_cnt;
	hdev->asic_funcs->hw_queues_lock(hdev);

	if (hl_device_disabled_or_in_reset(hdev)) {
		dev_err(hdev->dev,
			"device is disabled or in reset, CS rejected!\n");
		rc = -EPERM;
		goto out;
	}

	q = &hdev->kernel_queues[0];
	for (i = 0, cq_cnt = 0 ; i < HL_MAX_QUEUES ; i++, q++) {
		if (cs->jobs_in_queue_cnt[i]) {
			switch (q->queue_type) {
			case QUEUE_TYPE_EXT:
				rc = ext_queue_sanity_checks(hdev, q,
						cs->jobs_in_queue_cnt[i], true);
				break;
			case QUEUE_TYPE_INT:
				rc = int_queue_sanity_checks(hdev, q,
						cs->jobs_in_queue_cnt[i]);
				break;
			case QUEUE_TYPE_HW:
				rc = hw_queue_sanity_checks(hdev, q,
						cs->jobs_in_queue_cnt[i]);
				break;
			default:
				dev_err(hdev->dev, "Queue type %d is invalid\n",
					q->queue_type);
				rc = -EINVAL;
				break;
			}

			if (rc)
				goto unroll_cq_resv;

			if (q->queue_type == QUEUE_TYPE_EXT ||
					q->queue_type == QUEUE_TYPE_HW)
				cq_cnt++;
		}
	}
	spin_lock(&hdev->hw_queues_mirror_lock);
	list_add_tail(&cs->mirror_node, &hdev->hw_queues_mirror_list);

	/* Queue TDR if the CS is the first entry and if timeout is wanted */
	if ((hdev->timeout_jiffies != MAX_SCHEDULE_TIMEOUT) &&
			(list_first_entry(&hdev->hw_queues_mirror_list,
					struct hl_cs, mirror_node) == cs)) {
		cs->tdr_active = true;
		schedule_delayed_work(&cs->work_tdr, hdev->timeout_jiffies);
		spin_unlock(&hdev->hw_queues_mirror_lock);
	} else {
		spin_unlock(&hdev->hw_queues_mirror_lock);
	}

	if (!hdev->cs_active_cnt++) {
		struct hl_device_idle_busy_ts *ts;

		ts = &hdev->idle_busy_ts_arr[hdev->idle_busy_ts_idx];
		ts->busy_to_idle_ts = ktime_set(0, 0);
		ts->idle_to_busy_ts = ktime_get();
	}
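	/*
	 * All queue resources were validated and reserved above, so the
	 * per-job submission below is not expected to fail.
	 */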
	list_for_each_entry_safe(job, tmp, &cs->job_list, cs_node)
		switch (job->queue_type) {
		case QUEUE_TYPE_EXT:
			ext_queue_schedule_job(job);
			break;
		case QUEUE_TYPE_INT:
			int_queue_schedule_job(job);
			break;
		case QUEUE_TYPE_HW:
			hw_queue_schedule_job(job);
			break;
		default:
			break;
		}

	cs->submitted = true;

	goto out;
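	/*
	 * Error path: give back the completion-queue slots that were
	 * reserved by ext_queue_sanity_checks()/hw_queue_sanity_checks()
	 * for queues that already passed their checks.
	 */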
unroll_cq_resv:
	q = &hdev->kernel_queues[0];
	for (i = 0 ; (i < HL_MAX_QUEUES) && (cq_cnt > 0) ; i++, q++) {
		if ((q->queue_type == QUEUE_TYPE_EXT ||
				q->queue_type == QUEUE_TYPE_HW) &&
				cs->jobs_in_queue_cnt[i]) {
			atomic_t *free_slots =
				&hdev->completion_queue[i].free_slots_cnt;
			atomic_add(cs->jobs_in_queue_cnt[i], free_slots);
			cq_cnt--;
		}
	}

out:
	hdev->asic_funcs->hw_queues_unlock(hdev);

	return rc;
}
/*
 * hl_hw_queue_inc_ci_kernel - increment ci for kernel's queue
 *
 * @hdev: pointer to hl_device structure
 * @hw_queue_id: which queue to increment its ci
 */
void hl_hw_queue_inc_ci_kernel(struct hl_device *hdev, u32 hw_queue_id)
{
	struct hl_hw_queue *q = &hdev->kernel_queues[hw_queue_id];

	q->ci = hl_queue_inc_ptr(q->ci);
}
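/*
 * External and CPU queues share an init path; the difference is the memory
 * source. The CPU queue must be accessible to the device's embedded CPU, so
 * it is carved from the cpu-accessible DMA pool, while external queues use
 * regular coherent DMA memory.
 */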
static int ext_and_cpu_queue_init(struct hl_device *hdev, struct hl_hw_queue *q,
					bool is_cpu_queue)
{
	void *p;
	int rc;

	if (is_cpu_queue)
		p = hdev->asic_funcs->cpu_accessible_dma_pool_alloc(hdev,
							HL_QUEUE_SIZE_IN_BYTES,
							&q->bus_address);
	else
		p = hdev->asic_funcs->asic_dma_alloc_coherent(hdev,
						HL_QUEUE_SIZE_IN_BYTES,
						&q->bus_address,
						GFP_KERNEL | __GFP_ZERO);
	if (!p)
		return -ENOMEM;

	q->kernel_address = (u64) (uintptr_t) p;

	q->shadow_queue = kmalloc_array(HL_QUEUE_LENGTH,
					sizeof(*q->shadow_queue),
					GFP_KERNEL);
	if (!q->shadow_queue) {
		dev_err(hdev->dev,
			"Failed to allocate shadow queue for H/W queue %d\n",
			q->hw_queue_id);
		rc = -ENOMEM;
		goto free_queue;
	}

	/* Make sure read/write pointers are initialized to start of queue */
	q->ci = 0;
	q->pi = 0;

	return 0;

free_queue:
	if (is_cpu_queue)
		hdev->asic_funcs->cpu_accessible_dma_pool_free(hdev,
					HL_QUEUE_SIZE_IN_BYTES,
					(void *) (uintptr_t) q->kernel_address);
	else
		hdev->asic_funcs->asic_dma_free_coherent(hdev,
					HL_QUEUE_SIZE_IN_BYTES,
					(void *) (uintptr_t) q->kernel_address,
					q->bus_address);

	return rc;
}
static int int_queue_init(struct hl_device *hdev, struct hl_hw_queue *q)
{
	void *p;

	p = hdev->asic_funcs->get_int_queue_base(hdev, q->hw_queue_id,
					&q->bus_address, &q->int_queue_len);
	if (!p) {
		dev_err(hdev->dev,
			"Failed to get base address for internal queue %d\n",
			q->hw_queue_id);
		return -EFAULT;
	}

	q->kernel_address = (u64) (uintptr_t) p;
	q->pi = 0;
	q->ci = 0;

	return 0;
}
static int cpu_queue_init(struct hl_device *hdev, struct hl_hw_queue *q)
{
	return ext_and_cpu_queue_init(hdev, q, true);
}

static int ext_queue_init(struct hl_device *hdev, struct hl_hw_queue *q)
{
	return ext_and_cpu_queue_init(hdev, q, false);
}
static int hw_queue_init(struct hl_device *hdev, struct hl_hw_queue *q)
{
	void *p;

	p = hdev->asic_funcs->asic_dma_alloc_coherent(hdev,
						HL_QUEUE_SIZE_IN_BYTES,
						&q->bus_address,
						GFP_KERNEL | __GFP_ZERO);
	if (!p)
		return -ENOMEM;

	q->kernel_address = (u64) (uintptr_t) p;

	/* Make sure read/write pointers are initialized to start of queue */
	q->ci = 0;
	q->pi = 0;

	return 0;
}
/*
 * queue_init - main initialization function for H/W queue object
 *
 * @hdev: pointer to hl_device device structure
 * @q: pointer to hl_hw_queue queue structure
 * @hw_queue_id: The id of the H/W queue
 *
 * Allocate dma-able memory for the queue and initialize fields
 * Returns 0 on success
 */
static int queue_init(struct hl_device *hdev, struct hl_hw_queue *q,
			u32 hw_queue_id)
{
	int rc;

	BUILD_BUG_ON(HL_QUEUE_SIZE_IN_BYTES > HL_PAGE_SIZE);

	q->hw_queue_id = hw_queue_id;

	switch (q->queue_type) {
	case QUEUE_TYPE_EXT:
		rc = ext_queue_init(hdev, q);
		break;
	case QUEUE_TYPE_INT:
		rc = int_queue_init(hdev, q);
		break;
	case QUEUE_TYPE_CPU:
		rc = cpu_queue_init(hdev, q);
		break;
	case QUEUE_TYPE_HW:
		rc = hw_queue_init(hdev, q);
		break;
	case QUEUE_TYPE_NA:
		q->valid = 0;
		return 0;
	default:
		dev_crit(hdev->dev, "wrong queue type %d during init\n",
			q->queue_type);
		rc = -EINVAL;
		break;
	}

	if (rc)
		return rc;

	q->valid = 1;

	return 0;
}
/*
 * queue_fini - destroy queue
 *
 * @hdev: pointer to hl_device device structure
 * @q: pointer to hl_hw_queue queue structure
 *
 * Free the queue memory
 */
static void queue_fini(struct hl_device *hdev, struct hl_hw_queue *q)
{
	if (!q->valid)
		return;

	/*
	 * If we arrived here, there are no jobs waiting on this queue
	 * so we can safely remove it.
	 * This is because this function can only be called when:
	 * 1. Either a context is deleted, which only can occur if all its
	 *    jobs were finished
	 * 2. A context wasn't able to be created due to failure or timeout,
	 *    which means there are no jobs on the queue yet
	 *
	 * The only exceptions are the queues of the kernel context, but
	 * if they are being destroyed, it means that the entire module is
	 * being removed. If the module is removed, it means there is no open
	 * user context. It also means that if a job was submitted by
	 * the kernel driver (e.g. context creation), the job itself was
	 * released by the kernel driver when a timeout occurred on its
	 * Completion. Thus, we don't need to release it again.
	 */

	if (q->queue_type == QUEUE_TYPE_INT)
		return;

	kfree(q->shadow_queue);

	if (q->queue_type == QUEUE_TYPE_CPU)
		hdev->asic_funcs->cpu_accessible_dma_pool_free(hdev,
					HL_QUEUE_SIZE_IN_BYTES,
					(void *) (uintptr_t) q->kernel_address);
	else
		hdev->asic_funcs->asic_dma_free_coherent(hdev,
					HL_QUEUE_SIZE_IN_BYTES,
					(void *) (uintptr_t) q->kernel_address,
					q->bus_address);
}
int hl_hw_queues_create(struct hl_device *hdev)
{
	struct asic_fixed_properties *asic = &hdev->asic_prop;
	struct hl_hw_queue *q;
	int i, rc, q_ready_cnt;

	hdev->kernel_queues = kcalloc(HL_MAX_QUEUES,
				sizeof(*hdev->kernel_queues), GFP_KERNEL);

	if (!hdev->kernel_queues) {
		dev_err(hdev->dev, "Not enough memory for H/W queues\n");
		return -ENOMEM;
	}

	/* Initialize the H/W queues */
	for (i = 0, q_ready_cnt = 0, q = hdev->kernel_queues;
			i < HL_MAX_QUEUES ; i++, q_ready_cnt++, q++) {

		q->queue_type = asic->hw_queues_props[i].type;
		rc = queue_init(hdev, q, i);
		if (rc) {
			dev_err(hdev->dev,
				"failed to initialize queue %d\n", i);
			goto release_queues;
		}
	}

	return 0;

release_queues:
	for (i = 0, q = hdev->kernel_queues ; i < q_ready_cnt ; i++, q++)
		queue_fini(hdev, q);

	kfree(hdev->kernel_queues);

	return rc;
}
void hl_hw_queues_destroy(struct hl_device *hdev)
{
	struct hl_hw_queue *q;
	int i;

	for (i = 0, q = hdev->kernel_queues ; i < HL_MAX_QUEUES ; i++, q++)
		queue_fini(hdev, q);

	kfree(hdev->kernel_queues);
}
void hl_hw_queue_reset(struct hl_device *hdev, bool hard_reset)
{
	struct hl_hw_queue *q;
	int i;

	for (i = 0, q = hdev->kernel_queues ; i < HL_MAX_QUEUES ; i++, q++) {
		if ((!q->valid) ||
			((!hard_reset) && (q->queue_type == QUEUE_TYPE_CPU)))
			continue;
		q->pi = q->ci = 0;
	}
}