// SPDX-License-Identifier: GPL-2.0

/*
 * Copyright 2016-2019 HabanaLabs, Ltd.
 * All Rights Reserved.
 */

#include <uapi/misc/habanalabs.h>
#include "habanalabs.h"

#include <linux/uaccess.h>
#include <linux/slab.h>

static void job_wq_completion(struct work_struct *work);
static long _hl_cs_wait_ioctl(struct hl_device *hdev,
		struct hl_ctx *ctx, u64 timeout_us, u64 seq);
static void cs_do_release(struct kref *ref);

static const char *hl_fence_get_driver_name(struct dma_fence *fence)
{
	return "HabanaLabs";
}

static const char *hl_fence_get_timeline_name(struct dma_fence *fence)
{
	struct hl_dma_fence *hl_fence =
		container_of(fence, struct hl_dma_fence, base_fence);

	return dev_name(hl_fence->hdev->dev);
}

static bool hl_fence_enable_signaling(struct dma_fence *fence)
{
	return true;
}

static void hl_fence_release(struct dma_fence *fence)
{
	struct hl_dma_fence *hl_fence =
		container_of(fence, struct hl_dma_fence, base_fence);

	kfree_rcu(hl_fence, base_fence.rcu);
}

static const struct dma_fence_ops hl_fence_ops = {
	.get_driver_name = hl_fence_get_driver_name,
	.get_timeline_name = hl_fence_get_timeline_name,
	.enable_signaling = hl_fence_enable_signaling,
	.wait = dma_fence_default_wait,
	.release = hl_fence_release
};

static void cs_get(struct hl_cs *cs)
{
	kref_get(&cs->refcount);
}

static int cs_get_unless_zero(struct hl_cs *cs)
{
	return kref_get_unless_zero(&cs->refcount);
}

static void cs_put(struct hl_cs *cs)
{
	kref_put(&cs->refcount, cs_do_release);
}

static bool is_cb_patched(struct hl_device *hdev, struct hl_cs_job *job)
{
	/*
	 * A patched CB is created for jobs on external queues, and for jobs
	 * on H/W queues if the user CB was allocated by the driver and the
	 * MMU is disabled.
	 */
	return (job->queue_type == QUEUE_TYPE_EXT ||
			(job->queue_type == QUEUE_TYPE_HW &&
				job->is_kernel_allocated_cb &&
				!hdev->mmu_enable));
}

/*
 * cs_parser - parse the user command submission
 *
 * @hpriv : pointer to the private data of the fd
 * @job : pointer to the job that holds the command submission info
 *
 * The function parses the command submission of the user. It calls the
 * ASIC specific parser, which returns a list of memory blocks to send
 * to the device as different command buffers
 *
 */
static int cs_parser(struct hl_fpriv *hpriv, struct hl_cs_job *job)
{
	struct hl_device *hdev = hpriv->hdev;
	struct hl_cs_parser parser;
	int rc;

	parser.ctx_id = job->cs->ctx->asid;
	parser.cs_sequence = job->cs->sequence;
	parser.job_id = job->id;

	parser.hw_queue_id = job->hw_queue_id;
	parser.job_userptr_list = &job->userptr_list;
	parser.patched_cb = NULL;
	parser.user_cb = job->user_cb;
	parser.user_cb_size = job->user_cb_size;
	parser.queue_type = job->queue_type;
	parser.is_kernel_allocated_cb = job->is_kernel_allocated_cb;
	job->patched_cb = NULL;

	rc = hdev->asic_funcs->cs_parser(hdev, &parser);

	if (is_cb_patched(hdev, job)) {
		if (!rc) {
			job->patched_cb = parser.patched_cb;
			job->job_cb_size = parser.patched_cb_size;

			spin_lock(&job->patched_cb->lock);
			job->patched_cb->cs_cnt++;
			spin_unlock(&job->patched_cb->lock);
		}

		/*
		 * Whether the parsing worked or not, we don't need the
		 * original CB anymore because it was already parsed and
		 * won't be accessed again for this CS
		 */
		spin_lock(&job->user_cb->lock);
		job->user_cb->cs_cnt--;
		spin_unlock(&job->user_cb->lock);
		hl_cb_put(job->user_cb);
		job->user_cb = NULL;
	} else if (!rc) {
		job->job_cb_size = job->user_cb_size;
	}

	return rc;
}
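
/*
 * free_job - release a single job
 *
 * Drops the CB references taken for the job, removes the job from the CS
 * job list and, for jobs on external or H/W queues, drops the CS reference
 * that was taken on the job's behalf.
 */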
static void free_job(struct hl_device *hdev, struct hl_cs_job *job)
{
	struct hl_cs *cs = job->cs;

	if (is_cb_patched(hdev, job)) {
		hl_userptr_delete_list(hdev, &job->userptr_list);

		/*
		 * We might arrive here from rollback and patched CB wasn't
		 * created, so we need to check it's not NULL
		 */
		if (job->patched_cb) {
			spin_lock(&job->patched_cb->lock);
			job->patched_cb->cs_cnt--;
			spin_unlock(&job->patched_cb->lock);

			hl_cb_put(job->patched_cb);
		}
	}

	/* For H/W queue jobs, if a user CB was allocated by driver and MMU is
	 * enabled, the user CB isn't released in cs_parser() and thus should be
	 * released here.
	 */
	if (job->queue_type == QUEUE_TYPE_HW &&
			job->is_kernel_allocated_cb && hdev->mmu_enable) {
		spin_lock(&job->user_cb->lock);
		job->user_cb->cs_cnt--;
		spin_unlock(&job->user_cb->lock);

		hl_cb_put(job->user_cb);
	}

	/*
	 * This is the only place where there can be multiple threads
	 * modifying the list at the same time
	 */
	spin_lock(&cs->job_lock);
	list_del(&job->cs_node);
	spin_unlock(&cs->job_lock);

	hl_debugfs_remove_job(hdev, job);

	if (job->queue_type == QUEUE_TYPE_EXT ||
			job->queue_type == QUEUE_TYPE_HW)
		cs_put(cs);

	kfree(job);
}
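
/*
 * cs_do_release - final release of a CS when its refcount drops to zero
 *
 * Frees the remaining (internal) jobs, updates the device busy/idle
 * accounting, hands the TDR over to the next CS on the mirror list, and
 * signals and releases the CS fence.
 */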
static void cs_do_release(struct kref *ref)
{
	struct hl_cs *cs = container_of(ref, struct hl_cs,
						refcount);
	struct hl_device *hdev = cs->ctx->hdev;
	struct hl_cs_job *job, *tmp;

	cs->completed = true;

	/*
	 * Although reaching here means that all external jobs have finished
	 * (each of them took a refcount on the CS), we still need to go over
	 * the internal jobs and free them. Otherwise, we will have leaked
	 * memory and, what's worse, the CS object (and potentially the CTX
	 * object) could be released while a JOB still holds a pointer to
	 * them (but no reference).
	 */
	list_for_each_entry_safe(job, tmp, &cs->job_list, cs_node)
		free_job(hdev, job);

	/* We also need to update CI for internal queues */
	if (cs->submitted) {
		hdev->asic_funcs->hw_queues_lock(hdev);

		hdev->cs_active_cnt--;
		if (!hdev->cs_active_cnt) {
			struct hl_device_idle_busy_ts *ts;

			ts = &hdev->idle_busy_ts_arr[hdev->idle_busy_ts_idx++];
			ts->busy_to_idle_ts = ktime_get();

			if (hdev->idle_busy_ts_idx == HL_IDLE_BUSY_TS_ARR_SIZE)
				hdev->idle_busy_ts_idx = 0;
		} else if (hdev->cs_active_cnt < 0) {
			dev_crit(hdev->dev, "CS active cnt %d is negative\n",
				hdev->cs_active_cnt);
		}

		hdev->asic_funcs->hw_queues_unlock(hdev);

		hl_int_hw_queue_update_ci(cs);

		spin_lock(&hdev->hw_queues_mirror_lock);
		/* remove CS from hw_queues mirror list */
		list_del_init(&cs->mirror_node);
		spin_unlock(&hdev->hw_queues_mirror_lock);

		/*
		 * Don't cancel the TDR in case this CS timed out because we
		 * might be running from the TDR context
		 */
		if ((!cs->timedout) &&
			(hdev->timeout_jiffies != MAX_SCHEDULE_TIMEOUT)) {
			struct hl_cs *next;

			if (cs->tdr_active)
				cancel_delayed_work_sync(&cs->work_tdr);

			spin_lock(&hdev->hw_queues_mirror_lock);

			/* queue TDR for next CS */
			next = list_first_entry_or_null(
					&hdev->hw_queues_mirror_list,
					struct hl_cs, mirror_node);

			if ((next) && (!next->tdr_active)) {
				next->tdr_active = true;
				schedule_delayed_work(&next->work_tdr,
							hdev->timeout_jiffies);
			}

			spin_unlock(&hdev->hw_queues_mirror_lock);
		}
	}

	/*
	 * Must be called before hl_ctx_put because inside we use ctx to get
	 * the device
	 */
	hl_debugfs_remove_cs(cs);

	hl_ctx_put(cs->ctx);

	if (cs->timedout)
		dma_fence_set_error(cs->fence, -ETIMEDOUT);
	else if (cs->aborted)
		dma_fence_set_error(cs->fence, -EIO);

	dma_fence_signal(cs->fence);
	dma_fence_put(cs->fence);

	kfree(cs);
}
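
/*
 * cs_timedout - TDR work that runs when a CS hasn't completed in time
 *
 * Marks the CS as timed out so its TDR won't be cancelled and, if
 * reset_on_lockup is set, resets the device.
 */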
static void cs_timedout(struct work_struct *work)
{
	struct hl_device *hdev;
	int ctx_asid, rc;
	struct hl_cs *cs = container_of(work, struct hl_cs,
						work_tdr.work);
	rc = cs_get_unless_zero(cs);
	if (!rc)
		return;

	if ((!cs->submitted) || (cs->completed)) {
		cs_put(cs);
		return;
	}

	/* Mark the CS as timed out so we won't try to cancel its TDR */
	cs->timedout = true;

	hdev = cs->ctx->hdev;
	ctx_asid = cs->ctx->asid;

	/* TODO: add information about last signaled seq and last emitted seq */
	dev_err(hdev->dev, "User %d command submission %llu got stuck!\n",
		ctx_asid, cs->sequence);

	cs_put(cs);

	if (hdev->reset_on_lockup)
		hl_device_reset(hdev, false, false);
}
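
/*
 * allocate_cs - allocate a new CS object and its fence
 *
 * Fails with -EAGAIN if the next sequence number still has an unsignaled
 * fence pending, i.e. there are too many in-flight CS.
 */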
static int allocate_cs(struct hl_device *hdev, struct hl_ctx *ctx,
			struct hl_cs **cs_new)
{
	struct hl_dma_fence *fence;
	struct dma_fence *other = NULL;
	struct hl_cs *cs;
	int rc;

	cs = kzalloc(sizeof(*cs), GFP_ATOMIC);
	if (!cs)
		return -ENOMEM;

	cs->ctx = ctx;
	cs->submitted = false;
	cs->completed = false;
	INIT_LIST_HEAD(&cs->job_list);
	INIT_DELAYED_WORK(&cs->work_tdr, cs_timedout);
	kref_init(&cs->refcount);
	spin_lock_init(&cs->job_lock);

	fence = kmalloc(sizeof(*fence), GFP_ATOMIC);
	if (!fence) {
		rc = -ENOMEM;
		goto free_cs;
	}

	fence->hdev = hdev;
	spin_lock_init(&fence->lock);
	cs->fence = &fence->base_fence;

	spin_lock(&ctx->cs_lock);

	fence->cs_seq = ctx->cs_sequence;
	other = ctx->cs_pending[fence->cs_seq & (HL_MAX_PENDING_CS - 1)];
	if ((other) && (!dma_fence_is_signaled(other))) {
		spin_unlock(&ctx->cs_lock);
		dev_dbg(hdev->dev,
			"Rejecting CS because of too many in-flight CS\n");
		rc = -EAGAIN;
		goto free_fence;
	}

	dma_fence_init(&fence->base_fence, &hl_fence_ops, &fence->lock,
			ctx->asid, ctx->cs_sequence);

	cs->sequence = fence->cs_seq;

	ctx->cs_pending[fence->cs_seq & (HL_MAX_PENDING_CS - 1)] =
							&fence->base_fence;
	ctx->cs_sequence++;

	dma_fence_get(&fence->base_fence);

	dma_fence_put(other);

	spin_unlock(&ctx->cs_lock);

	*cs_new = cs;

	return 0;

free_fence:
	kfree(fence);
free_cs:
	kfree(cs);
	return rc;
}
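
/*
 * cs_rollback - free all the jobs of a CS that is being torn down
 */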
static void cs_rollback(struct hl_device *hdev, struct hl_cs *cs)
{
	struct hl_cs_job *job, *tmp;

	list_for_each_entry_safe(job, tmp, &cs->job_list, cs_node)
		free_job(hdev, job);
}
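
/*
 * hl_cs_rollback_all - abort and release every CS that is still on the
 * H/W queues mirror list
 */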
void hl_cs_rollback_all(struct hl_device *hdev)
{
	struct hl_cs *cs, *tmp;

	/* flush all completions */
	flush_workqueue(hdev->cq_wq);

	/* Make sure we don't have leftovers in the H/W queues mirror list */
	list_for_each_entry_safe(cs, tmp, &hdev->hw_queues_mirror_list,
				mirror_node) {
		cs_get(cs);
		cs->aborted = true;
		dev_warn_ratelimited(hdev->dev, "Killing CS %d.%llu\n",
					cs->ctx->asid, cs->sequence);
		cs_rollback(hdev, cs);
		cs_put(cs);
	}
}
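
/*
 * job_wq_completion - work that frees a job once its completion arrived
 */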
static void job_wq_completion(struct work_struct *work)
{
	struct hl_cs_job *job = container_of(work, struct hl_cs_job,
						finish_work);
	struct hl_cs *cs = job->cs;
	struct hl_device *hdev = cs->ctx->hdev;

	/* job is no longer needed */
	free_job(hdev, job);
}
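
/*
 * validate_queue_index - check a chunk's queue index and return the queue
 * type and whether the CB must be allocated by the kernel driver
 */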
static int validate_queue_index(struct hl_device *hdev,
				struct hl_cs_chunk *chunk,
				enum hl_queue_type *queue_type,
				bool *is_kernel_allocated_cb)
{
	struct asic_fixed_properties *asic = &hdev->asic_prop;
	struct hw_queue_properties *hw_queue_prop;

	hw_queue_prop = &asic->hw_queues_props[chunk->queue_index];

	if ((chunk->queue_index >= HL_MAX_QUEUES) ||
			(hw_queue_prop->type == QUEUE_TYPE_NA)) {
		dev_err(hdev->dev, "Queue index %d is invalid\n",
			chunk->queue_index);
		return -EINVAL;
	}

	if (hw_queue_prop->driver_only) {
		dev_err(hdev->dev,
			"Queue index %d is restricted for the kernel driver\n",
			chunk->queue_index);
		return -EINVAL;
	}

	*queue_type = hw_queue_prop->type;
	*is_kernel_allocated_cb = !!hw_queue_prop->requires_kernel_cb;

	return 0;
}
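
/*
 * get_cb_from_cs_chunk - look up the CB referenced by a chunk, validate its
 * size and take a reference on it (handle reference and cs_cnt)
 */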
static struct hl_cb *get_cb_from_cs_chunk(struct hl_device *hdev,
					struct hl_cb_mgr *cb_mgr,
					struct hl_cs_chunk *chunk)
{
	struct hl_cb *cb;
	u32 cb_handle;

	cb_handle = (u32) (chunk->cb_handle >> PAGE_SHIFT);

	cb = hl_cb_get(hdev, cb_mgr, cb_handle);
	if (!cb) {
		dev_err(hdev->dev, "CB handle 0x%x invalid\n", cb_handle);
		return NULL;
	}

	if ((chunk->cb_size < 8) || (chunk->cb_size > cb->size)) {
		dev_err(hdev->dev, "CB size %u invalid\n", chunk->cb_size);
		goto release_cb;
	}

	spin_lock(&cb->lock);
	cb->cs_cnt++;
	spin_unlock(&cb->lock);

	return cb;

release_cb:
	hl_cb_put(cb);
	return NULL;
}
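
/*
 * hl_cs_allocate_job - allocate and minimally initialize a new job
 */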
struct hl_cs_job *hl_cs_allocate_job(struct hl_device *hdev,
		enum hl_queue_type queue_type, bool is_kernel_allocated_cb)
{
	struct hl_cs_job *job;

	job = kzalloc(sizeof(*job), GFP_ATOMIC);
	if (!job)
		return NULL;

	job->queue_type = queue_type;
	job->is_kernel_allocated_cb = is_kernel_allocated_cb;

	if (is_cb_patched(hdev, job))
		INIT_LIST_HEAD(&job->userptr_list);

	if (job->queue_type == QUEUE_TYPE_EXT)
		INIT_WORK(&job->finish_work, job_wq_completion);

	return job;
}
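
/*
 * _hl_cs_ioctl - build a CS from the user's chunk array and submit it
 *
 * Copies the chunks from user-space, creates a job per chunk, parses each
 * job and schedules the CS on the H/W queues. On any failure the CS is
 * rolled back.
 */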
static int _hl_cs_ioctl(struct hl_fpriv *hpriv, void __user *chunks,
			u32 num_chunks, u64 *cs_seq)
{
	struct hl_device *hdev = hpriv->hdev;
	struct hl_cs_chunk *cs_chunk_array;
	struct hl_cs_job *job;
	struct hl_cs *cs;
	struct hl_cb *cb;
	bool int_queues_only = true;
	u32 size_to_copy;
	int rc, i;

	*cs_seq = ULLONG_MAX;

	if (num_chunks > HL_MAX_JOBS_PER_CS) {
		dev_err(hdev->dev,
			"Number of chunks can NOT be larger than %d\n",
			HL_MAX_JOBS_PER_CS);
		rc = -EINVAL;
		goto out;
	}

	cs_chunk_array = kmalloc_array(num_chunks, sizeof(*cs_chunk_array),
					GFP_ATOMIC);
	if (!cs_chunk_array) {
		rc = -ENOMEM;
		goto out;
	}

	size_to_copy = num_chunks * sizeof(struct hl_cs_chunk);
	if (copy_from_user(cs_chunk_array, chunks, size_to_copy)) {
		dev_err(hdev->dev, "Failed to copy cs chunk array from user\n");
		rc = -EFAULT;
		goto free_cs_chunk_array;
	}

	/* increment refcnt for context */
	hl_ctx_get(hdev, hpriv->ctx);

	rc = allocate_cs(hdev, hpriv->ctx, &cs);
	if (rc) {
		hl_ctx_put(hpriv->ctx);
		goto free_cs_chunk_array;
	}

	*cs_seq = cs->sequence;

	hl_debugfs_add_cs(cs);

	/* Validate ALL the CS chunks before submitting the CS */
	for (i = 0 ; i < num_chunks ; i++) {
		struct hl_cs_chunk *chunk = &cs_chunk_array[i];
		enum hl_queue_type queue_type;
		bool is_kernel_allocated_cb;

		rc = validate_queue_index(hdev, chunk, &queue_type,
					&is_kernel_allocated_cb);
		if (rc)
			goto free_cs_object;

		if (is_kernel_allocated_cb) {
			cb = get_cb_from_cs_chunk(hdev, &hpriv->cb_mgr, chunk);
			if (!cb) {
				rc = -EINVAL;
				goto free_cs_object;
			}
		} else {
			cb = (struct hl_cb *) (uintptr_t) chunk->cb_handle;
		}

		if (queue_type == QUEUE_TYPE_EXT || queue_type == QUEUE_TYPE_HW)
			int_queues_only = false;

		job = hl_cs_allocate_job(hdev, queue_type,
						is_kernel_allocated_cb);
		if (!job) {
			dev_err(hdev->dev, "Failed to allocate a new job\n");
			rc = -ENOMEM;
			if (is_kernel_allocated_cb)
				goto release_cb;
			else
				goto free_cs_object;
		}

		job->id = i + 1;
		job->cs = cs;
		job->user_cb = cb;
		job->user_cb_size = chunk->cb_size;
		job->hw_queue_id = chunk->queue_index;

		cs->jobs_in_queue_cnt[job->hw_queue_id]++;

		list_add_tail(&job->cs_node, &cs->job_list);

		/*
		 * Increment the CS reference. When the CS reference drops to
		 * 0, the CS is done, can be signaled to the user and all its
		 * resources can be freed. Only increment for jobs on external
		 * or H/W queues, because only those jobs get a completion.
		 */
		if (job->queue_type == QUEUE_TYPE_EXT ||
				job->queue_type == QUEUE_TYPE_HW)
			cs_get(cs);

		hl_debugfs_add_job(hdev, job);

		rc = cs_parser(hpriv, job);
		if (rc) {
			dev_err(hdev->dev,
				"Failed to parse JOB %d.%llu.%d, err %d, rejecting the CS\n",
				cs->ctx->asid, cs->sequence, job->id, rc);
			goto free_cs_object;
		}
	}

	if (int_queues_only) {
		dev_err(hdev->dev,
			"Reject CS %d.%llu because only internal queues jobs are present\n",
			cs->ctx->asid, cs->sequence);
		rc = -EINVAL;
		goto free_cs_object;
	}

	rc = hl_hw_queue_schedule_cs(cs);
	if (rc) {
		if (rc != -EAGAIN)
			dev_err(hdev->dev,
				"Failed to submit CS %d.%llu to H/W queues, error %d\n",
				cs->ctx->asid, cs->sequence, rc);
		goto free_cs_object;
	}

	rc = HL_CS_STATUS_SUCCESS;
	goto put_cs;

release_cb:
	spin_lock(&cb->lock);
	cb->cs_cnt--;
	spin_unlock(&cb->lock);
	hl_cb_put(cb);
free_cs_object:
	cs_rollback(hdev, cs);
	*cs_seq = ULLONG_MAX;
	/* The path below is both for good and erroneous exits */
put_cs:
	/* We finished with the CS in this function, so put the ref */
	cs_put(cs);
free_cs_chunk_array:
	kfree(cs_chunk_array);
out:
	return rc;
}
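
/*
 * hl_cs_ioctl - handler of the command submission ioctl
 *
 * Performs the context-switch / restore phase if needed and then submits
 * the execute CS.
 */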
int hl_cs_ioctl(struct hl_fpriv *hpriv, void *data)
{
	struct hl_device *hdev = hpriv->hdev;
	union hl_cs_args *args = data;
	struct hl_ctx *ctx = hpriv->ctx;
	void __user *chunks_execute, *chunks_restore;
	u32 num_chunks_execute, num_chunks_restore;
	u64 cs_seq = ULLONG_MAX;
	int rc, do_ctx_switch;
	bool need_soft_reset = false;

	if (hl_device_disabled_or_in_reset(hdev)) {
		dev_warn_ratelimited(hdev->dev,
			"Device is %s. Can't submit new CS\n",
			atomic_read(&hdev->in_reset) ? "in_reset" : "disabled");
		rc = -EBUSY;
		goto out;
	}

	chunks_execute = (void __user *) (uintptr_t) args->in.chunks_execute;
	num_chunks_execute = args->in.num_chunks_execute;

	if (!num_chunks_execute) {
		dev_err(hdev->dev,
			"Got execute CS with 0 chunks, context %d\n",
			ctx->asid);
		rc = -EINVAL;
		goto out;
	}

	do_ctx_switch = atomic_cmpxchg(&ctx->thread_ctx_switch_token, 1, 0);

	if (do_ctx_switch || (args->in.cs_flags & HL_CS_FLAGS_FORCE_RESTORE)) {
		long ret;

		chunks_restore =
			(void __user *) (uintptr_t) args->in.chunks_restore;
		num_chunks_restore = args->in.num_chunks_restore;

		mutex_lock(&hpriv->restore_phase_mutex);

		if (do_ctx_switch) {
			rc = hdev->asic_funcs->context_switch(hdev, ctx->asid);
			if (rc) {
				dev_err_ratelimited(hdev->dev,
					"Failed to switch to context %d, rejecting CS! %d\n",
					ctx->asid, rc);
				/*
				 * If we timed out, or if the device is not
				 * IDLE while we want to do a context-switch
				 * (-EBUSY), we need to soft-reset because the
				 * QMAN is probably stuck. However, we can't
				 * call the reset function here directly
				 * because of a deadlock, so we need to do it
				 * at the very end of this function.
				 */
				if ((rc == -ETIMEDOUT) || (rc == -EBUSY))
					need_soft_reset = true;
				mutex_unlock(&hpriv->restore_phase_mutex);
				goto out;
			}
		}

		hdev->asic_funcs->restore_phase_topology(hdev);

		if (!num_chunks_restore) {
			dev_dbg(hdev->dev,
				"Need to run restore phase but restore CS is empty\n");
			rc = 0;
		} else {
			rc = _hl_cs_ioctl(hpriv, chunks_restore,
						num_chunks_restore, &cs_seq);
		}

		mutex_unlock(&hpriv->restore_phase_mutex);

		if (rc) {
			dev_err(hdev->dev,
				"Failed to submit restore CS for context %d (%d)\n",
				ctx->asid, rc);
			goto out;
		}

		/* Need to wait for restore completion before execution phase */
		if (num_chunks_restore) {
			ret = _hl_cs_wait_ioctl(hdev, ctx,
					jiffies_to_usecs(hdev->timeout_jiffies),
					cs_seq);
			if (ret <= 0) {
				dev_err(hdev->dev,
					"Restore CS for context %d failed to complete %ld\n",
					ctx->asid, ret);
				rc = -ENOEXEC;
				goto out;
			}
		}

		ctx->thread_ctx_switch_wait_token = 1;
	} else if (!ctx->thread_ctx_switch_wait_token) {
		u32 tmp;

		rc = hl_poll_timeout_memory(hdev,
			&ctx->thread_ctx_switch_wait_token, tmp, (tmp == 1),
			100, jiffies_to_usecs(hdev->timeout_jiffies), false);

		if (rc == -ETIMEDOUT) {
			dev_err(hdev->dev,
				"context switch phase timeout (%d)\n", tmp);
			goto out;
		}
	}

	rc = _hl_cs_ioctl(hpriv, chunks_execute, num_chunks_execute, &cs_seq);

out:
	if (rc != -EAGAIN) {
		memset(args, 0, sizeof(*args));
		args->out.status = rc;
		args->out.seq = cs_seq;
	}

	if (((rc == -ETIMEDOUT) || (rc == -EBUSY)) && (need_soft_reset))
		hl_device_reset(hdev, false, false);

	return rc;
}
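
/*
 * _hl_cs_wait_ioctl - wait for the fence of a specific CS sequence number
 *
 * Returns the remaining timeout (in jiffies) if the CS completed, 0 if the
 * wait timed out, 1 if the fence was already released, or a negative error
 * code.
 */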
static long _hl_cs_wait_ioctl(struct hl_device *hdev,
		struct hl_ctx *ctx, u64 timeout_us, u64 seq)
{
	struct dma_fence *fence;
	unsigned long timeout;
	long rc;

	if (timeout_us == MAX_SCHEDULE_TIMEOUT)
		timeout = timeout_us;
	else
		timeout = usecs_to_jiffies(timeout_us);

	hl_ctx_get(hdev, ctx);

	fence = hl_ctx_get_fence(ctx, seq);
	if (IS_ERR(fence)) {
		rc = PTR_ERR(fence);
	} else if (fence) {
		rc = dma_fence_wait_timeout(fence, true, timeout);
		if (fence->error == -ETIMEDOUT)
			rc = -ETIMEDOUT;
		else if (fence->error == -EIO)
			rc = -EIO;
		dma_fence_put(fence);
	} else
		rc = 1;

	hl_ctx_put(ctx);

	return rc;
}
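
/*
 * hl_cs_wait_ioctl - handler of the wait-for-CS ioctl, translates the wait
 * result into a status code for user-space
 */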
int hl_cs_wait_ioctl(struct hl_fpriv *hpriv, void *data)
{
	struct hl_device *hdev = hpriv->hdev;
	union hl_wait_cs_args *args = data;
	u64 seq = args->in.seq;
	long rc;

	rc = _hl_cs_wait_ioctl(hdev, hpriv->ctx, args->in.timeout_us, seq);

	memset(args, 0, sizeof(*args));

	if (rc < 0) {
		dev_err_ratelimited(hdev->dev,
			"Error %ld on waiting for CS handle %llu\n",
			rc, seq);
		if (rc == -ERESTARTSYS) {
			args->out.status = HL_WAIT_CS_STATUS_INTERRUPTED;
			rc = -EINTR;
		} else if (rc == -ETIMEDOUT) {
			args->out.status = HL_WAIT_CS_STATUS_TIMEDOUT;
		} else if (rc == -EIO) {
			args->out.status = HL_WAIT_CS_STATUS_ABORTED;
		}
		return rc;
	}

	if (rc == 0)
		args->out.status = HL_WAIT_CS_STATUS_BUSY;
	else
		args->out.status = HL_WAIT_CS_STATUS_COMPLETED;

	return 0;
}