// SPDX-License-Identifier: GPL-2.0

/*
 * Copyright 2016-2019 HabanaLabs, Ltd.
 * All Rights Reserved.
 */

#include <uapi/misc/habanalabs.h>
#include "habanalabs.h"

#include <linux/uaccess.h>
#include <linux/slab.h>

#define HL_CS_FLAGS_TYPE_MASK	(HL_CS_FLAGS_SIGNAL | HL_CS_FLAGS_WAIT | \
				HL_CS_FLAGS_COLLECTIVE_WAIT)

/**
 * enum hl_cs_wait_status - cs wait status
 * @CS_WAIT_STATUS_BUSY: cs was not completed yet
 * @CS_WAIT_STATUS_COMPLETED: cs completed
 * @CS_WAIT_STATUS_GONE: cs completed but fence is already gone
 */
enum hl_cs_wait_status {
	CS_WAIT_STATUS_BUSY,
	CS_WAIT_STATUS_COMPLETED,
	CS_WAIT_STATUS_GONE
};

static void job_wq_completion(struct work_struct *work);
static int _hl_cs_wait_ioctl(struct hl_device *hdev, struct hl_ctx *ctx,
				u64 timeout_us, u64 seq,
				enum hl_cs_wait_status *status, s64 *timestamp);
static void cs_do_release(struct kref *ref);

static void hl_sob_reset(struct kref *ref)
{
	struct hl_hw_sob *hw_sob = container_of(ref, struct hl_hw_sob,
							kref);
	struct hl_device *hdev = hw_sob->hdev;

	hdev->asic_funcs->reset_sob(hdev, hw_sob);
}

void hl_sob_reset_error(struct kref *ref)
{
	struct hl_hw_sob *hw_sob = container_of(ref, struct hl_hw_sob,
							kref);
	struct hl_device *hdev = hw_sob->hdev;

	dev_crit(hdev->dev,
		"SOB release shouldn't be called here, q_idx: %d, sob_id: %d\n",
		hw_sob->q_idx, hw_sob->sob_id);
}

/**
 * hl_gen_sob_mask() - Generates a sob mask to be used in a monitor arm packet
 * @sob_base: sob base id
 * @sob_mask: sob user mask, each bit represents a sob offset from sob base
 * @mask: generated mask
 *
 * Return: 0 if given parameters are valid
 */
int hl_gen_sob_mask(u16 sob_base, u8 sob_mask, u8 *mask)
{
	int i;

	if (sob_mask == 0)
		return -EINVAL;

	if (sob_mask == 0x1) {
		*mask = ~(1 << (sob_base & 0x7));
	} else {
		/* find msb in order to verify sob range is valid */
		for (i = BITS_PER_BYTE - 1 ; i >= 0 ; i--)
			if (BIT(i) & sob_mask)
				break;

		if (i > (HL_MAX_SOBS_PER_MONITOR - (sob_base & 0x7) - 1))
			return -EINVAL;

		*mask = ~sob_mask;
	}

	return 0;
}
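
/*
 * hl_fence_release - release a CS completion object once its last ref drops
 *
 * @kref	: kref embedded in the fence of the CS completion object
 *
 * Called from hl_fence_put(). For signal/wait/collective-wait CS, this also
 * drops the reference the completion object holds on the H/W SOB.
 */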
static void hl_fence_release(struct kref *kref)
{
	struct hl_fence *fence =
		container_of(kref, struct hl_fence, refcount);
	struct hl_cs_compl *hl_cs_cmpl =
		container_of(fence, struct hl_cs_compl, base_fence);
	struct hl_device *hdev = hl_cs_cmpl->hdev;

	/* EBUSY means the CS was never submitted and hence we don't have
	 * an attached hw_sob object that we should handle here
	 */
	if (fence->error == -EBUSY)
		goto free;

	if ((hl_cs_cmpl->type == CS_TYPE_SIGNAL) ||
			(hl_cs_cmpl->type == CS_TYPE_WAIT) ||
			(hl_cs_cmpl->type == CS_TYPE_COLLECTIVE_WAIT)) {

		dev_dbg(hdev->dev,
			"CS 0x%llx type %d finished, sob_id: %d, sob_val: 0x%x\n",
			hl_cs_cmpl->cs_seq,
			hl_cs_cmpl->type,
			hl_cs_cmpl->hw_sob->sob_id,
			hl_cs_cmpl->sob_val);

		/*
		 * A signal CS can get completion while the corresponding wait
		 * for signal CS is on its way to the PQ. The wait for signal CS
		 * will get stuck if the signal CS incremented the SOB to its
		 * max value and there are no pending (submitted) waits on this
		 * SOB.
		 * We do the following to avoid this situation:
		 * 1. The wait for signal CS must get a ref for the signal CS as
		 *    soon as possible in cs_ioctl_signal_wait() and put it
		 *    before being submitted to the PQ but after it incremented
		 *    the SOB refcnt in init_signal_wait_cs().
		 * 2. Signal/Wait for signal CS will decrement the SOB refcnt
		 *    here.
		 * These two measures guarantee that the wait for signal CS will
		 * reset the SOB upon completion rather than the signal CS and
		 * hence the above scenario is avoided.
		 */
		kref_put(&hl_cs_cmpl->hw_sob->kref, hl_sob_reset);

		if (hl_cs_cmpl->type == CS_TYPE_COLLECTIVE_WAIT)
			hdev->asic_funcs->reset_sob_group(hdev,
					hl_cs_cmpl->sob_group);
	}

free:
	kfree(hl_cs_cmpl);
}

void hl_fence_put(struct hl_fence *fence)
{
	if (fence)
		kref_put(&fence->refcount, hl_fence_release);
}

void hl_fence_get(struct hl_fence *fence)
{
	if (fence)
		kref_get(&fence->refcount);
}

static void hl_fence_init(struct hl_fence *fence)
{
	kref_init(&fence->refcount);
	fence->error = 0;
	fence->timestamp = ktime_set(0, 0);
	init_completion(&fence->completion);
}

void cs_get(struct hl_cs *cs)
{
	kref_get(&cs->refcount);
}

static int cs_get_unless_zero(struct hl_cs *cs)
{
	return kref_get_unless_zero(&cs->refcount);
}

static void cs_put(struct hl_cs *cs)
{
	kref_put(&cs->refcount, cs_do_release);
}

static void cs_job_do_release(struct kref *ref)
{
	struct hl_cs_job *job = container_of(ref, struct hl_cs_job, refcount);

	kfree(job);
}

static void cs_job_put(struct hl_cs_job *job)
{
	kref_put(&job->refcount, cs_job_do_release);
}

static bool is_cb_patched(struct hl_device *hdev, struct hl_cs_job *job)
{
	/*
	 * Patched CB is created for external queues jobs, and for H/W queues
	 * jobs if the user CB was allocated by driver and MMU is disabled.
	 */
	return (job->queue_type == QUEUE_TYPE_EXT ||
			(job->queue_type == QUEUE_TYPE_HW &&
					job->is_kernel_allocated_cb &&
					!hdev->mmu_enable));
}

/*
 * cs_parser - parse the user command submission
 *
 * @hpriv	: pointer to the private data of the fd
 * @job		: pointer to the job that holds the command submission info
 *
 * The function parses the command submission of the user. It calls the
 * ASIC specific parser, which returns a list of memory blocks to send
 * to the device as different command buffers
 */
static int cs_parser(struct hl_fpriv *hpriv, struct hl_cs_job *job)
{
	struct hl_device *hdev = hpriv->hdev;
	struct hl_cs_parser parser;
	int rc;

	parser.ctx_id = job->cs->ctx->asid;
	parser.cs_sequence = job->cs->sequence;
	parser.job_id = job->id;

	parser.hw_queue_id = job->hw_queue_id;
	parser.job_userptr_list = &job->userptr_list;
	parser.patched_cb = NULL;
	parser.user_cb = job->user_cb;
	parser.user_cb_size = job->user_cb_size;
	parser.queue_type = job->queue_type;
	parser.is_kernel_allocated_cb = job->is_kernel_allocated_cb;
	job->patched_cb = NULL;

	rc = hdev->asic_funcs->cs_parser(hdev, &parser);

	if (is_cb_patched(hdev, job)) {
		if (!rc) {
			job->patched_cb = parser.patched_cb;
			job->job_cb_size = parser.patched_cb_size;
			job->contains_dma_pkt = parser.contains_dma_pkt;
			atomic_inc(&job->patched_cb->cs_cnt);
		}

		/*
		 * Whether the parsing worked or not, we don't need the
		 * original CB anymore because it was already parsed and
		 * won't be accessed again for this CS
		 */
		atomic_dec(&job->user_cb->cs_cnt);
		hl_cb_put(job->user_cb);
		job->user_cb = NULL;
	} else if (!rc) {
		job->job_cb_size = job->user_cb_size;
	}

	return rc;
}
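
/*
 * complete_job - release the resources of a job and detach it from its CS
 *
 * @hdev	: pointer to the habanalabs device structure
 * @job		: pointer to the job to complete
 *
 * Releases the patched/user CBs as needed, removes the job from the CS job
 * list and, for external/H-W queue jobs, drops the job's reference on the CS.
 */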
static void complete_job(struct hl_device *hdev, struct hl_cs_job *job)
{
	struct hl_cs *cs = job->cs;

	if (is_cb_patched(hdev, job)) {
		hl_userptr_delete_list(hdev, &job->userptr_list);

		/*
		 * We might arrive here from rollback and patched CB wasn't
		 * created, so we need to check it's not NULL
		 */
		if (job->patched_cb) {
			atomic_dec(&job->patched_cb->cs_cnt);
			hl_cb_put(job->patched_cb);
		}
	}

	/* For H/W queue jobs, if a user CB was allocated by driver and MMU is
	 * enabled, the user CB isn't released in cs_parser() and thus should be
	 * released here.
	 * This is also true for INT queues jobs which were allocated by driver
	 */
	if (job->is_kernel_allocated_cb &&
			((job->queue_type == QUEUE_TYPE_HW && hdev->mmu_enable) ||
				job->queue_type == QUEUE_TYPE_INT)) {
		atomic_dec(&job->user_cb->cs_cnt);
		hl_cb_put(job->user_cb);
	}

	/*
	 * This is the only place where there can be multiple threads
	 * modifying the list at the same time
	 */
	spin_lock(&cs->job_lock);
	list_del(&job->cs_node);
	spin_unlock(&cs->job_lock);

	hl_debugfs_remove_job(hdev, job);

	if (job->queue_type == QUEUE_TYPE_EXT ||
			job->queue_type == QUEUE_TYPE_HW)
		cs_put(cs);

	cs_job_put(job);
}
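
/*
 * cs_do_release - final cleanup of a command submission
 *
 * @ref		: kref embedded in the CS object
 *
 * Completes all remaining jobs, updates the busy/idle accounting, removes the
 * CS from the mirror list, re-arms the TDR for the next pending CS and
 * signals the CS fence with the appropriate error (if any).
 */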
static void cs_do_release(struct kref *ref)
{
	struct hl_cs *cs = container_of(ref, struct hl_cs, refcount);
	struct hl_device *hdev = cs->ctx->hdev;
	struct hl_cs_job *job, *tmp;

	cs->completed = true;

	/*
	 * Although reaching here means that all external jobs have finished
	 * (each of them took a refcount on the CS), we still need to go over
	 * the internal jobs and complete them. Otherwise, we will have leaked
	 * memory and, what's worse, the CS object (and potentially the CTX
	 * object) could be released while a JOB still holds a pointer to them
	 * (but no reference).
	 */
	list_for_each_entry_safe(job, tmp, &cs->job_list, cs_node)
		complete_job(hdev, job);

	if (!cs->submitted) {
		/* In case the wait for signal CS was submitted, the put occurs
		 * in init_signal_wait_cs() or collective_wait_init_cs()
		 * right before hanging on the PQ.
		 */
		if (cs->type == CS_TYPE_WAIT ||
				cs->type == CS_TYPE_COLLECTIVE_WAIT)
			hl_fence_put(cs->signal_fence);

		goto out;
	}

	hdev->asic_funcs->hw_queues_lock(hdev);

	hdev->cs_active_cnt--;
	if (!hdev->cs_active_cnt) {
		struct hl_device_idle_busy_ts *ts;

		ts = &hdev->idle_busy_ts_arr[hdev->idle_busy_ts_idx++];
		ts->busy_to_idle_ts = ktime_get();

		if (hdev->idle_busy_ts_idx == HL_IDLE_BUSY_TS_ARR_SIZE)
			hdev->idle_busy_ts_idx = 0;
	} else if (hdev->cs_active_cnt < 0) {
		dev_crit(hdev->dev, "CS active cnt %d is negative\n",
			hdev->cs_active_cnt);
	}

	hdev->asic_funcs->hw_queues_unlock(hdev);

	/* Need to update CI for internal queues */
	hl_int_hw_queue_update_ci(cs);

	/* remove CS from CS mirror list */
	spin_lock(&hdev->cs_mirror_lock);
	list_del_init(&cs->mirror_node);
	spin_unlock(&hdev->cs_mirror_lock);

	/* Don't cancel TDR in case this CS was timedout because we might be
	 * running from the TDR context
	 */
	if (!cs->timedout && hdev->timeout_jiffies != MAX_SCHEDULE_TIMEOUT) {
		struct hl_cs *next;

		if (cs->tdr_active)
			cancel_delayed_work_sync(&cs->work_tdr);

		spin_lock(&hdev->cs_mirror_lock);

		/* queue TDR for next CS */
		next = list_first_entry_or_null(&hdev->cs_mirror_list,
						struct hl_cs, mirror_node);

		if (next && !next->tdr_active) {
			next->tdr_active = true;
			schedule_delayed_work(&next->work_tdr,
						hdev->timeout_jiffies);
		}

		spin_unlock(&hdev->cs_mirror_lock);
	}

out:
	/* Must be called before hl_ctx_put because inside we use ctx to get
	 * the device
	 */
	hl_debugfs_remove_cs(cs);

	hl_ctx_put(cs->ctx);

	/* We need to mark an error for not submitted because in that case
	 * the hl fence release flow is different. Mainly, we don't need
	 * to handle hw_sob for signal/wait
	 */
	if (cs->timedout)
		cs->fence->error = -ETIMEDOUT;
	else if (cs->aborted)
		cs->fence->error = -EIO;
	else if (!cs->submitted)
		cs->fence->error = -EBUSY;

	if (cs->timestamp)
		cs->fence->timestamp = ktime_get();
	complete_all(&cs->fence->completion);
	hl_fence_put(cs->fence);

	kfree(cs->jobs_in_queue_cnt);
	kfree(cs);
}
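
/*
 * cs_timedout - TDR (timeout detection and recovery) work for a CS
 *
 * @work	: the delayed work of the CS that did not complete in time
 *
 * Marks the CS as timed out and, depending on reset_on_lockup, either resets
 * the device or marks it as needing a reset.
 */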
static void cs_timedout(struct work_struct *work)
{
	struct hl_device *hdev;
	int rc;
	struct hl_cs *cs = container_of(work, struct hl_cs,
						work_tdr.work);

	rc = cs_get_unless_zero(cs);
	if (!rc)
		return;

	if ((!cs->submitted) || (cs->completed)) {
		cs_put(cs);
		return;
	}

	/* Mark that the CS timed out so we won't try to cancel its TDR */
	cs->timedout = true;

	hdev = cs->ctx->hdev;

	switch (cs->type) {
	case CS_TYPE_SIGNAL:
		dev_err(hdev->dev,
			"Signal command submission %llu has not finished in time!\n",
			cs->sequence);
		break;

	case CS_TYPE_WAIT:
		dev_err(hdev->dev,
			"Wait command submission %llu has not finished in time!\n",
			cs->sequence);
		break;

	case CS_TYPE_COLLECTIVE_WAIT:
		dev_err(hdev->dev,
			"Collective Wait command submission %llu has not finished in time!\n",
			cs->sequence);
		break;

	default:
		dev_err(hdev->dev,
			"Command submission %llu has not finished in time!\n",
			cs->sequence);
		break;
	}

	cs_put(cs);

	if (hdev->reset_on_lockup)
		hl_device_reset(hdev, false, false);
	else
		hdev->needs_reset = true;
}
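
/*
 * allocate_cs - allocate and initialize a CS object and its completion fence
 *
 * @hdev	: pointer to the habanalabs device structure
 * @ctx		: pointer to the submitting context
 * @cs_type	: type of the CS (default/signal/wait/collective wait)
 * @cs_new	: [out] the newly allocated CS
 *
 * Fails with -EAGAIN if the fence slot for this sequence number is still
 * pending, i.e. the context already has the maximum number of in-flight CS.
 */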
static int allocate_cs(struct hl_device *hdev, struct hl_ctx *ctx,
			enum hl_cs_type cs_type, struct hl_cs **cs_new)
{
	struct hl_cs_counters_atomic *cntr;
	struct hl_fence *other = NULL;
	struct hl_cs_compl *cs_cmpl;
	struct hl_cs *cs;
	int rc;

	cntr = &hdev->aggregated_cs_counters;

	cs = kzalloc(sizeof(*cs), GFP_ATOMIC);
	if (!cs) {
		atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
		atomic64_inc(&cntr->out_of_mem_drop_cnt);
		return -ENOMEM;
	}

	cs->ctx = ctx;
	cs->submitted = false;
	cs->completed = false;
	cs->type = cs_type;
	INIT_LIST_HEAD(&cs->job_list);
	INIT_DELAYED_WORK(&cs->work_tdr, cs_timedout);
	kref_init(&cs->refcount);
	spin_lock_init(&cs->job_lock);

	cs_cmpl = kmalloc(sizeof(*cs_cmpl), GFP_ATOMIC);
	if (!cs_cmpl) {
		atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
		atomic64_inc(&cntr->out_of_mem_drop_cnt);
		rc = -ENOMEM;
		goto free_cs;
	}

	cs_cmpl->hdev = hdev;
	cs_cmpl->type = cs->type;
	spin_lock_init(&cs_cmpl->lock);
	cs->fence = &cs_cmpl->base_fence;

	spin_lock(&ctx->cs_lock);

	cs_cmpl->cs_seq = ctx->cs_sequence;
	other = ctx->cs_pending[cs_cmpl->cs_seq &
				(hdev->asic_prop.max_pending_cs - 1)];

	if (other && !completion_done(&other->completion)) {
		dev_dbg_ratelimited(hdev->dev,
			"Rejecting CS because of too many in-flight CS\n");
		atomic64_inc(&ctx->cs_counters.max_cs_in_flight_drop_cnt);
		atomic64_inc(&cntr->max_cs_in_flight_drop_cnt);
		rc = -EAGAIN;
		goto free_fence;
	}

	cs->jobs_in_queue_cnt = kcalloc(hdev->asic_prop.max_queues,
			sizeof(*cs->jobs_in_queue_cnt), GFP_ATOMIC);
	if (!cs->jobs_in_queue_cnt) {
		atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
		atomic64_inc(&cntr->out_of_mem_drop_cnt);
		rc = -ENOMEM;
		goto free_fence;
	}

	/* init hl_fence */
	hl_fence_init(&cs_cmpl->base_fence);

	cs->sequence = cs_cmpl->cs_seq;

	ctx->cs_pending[cs_cmpl->cs_seq &
			(hdev->asic_prop.max_pending_cs - 1)] =
							&cs_cmpl->base_fence;
	ctx->cs_sequence++;

	hl_fence_get(&cs_cmpl->base_fence);

	hl_fence_put(other);

	spin_unlock(&ctx->cs_lock);

	*cs_new = cs;

	return 0;

free_fence:
	spin_unlock(&ctx->cs_lock);
	kfree(cs_cmpl);
free_cs:
	kfree(cs);
	return rc;
}

static void cs_rollback(struct hl_device *hdev, struct hl_cs *cs)
{
	struct hl_cs_job *job, *tmp;

	list_for_each_entry_safe(job, tmp, &cs->job_list, cs_node)
		complete_job(hdev, job);
}

void hl_cs_rollback_all(struct hl_device *hdev)
{
	int i;
	struct hl_cs *cs, *tmp;

	/* flush all completions */
	for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++)
		flush_workqueue(hdev->cq_wq[i]);

	/* Make sure we don't have leftovers in the CS mirror list */
	list_for_each_entry_safe(cs, tmp, &hdev->cs_mirror_list, mirror_node) {
		cs_get(cs);
		cs->aborted = true;
		dev_warn_ratelimited(hdev->dev, "Killing CS %d.%llu\n",
					cs->ctx->asid, cs->sequence);
		cs_rollback(hdev, cs);
		cs_put(cs);
	}
}

static void job_wq_completion(struct work_struct *work)
{
	struct hl_cs_job *job = container_of(work, struct hl_cs_job,
						finish_work);
	struct hl_cs *cs = job->cs;
	struct hl_device *hdev = cs->ctx->hdev;

	/* job is no longer needed */
	complete_job(hdev, job);
}
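
/*
 * validate_queue_index - sanity-check the queue index of a CS chunk
 *
 * @hdev			: pointer to the habanalabs device structure
 * @chunk			: the user CS chunk to validate
 * @queue_type			: [out] type of the target queue
 * @is_kernel_allocated_cb	: [out] whether the CB is driver (kernel) allocated
 */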
static int validate_queue_index(struct hl_device *hdev,
				struct hl_cs_chunk *chunk,
				enum hl_queue_type *queue_type,
				bool *is_kernel_allocated_cb)
{
	struct asic_fixed_properties *asic = &hdev->asic_prop;
	struct hw_queue_properties *hw_queue_prop;

	/* This must be checked here to prevent out-of-bounds access to
	 * hw_queues_props array
	 */
	if (chunk->queue_index >= asic->max_queues) {
		dev_err(hdev->dev, "Queue index %d is invalid\n",
			chunk->queue_index);
		return -EINVAL;
	}

	hw_queue_prop = &asic->hw_queues_props[chunk->queue_index];

	if (hw_queue_prop->type == QUEUE_TYPE_NA) {
		dev_err(hdev->dev, "Queue index %d is invalid\n",
			chunk->queue_index);
		return -EINVAL;
	}

	if (hw_queue_prop->driver_only) {
		dev_err(hdev->dev,
			"Queue index %d is restricted for the kernel driver\n",
			chunk->queue_index);
		return -EINVAL;
	}

	/* When the hw queue type isn't QUEUE_TYPE_HW, the
	 * USER_ALLOC_CB flag is treated as "don't care".
	 */
	if (hw_queue_prop->type == QUEUE_TYPE_HW) {
		if (chunk->cs_chunk_flags & HL_CS_CHUNK_FLAGS_USER_ALLOC_CB) {
			if (!(hw_queue_prop->cb_alloc_flags & CB_ALLOC_USER)) {
				dev_err(hdev->dev,
					"Queue index %d doesn't support user CB\n",
					chunk->queue_index);
				return -EINVAL;
			}

			*is_kernel_allocated_cb = false;
		} else {
			if (!(hw_queue_prop->cb_alloc_flags &
					CB_ALLOC_KERNEL)) {
				dev_err(hdev->dev,
					"Queue index %d doesn't support kernel CB\n",
					chunk->queue_index);
				return -EINVAL;
			}

			*is_kernel_allocated_cb = true;
		}
	} else {
		*is_kernel_allocated_cb = !!(hw_queue_prop->cb_alloc_flags
						& CB_ALLOC_KERNEL);
	}

	*queue_type = hw_queue_prop->type;
	return 0;
}

static struct hl_cb *get_cb_from_cs_chunk(struct hl_device *hdev,
					struct hl_cb_mgr *cb_mgr,
					struct hl_cs_chunk *chunk)
{
	struct hl_cb *cb;
	u32 cb_handle;

	cb_handle = (u32) (chunk->cb_handle >> PAGE_SHIFT);

	cb = hl_cb_get(hdev, cb_mgr, cb_handle);
	if (!cb) {
		dev_err(hdev->dev, "CB handle 0x%x invalid\n", cb_handle);
		return NULL;
	}

	if ((chunk->cb_size < 8) || (chunk->cb_size > cb->size)) {
		dev_err(hdev->dev, "CB size %u invalid\n", chunk->cb_size);
		goto release_cb;
	}

	atomic_inc(&cb->cs_cnt);

	return cb;

release_cb:
	hl_cb_put(cb);
	return NULL;
}

struct hl_cs_job *hl_cs_allocate_job(struct hl_device *hdev,
		enum hl_queue_type queue_type, bool is_kernel_allocated_cb)
{
	struct hl_cs_job *job;

	job = kzalloc(sizeof(*job), GFP_ATOMIC);
	if (!job)
		return NULL;

	kref_init(&job->refcount);
	job->queue_type = queue_type;
	job->is_kernel_allocated_cb = is_kernel_allocated_cb;

	if (is_cb_patched(hdev, job))
		INIT_LIST_HEAD(&job->userptr_list);

	if (job->queue_type == QUEUE_TYPE_EXT)
		INIT_WORK(&job->finish_work, job_wq_completion);

	return job;
}

static enum hl_cs_type hl_cs_get_cs_type(u32 cs_type_flags)
{
	if (cs_type_flags & HL_CS_FLAGS_SIGNAL)
		return CS_TYPE_SIGNAL;
	else if (cs_type_flags & HL_CS_FLAGS_WAIT)
		return CS_TYPE_WAIT;
	else if (cs_type_flags & HL_CS_FLAGS_COLLECTIVE_WAIT)
		return CS_TYPE_COLLECTIVE_WAIT;
	else
		return CS_TYPE_DEFAULT;
}

static int hl_cs_sanity_checks(struct hl_fpriv *hpriv, union hl_cs_args *args)
{
	struct hl_device *hdev = hpriv->hdev;
	struct hl_ctx *ctx = hpriv->ctx;
	u32 cs_type_flags, num_chunks;
	enum hl_device_status status;
	enum hl_cs_type cs_type;

	if (!hl_device_operational(hdev, &status)) {
		dev_warn_ratelimited(hdev->dev,
			"Device is %s. Can't submit new CS\n",
			hdev->status[status]);
		return -EBUSY;
	}

	cs_type_flags = args->in.cs_flags & HL_CS_FLAGS_TYPE_MASK;

	if (unlikely(cs_type_flags && !is_power_of_2(cs_type_flags))) {
		dev_err(hdev->dev,
			"CS type flags are mutually exclusive, context %d\n",
			ctx->asid);
		return -EINVAL;
	}

	cs_type = hl_cs_get_cs_type(cs_type_flags);
	num_chunks = args->in.num_chunks_execute;

	if (unlikely((cs_type != CS_TYPE_DEFAULT) &&
					!hdev->supports_sync_stream)) {
		dev_err(hdev->dev, "Sync stream CS is not supported\n");
		return -EINVAL;
	}

	if (cs_type == CS_TYPE_DEFAULT) {
		if (!num_chunks) {
			dev_err(hdev->dev,
				"Got execute CS with 0 chunks, context %d\n",
				ctx->asid);
			return -EINVAL;
		}
	} else if (num_chunks != 1) {
		dev_err(hdev->dev,
			"Sync stream CS mandates one chunk only, context %d\n",
			ctx->asid);
		return -EINVAL;
	}

	return 0;
}

static int hl_cs_copy_chunk_array(struct hl_device *hdev,
					struct hl_cs_chunk **cs_chunk_array,
					void __user *chunks, u32 num_chunks,
					struct hl_ctx *ctx)
{
	u32 size_to_copy;

	if (num_chunks > HL_MAX_JOBS_PER_CS) {
		atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
		atomic64_inc(&hdev->aggregated_cs_counters.validation_drop_cnt);
		dev_err(hdev->dev,
			"Number of chunks can NOT be larger than %d\n",
			HL_MAX_JOBS_PER_CS);
		return -EINVAL;
	}

	*cs_chunk_array = kmalloc_array(num_chunks, sizeof(**cs_chunk_array),
					GFP_ATOMIC);
	if (!*cs_chunk_array) {
		atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
		atomic64_inc(&hdev->aggregated_cs_counters.out_of_mem_drop_cnt);
		return -ENOMEM;
	}

	size_to_copy = num_chunks * sizeof(struct hl_cs_chunk);
	if (copy_from_user(*cs_chunk_array, chunks, size_to_copy)) {
		atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
		atomic64_inc(&hdev->aggregated_cs_counters.validation_drop_cnt);
		dev_err(hdev->dev, "Failed to copy cs chunk array from user\n");
		kfree(*cs_chunk_array);
		return -EFAULT;
	}

	return 0;
}
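
/*
 * cs_ioctl_default - handle submission of a regular (execute) CS
 *
 * @hpriv	: pointer to the private data of the fd
 * @chunks	: user array of CS chunks
 * @num_chunks	: number of chunks in the array
 * @cs_seq	: [out] sequence number assigned to the CS
 * @timestamp	: whether to record a completion timestamp for this CS
 *
 * Validates the chunks, builds a job per chunk, parses each job and schedules
 * the CS on the H/W queues. At least one external or H/W queue job is
 * required.
 */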
static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks,
				u32 num_chunks, u64 *cs_seq, bool timestamp)
{
	bool int_queues_only = true;
	struct hl_device *hdev = hpriv->hdev;
	struct hl_cs_chunk *cs_chunk_array;
	struct hl_cs_counters_atomic *cntr;
	struct hl_ctx *ctx = hpriv->ctx;
	struct hl_cs_job *job;
	struct hl_cs *cs;
	struct hl_cb *cb;
	int rc, i;

	cntr = &hdev->aggregated_cs_counters;
	*cs_seq = ULLONG_MAX;

	rc = hl_cs_copy_chunk_array(hdev, &cs_chunk_array, chunks, num_chunks,
			hpriv->ctx);
	if (rc)
		goto out;

	/* increment refcnt for context */
	hl_ctx_get(hdev, hpriv->ctx);

	rc = allocate_cs(hdev, hpriv->ctx, CS_TYPE_DEFAULT, &cs);
	if (rc) {
		hl_ctx_put(hpriv->ctx);
		goto free_cs_chunk_array;
	}

	cs->timestamp = !!timestamp;

	*cs_seq = cs->sequence;

	hl_debugfs_add_cs(cs);

	/* Validate ALL the CS chunks before submitting the CS */
	for (i = 0 ; i < num_chunks ; i++) {
		struct hl_cs_chunk *chunk = &cs_chunk_array[i];
		enum hl_queue_type queue_type;
		bool is_kernel_allocated_cb;

		rc = validate_queue_index(hdev, chunk, &queue_type,
						&is_kernel_allocated_cb);
		if (rc) {
			atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
			atomic64_inc(&cntr->validation_drop_cnt);
			goto free_cs_object;
		}

		if (is_kernel_allocated_cb) {
			cb = get_cb_from_cs_chunk(hdev, &hpriv->cb_mgr, chunk);
			if (!cb) {
				atomic64_inc(
					&ctx->cs_counters.validation_drop_cnt);
				atomic64_inc(&cntr->validation_drop_cnt);
				rc = -EINVAL;
				goto free_cs_object;
			}
		} else {
			cb = (struct hl_cb *) (uintptr_t) chunk->cb_handle;
		}

		if (queue_type == QUEUE_TYPE_EXT || queue_type == QUEUE_TYPE_HW)
			int_queues_only = false;

		job = hl_cs_allocate_job(hdev, queue_type,
						is_kernel_allocated_cb);
		if (!job) {
			atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
			atomic64_inc(&cntr->out_of_mem_drop_cnt);
			dev_err(hdev->dev, "Failed to allocate a new job\n");
			rc = -ENOMEM;
			if (is_kernel_allocated_cb)
				goto release_cb;

			goto free_cs_object;
		}

		job->id = i + 1;
		job->cs = cs;
		job->user_cb = cb;
		job->user_cb_size = chunk->cb_size;
		job->hw_queue_id = chunk->queue_index;

		cs->jobs_in_queue_cnt[job->hw_queue_id]++;

		list_add_tail(&job->cs_node, &cs->job_list);

		/*
		 * Increment the CS reference. When the CS refcount reaches 0,
		 * the CS is done and can be signaled to the user and all its
		 * resources freed.
		 * Only increment for JOBs on external or H/W queues, because
		 * only for those JOBs do we get a completion.
		 */
		if (job->queue_type == QUEUE_TYPE_EXT ||
				job->queue_type == QUEUE_TYPE_HW)
			cs_get(cs);

		hl_debugfs_add_job(hdev, job);

		rc = cs_parser(hpriv, job);
		if (rc) {
			atomic64_inc(&ctx->cs_counters.parsing_drop_cnt);
			atomic64_inc(&cntr->parsing_drop_cnt);
			dev_err(hdev->dev,
				"Failed to parse JOB %d.%llu.%d, err %d, rejecting the CS\n",
				cs->ctx->asid, cs->sequence, job->id, rc);
			goto free_cs_object;
		}
	}

	if (int_queues_only) {
		atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
		atomic64_inc(&cntr->validation_drop_cnt);
		dev_err(hdev->dev,
			"Reject CS %d.%llu because only internal queues jobs are present\n",
			cs->ctx->asid, cs->sequence);
		rc = -EINVAL;
		goto free_cs_object;
	}

	rc = hl_hw_queue_schedule_cs(cs);
	if (rc) {
		if (rc != -EAGAIN)
			dev_err(hdev->dev,
				"Failed to submit CS %d.%llu to H/W queues, error %d\n",
				cs->ctx->asid, cs->sequence, rc);
		goto free_cs_object;
	}

	rc = HL_CS_STATUS_SUCCESS;
	goto put_cs;

release_cb:
	atomic_dec(&cb->cs_cnt);
	hl_cb_put(cb);
free_cs_object:
	cs_rollback(hdev, cs);
	*cs_seq = ULLONG_MAX;
	/* The path below is both for good and erroneous exits */
put_cs:
	/* We finished with the CS in this function, so put the ref */
	cs_put(cs);
free_cs_chunk_array:
	kfree(cs_chunk_array);
out:
	return rc;
}
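
/*
 * hl_cs_ctx_switch - run the context-switch/restore phase if needed
 *
 * @hpriv	: pointer to the private data of the fd
 * @args	: the CS ioctl arguments
 * @cs_seq	: [out] sequence number of the restore CS, if one was submitted
 *
 * Only the thread that grabs thread_ctx_switch_token performs the switch and
 * restore phase; other threads of the context poll
 * thread_ctx_switch_wait_token until it is done.
 */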
static int hl_cs_ctx_switch(struct hl_fpriv *hpriv, union hl_cs_args *args,
				u64 *cs_seq)
{
	struct hl_device *hdev = hpriv->hdev;
	struct hl_ctx *ctx = hpriv->ctx;
	bool need_soft_reset = false;
	int rc = 0, do_ctx_switch;
	void __user *chunks;
	u32 num_chunks, tmp;
	int ret;

	do_ctx_switch = atomic_cmpxchg(&ctx->thread_ctx_switch_token, 1, 0);

	if (do_ctx_switch || (args->in.cs_flags & HL_CS_FLAGS_FORCE_RESTORE)) {
		mutex_lock(&hpriv->restore_phase_mutex);

		if (do_ctx_switch) {
			rc = hdev->asic_funcs->context_switch(hdev, ctx->asid);
			if (rc) {
				dev_err_ratelimited(hdev->dev,
					"Failed to switch to context %d, rejecting CS! %d\n",
					ctx->asid, rc);
				/*
				 * If we timedout, or if the device is not IDLE
				 * while we want to do context-switch (-EBUSY),
				 * we need to soft-reset because QMAN is
				 * probably stuck. However, we can't call reset
				 * here directly because of deadlock, so we
				 * need to do it at the very end of this
				 * function
				 */
				if ((rc == -ETIMEDOUT) || (rc == -EBUSY))
					need_soft_reset = true;
				mutex_unlock(&hpriv->restore_phase_mutex);
				goto out;
			}
		}

		hdev->asic_funcs->restore_phase_topology(hdev);

		chunks = (void __user *) (uintptr_t) args->in.chunks_restore;
		num_chunks = args->in.num_chunks_restore;

		if (!num_chunks) {
			dev_dbg(hdev->dev,
				"Need to run restore phase but restore CS is empty\n");
			rc = 0;
		} else {
			rc = cs_ioctl_default(hpriv, chunks, num_chunks,
						cs_seq, false);
		}

		mutex_unlock(&hpriv->restore_phase_mutex);

		if (rc) {
			dev_err(hdev->dev,
				"Failed to submit restore CS for context %d (%d)\n",
				ctx->asid, rc);
			goto out;
		}

		/* Need to wait for restore completion before execution phase */
		if (num_chunks) {
			enum hl_cs_wait_status status;
wait_again:
			ret = _hl_cs_wait_ioctl(hdev, ctx,
					jiffies_to_usecs(hdev->timeout_jiffies),
					*cs_seq, &status, NULL);
			if (ret) {
				if (ret == -ERESTARTSYS) {
					usleep_range(100, 200);
					goto wait_again;
				}

				dev_err(hdev->dev,
					"Restore CS for context %d failed to complete %d\n",
					ctx->asid, ret);
				rc = -ENOEXEC;
				goto out;
			}
		}

		ctx->thread_ctx_switch_wait_token = 1;

	} else if (!ctx->thread_ctx_switch_wait_token) {
		rc = hl_poll_timeout_memory(hdev,
			&ctx->thread_ctx_switch_wait_token, tmp, (tmp == 1),
			100, jiffies_to_usecs(hdev->timeout_jiffies), false);

		if (rc == -ETIMEDOUT) {
			dev_err(hdev->dev,
				"context switch phase timeout (%d)\n", tmp);
			goto out;
		}
	}

out:
	if ((rc == -ETIMEDOUT || rc == -EBUSY) && (need_soft_reset))
		hl_device_reset(hdev, false, false);

	return rc;
}
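
/*
 * cs_ioctl_extract_signal_seq - copy the signal CS sequence from user space
 *
 * @hdev	: pointer to the habanalabs device structure
 * @chunk	: the wait CS chunk holding the user signal_seq_arr
 * @signal_seq	: [out] the signal CS sequence to wait on
 * @ctx		: pointer to the submitting context
 *
 * Currently only a single signal sequence per wait CS is supported.
 */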
static int cs_ioctl_extract_signal_seq(struct hl_device *hdev,
		struct hl_cs_chunk *chunk, u64 *signal_seq, struct hl_ctx *ctx)
{
	u64 *signal_seq_arr = NULL;
	u32 size_to_copy, signal_seq_arr_len;
	int rc = 0;

	signal_seq_arr_len = chunk->num_signal_seq_arr;

	/* currently only one signal seq is supported */
	if (signal_seq_arr_len != 1) {
		atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
		atomic64_inc(&hdev->aggregated_cs_counters.validation_drop_cnt);
		dev_err(hdev->dev,
			"Wait for signal CS supports only one signal CS seq\n");
		return -EINVAL;
	}

	signal_seq_arr = kmalloc_array(signal_seq_arr_len,
					sizeof(*signal_seq_arr),
					GFP_ATOMIC);
	if (!signal_seq_arr) {
		atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
		atomic64_inc(&hdev->aggregated_cs_counters.out_of_mem_drop_cnt);
		return -ENOMEM;
	}

	size_to_copy = chunk->num_signal_seq_arr * sizeof(*signal_seq_arr);
	if (copy_from_user(signal_seq_arr,
				u64_to_user_ptr(chunk->signal_seq_arr),
				size_to_copy)) {
		atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
		atomic64_inc(&hdev->aggregated_cs_counters.validation_drop_cnt);
		dev_err(hdev->dev,
			"Failed to copy signal seq array from user\n");
		rc = -EFAULT;
		goto out;
	}

	/* currently it is guaranteed to have only one signal seq */
	*signal_seq = signal_seq_arr[0];

out:
	kfree(signal_seq_arr);

	return rc;
}

static int cs_ioctl_signal_wait_create_jobs(struct hl_device *hdev,
		struct hl_ctx *ctx, struct hl_cs *cs, enum hl_queue_type q_type,
		u32 q_idx)
{
	struct hl_cs_counters_atomic *cntr;
	struct hl_cs_job *job;
	struct hl_cb *cb;
	u32 cb_size;

	cntr = &hdev->aggregated_cs_counters;

	job = hl_cs_allocate_job(hdev, q_type, true);
	if (!job) {
		atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
		atomic64_inc(&cntr->out_of_mem_drop_cnt);
		dev_err(hdev->dev, "Failed to allocate a new job\n");
		return -ENOMEM;
	}

	if (cs->type == CS_TYPE_WAIT)
		cb_size = hdev->asic_funcs->get_wait_cb_size(hdev);
	else
		cb_size = hdev->asic_funcs->get_signal_cb_size(hdev);

	cb = hl_cb_kernel_create(hdev, cb_size,
				q_type == QUEUE_TYPE_HW && hdev->mmu_enable);
	if (!cb) {
		atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
		atomic64_inc(&cntr->out_of_mem_drop_cnt);
		kfree(job);
		return -EFAULT;
	}

	job->id = 0;
	job->cs = cs;
	job->user_cb = cb;
	atomic_inc(&job->user_cb->cs_cnt);
	job->user_cb_size = cb_size;
	job->hw_queue_id = q_idx;

	/*
	 * No need for parsing, the user CB is the patched CB.
	 * We call hl_cb_destroy() for two reasons - we don't need the CB in
	 * the CB idr anymore, and to decrement its refcount as it was
	 * incremented inside hl_cb_kernel_create().
	 */
	job->patched_cb = job->user_cb;
	job->job_cb_size = job->user_cb_size;
	hl_cb_destroy(hdev, &hdev->kernel_cb_mgr, cb->id << PAGE_SHIFT);

	/* increment refcount as for external queues we get completion */
	cs_get(cs);

	cs->jobs_in_queue_cnt[job->hw_queue_id]++;

	list_add_tail(&job->cs_node, &cs->job_list);

	hl_debugfs_add_job(hdev, job);

	return 0;
}
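
/*
 * cs_ioctl_signal_wait - handle submission of a signal/wait/collective-wait CS
 *
 * @hpriv	: pointer to the private data of the fd
 * @cs_type	: signal, wait or collective wait
 * @chunks	: user array holding the single CS chunk
 * @num_chunks	: number of chunks (must be 1)
 * @cs_seq	: [out] sequence number assigned to the CS
 * @timestamp	: whether to record a completion timestamp for this CS
 *
 * For wait CS, the signal fence is looked up and saved on the CS so it can be
 * initialized right before the wait CS is placed on the queue. If the signal
 * CS already completed, no wait CS is submitted and 0 is returned.
 */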
static int cs_ioctl_signal_wait(struct hl_fpriv *hpriv, enum hl_cs_type cs_type,
				void __user *chunks, u32 num_chunks,
				u64 *cs_seq, bool timestamp)
{
	struct hl_cs_chunk *cs_chunk_array, *chunk;
	struct hw_queue_properties *hw_queue_prop;
	struct hl_device *hdev = hpriv->hdev;
	struct hl_cs_compl *sig_waitcs_cmpl;
	u32 q_idx, collective_engine_id = 0;
	struct hl_cs_counters_atomic *cntr;
	struct hl_fence *sig_fence = NULL;
	struct hl_ctx *ctx = hpriv->ctx;
	enum hl_queue_type q_type;
	struct hl_cs *cs;
	u64 signal_seq;
	int rc;

	cntr = &hdev->aggregated_cs_counters;
	*cs_seq = ULLONG_MAX;

	rc = hl_cs_copy_chunk_array(hdev, &cs_chunk_array, chunks, num_chunks,
			ctx);
	if (rc)
		goto out;

	/* currently it is guaranteed to have only one chunk */
	chunk = &cs_chunk_array[0];

	if (chunk->queue_index >= hdev->asic_prop.max_queues) {
		atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
		atomic64_inc(&cntr->validation_drop_cnt);
		dev_err(hdev->dev, "Queue index %d is invalid\n",
			chunk->queue_index);
		rc = -EINVAL;
		goto free_cs_chunk_array;
	}

	q_idx = chunk->queue_index;
	hw_queue_prop = &hdev->asic_prop.hw_queues_props[q_idx];
	q_type = hw_queue_prop->type;

	if (!hw_queue_prop->supports_sync_stream) {
		atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
		atomic64_inc(&cntr->validation_drop_cnt);
		dev_err(hdev->dev,
			"Queue index %d does not support sync stream operations\n",
			q_idx);
		rc = -EINVAL;
		goto free_cs_chunk_array;
	}

	if (cs_type == CS_TYPE_COLLECTIVE_WAIT) {
		if (!(hw_queue_prop->collective_mode == HL_COLLECTIVE_MASTER)) {
			atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
			atomic64_inc(&cntr->validation_drop_cnt);
			dev_err(hdev->dev,
				"Queue index %d is invalid\n", q_idx);
			rc = -EINVAL;
			goto free_cs_chunk_array;
		}

		collective_engine_id = chunk->collective_engine_id;
	}

	if (cs_type == CS_TYPE_WAIT || cs_type == CS_TYPE_COLLECTIVE_WAIT) {
		rc = cs_ioctl_extract_signal_seq(hdev, chunk, &signal_seq, ctx);
		if (rc)
			goto free_cs_chunk_array;

		sig_fence = hl_ctx_get_fence(ctx, signal_seq);
		if (IS_ERR(sig_fence)) {
			atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
			atomic64_inc(&cntr->validation_drop_cnt);
			dev_err(hdev->dev,
				"Failed to get signal CS with seq 0x%llx\n",
				signal_seq);
			rc = PTR_ERR(sig_fence);
			goto free_cs_chunk_array;
		}

		if (!sig_fence) {
			/* signal CS already finished */
			rc = 0;
			goto free_cs_chunk_array;
		}

		sig_waitcs_cmpl =
			container_of(sig_fence, struct hl_cs_compl, base_fence);

		if (sig_waitcs_cmpl->type != CS_TYPE_SIGNAL) {
			atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
			atomic64_inc(&cntr->validation_drop_cnt);
			dev_err(hdev->dev,
				"CS seq 0x%llx is not of a signal CS\n",
				signal_seq);
			hl_fence_put(sig_fence);
			rc = -EINVAL;
			goto free_cs_chunk_array;
		}

		if (completion_done(&sig_fence->completion)) {
			/* signal CS already finished */
			hl_fence_put(sig_fence);
			rc = 0;
			goto free_cs_chunk_array;
		}
	}

	/* increment refcnt for context */
	hl_ctx_get(hdev, ctx);

	rc = allocate_cs(hdev, ctx, cs_type, &cs);
	if (rc) {
		if (cs_type == CS_TYPE_WAIT ||
			cs_type == CS_TYPE_COLLECTIVE_WAIT)
			hl_fence_put(sig_fence);
		hl_ctx_put(ctx);
		goto free_cs_chunk_array;
	}

	cs->timestamp = !!timestamp;

	/*
	 * Save the signal CS fence for later initialization right before
	 * hanging the wait CS on the queue.
	 */
	if (cs_type == CS_TYPE_WAIT || cs_type == CS_TYPE_COLLECTIVE_WAIT)
		cs->signal_fence = sig_fence;

	hl_debugfs_add_cs(cs);

	*cs_seq = cs->sequence;

	if (cs_type == CS_TYPE_WAIT || cs_type == CS_TYPE_SIGNAL)
		rc = cs_ioctl_signal_wait_create_jobs(hdev, ctx, cs, q_type,
				q_idx);
	else if (cs_type == CS_TYPE_COLLECTIVE_WAIT)
		rc = hdev->asic_funcs->collective_wait_create_jobs(hdev, ctx,
				cs, q_idx, collective_engine_id);
	else {
		atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
		atomic64_inc(&cntr->validation_drop_cnt);
		rc = -EINVAL;
	}

	if (rc)
		goto free_cs_object;

	rc = hl_hw_queue_schedule_cs(cs);
	if (rc) {
		if (rc != -EAGAIN)
			dev_err(hdev->dev,
				"Failed to submit CS %d.%llu to H/W queues, error %d\n",
				ctx->asid, cs->sequence, rc);
		goto free_cs_object;
	}

	rc = HL_CS_STATUS_SUCCESS;
	goto put_cs;

free_cs_object:
	cs_rollback(hdev, cs);
	*cs_seq = ULLONG_MAX;
	/* The path below is both for good and erroneous exits */
put_cs:
	/* We finished with the CS in this function, so put the ref */
	cs_put(cs);
free_cs_chunk_array:
	kfree(cs_chunk_array);
out:
	return rc;
}
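
/**
 * hl_cs_ioctl() - the CS submission ioctl handler
 * @hpriv: pointer to the private data of the fd
 * @data: pointer to the ioctl arguments (union hl_cs_args)
 *
 * Runs the sanity checks and the context-switch/restore phase, then
 * dispatches to cs_ioctl_signal_wait() or cs_ioctl_default() according to
 * the CS type flags. Unless the CS was rejected with -EAGAIN, the resulting
 * status and CS sequence number are reported back to the user.
 *
 * Return: 0 on success, negative error code otherwise.
 */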
int hl_cs_ioctl(struct hl_fpriv *hpriv, void *data)
{
	union hl_cs_args *args = data;
	enum hl_cs_type cs_type;
	u64 cs_seq = ULLONG_MAX;
	void __user *chunks;
	u32 num_chunks;
	int rc;

	rc = hl_cs_sanity_checks(hpriv, args);
	if (rc)
		goto out;

	rc = hl_cs_ctx_switch(hpriv, args, &cs_seq);
	if (rc)
		goto out;

	cs_type = hl_cs_get_cs_type(args->in.cs_flags &
					~HL_CS_FLAGS_FORCE_RESTORE);
	chunks = (void __user *) (uintptr_t) args->in.chunks_execute;
	num_chunks = args->in.num_chunks_execute;

	switch (cs_type) {
	case CS_TYPE_SIGNAL:
	case CS_TYPE_WAIT:
	case CS_TYPE_COLLECTIVE_WAIT:
		rc = cs_ioctl_signal_wait(hpriv, cs_type, chunks, num_chunks,
				&cs_seq, args->in.cs_flags & HL_CS_FLAGS_TIMESTAMP);
		break;
	default:
		rc = cs_ioctl_default(hpriv, chunks, num_chunks, &cs_seq,
				args->in.cs_flags & HL_CS_FLAGS_TIMESTAMP);
		break;
	}

out:
	if (rc != -EAGAIN) {
		memset(args, 0, sizeof(*args));
		args->out.status = rc;
		args->out.seq = cs_seq;
	}

	return rc;
}
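
/*
 * _hl_cs_wait_ioctl - wait for a CS fence to signal
 *
 * @hdev	: pointer to the habanalabs device structure
 * @ctx		: pointer to the context that submitted the CS
 * @timeout_us	: timeout in microseconds (0 means just poll the completion)
 * @seq		: sequence number of the CS to wait on
 * @status	: [out] busy/completed/gone
 * @timestamp	: [out] completion timestamp in ns, if requested and available
 */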
static int _hl_cs_wait_ioctl(struct hl_device *hdev, struct hl_ctx *ctx,
				u64 timeout_us, u64 seq,
				enum hl_cs_wait_status *status, s64 *timestamp)
{
	struct hl_fence *fence;
	unsigned long timeout;
	int rc = 0;
	long completion_rc;

	if (timestamp)
		*timestamp = 0;

	if (timeout_us == MAX_SCHEDULE_TIMEOUT)
		timeout = timeout_us;
	else
		timeout = usecs_to_jiffies(timeout_us);

	hl_ctx_get(hdev, ctx);

	fence = hl_ctx_get_fence(ctx, seq);
	if (IS_ERR(fence)) {
		rc = PTR_ERR(fence);
		if (rc == -EINVAL)
			dev_notice_ratelimited(hdev->dev,
				"Can't wait on CS %llu because current CS is at seq %llu\n",
				seq, ctx->cs_sequence);
	} else if (fence) {
		if (!timeout_us)
			completion_rc = completion_done(&fence->completion);
		else
			completion_rc =
				wait_for_completion_interruptible_timeout(
					&fence->completion, timeout);

		if (completion_rc > 0) {
			*status = CS_WAIT_STATUS_COMPLETED;
			if (timestamp)
				*timestamp = ktime_to_ns(fence->timestamp);
		} else {
			*status = CS_WAIT_STATUS_BUSY;
		}

		if (fence->error == -ETIMEDOUT)
			rc = -ETIMEDOUT;
		else if (fence->error == -EIO)
			rc = -EIO;

		hl_fence_put(fence);
	} else {
		dev_dbg(hdev->dev,
			"Can't wait on seq %llu because current CS is at seq %llu (Fence is gone)\n",
			seq, ctx->cs_sequence);
		*status = CS_WAIT_STATUS_GONE;
	}

	hl_ctx_put(ctx);

	return rc;
}
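
/**
 * hl_cs_wait_ioctl() - the wait-for-CS ioctl handler
 * @hpriv: pointer to the private data of the fd
 * @data: pointer to the ioctl arguments (union hl_wait_cs_args)
 *
 * Translates the internal wait status and errors into the uapi status codes
 * (completed/busy/gone, timed-out/aborted/interrupted) reported to the user.
 *
 * Return: 0 on success, negative error code otherwise.
 */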
int hl_cs_wait_ioctl(struct hl_fpriv *hpriv, void *data)
{
	struct hl_device *hdev = hpriv->hdev;
	union hl_wait_cs_args *args = data;
	enum hl_cs_wait_status status;
	u64 seq = args->in.seq;
	s64 timestamp;
	int rc;

	rc = _hl_cs_wait_ioctl(hdev, hpriv->ctx, args->in.timeout_us, seq,
				&status, &timestamp);

	memset(args, 0, sizeof(*args));

	if (rc) {
		if (rc == -ERESTARTSYS) {
			dev_err_ratelimited(hdev->dev,
				"user process got signal while waiting for CS handle %llu\n",
				seq);
			args->out.status = HL_WAIT_CS_STATUS_INTERRUPTED;
			rc = -EINTR;
		} else if (rc == -ETIMEDOUT) {
			dev_err_ratelimited(hdev->dev,
				"CS %llu has timed-out while user process is waiting for it\n",
				seq);
			args->out.status = HL_WAIT_CS_STATUS_TIMEDOUT;
		} else if (rc == -EIO) {
			dev_err_ratelimited(hdev->dev,
				"CS %llu has been aborted while user process is waiting for it\n",
				seq);
			args->out.status = HL_WAIT_CS_STATUS_ABORTED;
		}

		return rc;
	}

	if (timestamp) {
		args->out.flags |= HL_WAIT_CS_STATUS_FLAG_TIMESTAMP_VLD;
		args->out.timestamp_nsec = timestamp;
	}

	switch (status) {
	case CS_WAIT_STATUS_GONE:
		args->out.flags |= HL_WAIT_CS_STATUS_FLAG_GONE;
		fallthrough;
	case CS_WAIT_STATUS_COMPLETED:
		args->out.status = HL_WAIT_CS_STATUS_COMPLETED;
		break;
	case CS_WAIT_STATUS_BUSY:
	default:
		args->out.status = HL_WAIT_CS_STATUS_BUSY;
		break;
	}

	return 0;
}