/*
 * Copyright © 2014 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/sched/signal.h>

#include "uapi/drm/vc4_drm.h"
#include "vc4_drv.h"
#include "vc4_regs.h"
#include "vc4_trace.h"

static void
vc4_queue_hangcheck(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	mod_timer(&vc4->hangcheck.timer,
		  round_jiffies_up(jiffies + msecs_to_jiffies(100)));
}

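/* Snapshot of the state of a hung job, kept so it can be handed back to
 * userspace through vc4_get_hang_state_ioctl() for debugging.
 */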
struct vc4_hang_state {
	struct drm_vc4_get_hang_state user_state;

	u32 bo_count;
	struct drm_gem_object **bo;
};

static void
vc4_free_hang_state(struct drm_device *dev, struct vc4_hang_state *state)
{
	unsigned int i;

	for (i = 0; i < state->user_state.bo_count; i++)
		drm_gem_object_unreference_unlocked(state->bo[i]);

	kfree(state);
}

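/* Returns the saved hang state to userspace: a GEM handle is created in
 * the caller's file for each BO involved in the hung job, the captured
 * register state is copied out, and the kernel's copy is then freed.
 */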
int
vc4_get_hang_state_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	struct drm_vc4_get_hang_state *get_state = data;
	struct drm_vc4_get_hang_state_bo *bo_state;
	struct vc4_hang_state *kernel_state;
	struct drm_vc4_get_hang_state *state;
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	unsigned long irqflags;
	u32 i;
	int ret = 0;

	spin_lock_irqsave(&vc4->job_lock, irqflags);
	kernel_state = vc4->hang_state;
	if (!kernel_state) {
		spin_unlock_irqrestore(&vc4->job_lock, irqflags);
		return -ENOENT;
	}

	state = &kernel_state->user_state;

	/* If the user's array isn't big enough, just return the
	 * required array size.
	 */
	if (get_state->bo_count < state->bo_count) {
		get_state->bo_count = state->bo_count;
		spin_unlock_irqrestore(&vc4->job_lock, irqflags);
		return 0;
	}

	vc4->hang_state = NULL;
	spin_unlock_irqrestore(&vc4->job_lock, irqflags);

	/* Save the user's BO pointer, so we don't stomp it with the memcpy. */
	state->bo = get_state->bo;
	memcpy(get_state, state, sizeof(*state));

	bo_state = kcalloc(state->bo_count, sizeof(*bo_state), GFP_KERNEL);
	if (!bo_state) {
		ret = -ENOMEM;
		goto err_free;
	}

	for (i = 0; i < state->bo_count; i++) {
		struct vc4_bo *vc4_bo = to_vc4_bo(kernel_state->bo[i]);
		u32 handle;

		ret = drm_gem_handle_create(file_priv, kernel_state->bo[i],
					    &handle);
		if (ret) {
			state->bo_count = i;
			goto err_delete_handle;
		}
		bo_state[i].handle = handle;
		bo_state[i].paddr = vc4_bo->base.paddr;
		bo_state[i].size = vc4_bo->base.base.size;
	}

	if (copy_to_user((void __user *)(uintptr_t)get_state->bo,
			 bo_state,
			 state->bo_count * sizeof(*bo_state)))
		ret = -EFAULT;

err_delete_handle:
	if (ret) {
		for (i = 0; i < state->bo_count; i++)
			drm_gem_handle_delete(file_priv, bo_state[i].handle);
	}

err_free:
	vc4_free_hang_state(dev, kernel_state);
	kfree(bo_state);

	return ret;
}

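/* Called from the reset work when a hang is detected: takes references
 * on the BOs of the queued bin/render jobs under job_lock, then records
 * the V3D register state so it can later be retrieved with
 * vc4_get_hang_state_ioctl().
 */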
static void
vc4_save_hang_state(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct drm_vc4_get_hang_state *state;
	struct vc4_hang_state *kernel_state;
	struct vc4_exec_info *exec[2];
	struct vc4_bo *bo;
	unsigned long irqflags;
	unsigned int i, j, unref_list_count, prev_idx;

	kernel_state = kcalloc(1, sizeof(*kernel_state), GFP_KERNEL);
	if (!kernel_state)
		return;

	state = &kernel_state->user_state;

	spin_lock_irqsave(&vc4->job_lock, irqflags);
	exec[0] = vc4_first_bin_job(vc4);
	exec[1] = vc4_first_render_job(vc4);
	if (!exec[0] && !exec[1]) {
		spin_unlock_irqrestore(&vc4->job_lock, irqflags);
		return;
	}

	/* Get the bos from both binner and renderer into hang state. */
	state->bo_count = 0;
	for (i = 0; i < 2; i++) {
		if (!exec[i])
			continue;

		unref_list_count = 0;
		list_for_each_entry(bo, &exec[i]->unref_list, unref_head)
			unref_list_count++;
		state->bo_count += exec[i]->bo_count + unref_list_count;
	}

	kernel_state->bo = kcalloc(state->bo_count,
				   sizeof(*kernel_state->bo), GFP_ATOMIC);

	if (!kernel_state->bo) {
		spin_unlock_irqrestore(&vc4->job_lock, irqflags);
		return;
	}

	prev_idx = 0;
	for (i = 0; i < 2; i++) {
		if (!exec[i])
			continue;

		for (j = 0; j < exec[i]->bo_count; j++) {
			drm_gem_object_reference(&exec[i]->bo[j]->base);
			kernel_state->bo[j + prev_idx] = &exec[i]->bo[j]->base;
		}

		list_for_each_entry(bo, &exec[i]->unref_list, unref_head) {
			drm_gem_object_reference(&bo->base.base);
			kernel_state->bo[j + prev_idx] = &bo->base.base;
			j++;
		}
		prev_idx = j + 1;
	}

	if (exec[0])
		state->start_bin = exec[0]->ct0ca;
	if (exec[1])
		state->start_render = exec[1]->ct1ca;

	spin_unlock_irqrestore(&vc4->job_lock, irqflags);

	state->ct0ca = V3D_READ(V3D_CTNCA(0));
	state->ct0ea = V3D_READ(V3D_CTNEA(0));

	state->ct1ca = V3D_READ(V3D_CTNCA(1));
	state->ct1ea = V3D_READ(V3D_CTNEA(1));

	state->ct0cs = V3D_READ(V3D_CTNCS(0));
	state->ct1cs = V3D_READ(V3D_CTNCS(1));

	state->ct0ra0 = V3D_READ(V3D_CT00RA0);
	state->ct1ra0 = V3D_READ(V3D_CT01RA0);

	state->bpca = V3D_READ(V3D_BPCA);
	state->bpcs = V3D_READ(V3D_BPCS);
	state->bpoa = V3D_READ(V3D_BPOA);
	state->bpos = V3D_READ(V3D_BPOS);

	state->vpmbase = V3D_READ(V3D_VPMBASE);

	state->dbge = V3D_READ(V3D_DBGE);
	state->fdbgo = V3D_READ(V3D_FDBGO);
	state->fdbgb = V3D_READ(V3D_FDBGB);
	state->fdbgr = V3D_READ(V3D_FDBGR);
	state->fdbgs = V3D_READ(V3D_FDBGS);
	state->errstat = V3D_READ(V3D_ERRSTAT);

	spin_lock_irqsave(&vc4->job_lock, irqflags);
	if (vc4->hang_state) {
		spin_unlock_irqrestore(&vc4->job_lock, irqflags);
		vc4_free_hang_state(dev, kernel_state);
	} else {
		vc4->hang_state = kernel_state;
		spin_unlock_irqrestore(&vc4->job_lock, irqflags);
	}
}

static void
vc4_reset(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	DRM_INFO("Resetting GPU.\n");

	mutex_lock(&vc4->power_lock);
	if (vc4->power_refcount) {
		/* Power the device off and back on by dropping the
		 * reference on runtime PM.
		 */
		pm_runtime_put_sync_suspend(&vc4->v3d->pdev->dev);
		pm_runtime_get_sync(&vc4->v3d->pdev->dev);
	}
	mutex_unlock(&vc4->power_lock);

	vc4_irq_reset(dev);

	/* Rearm the hangcheck -- another job might have been waiting
	 * for our hung one to get kicked off, and vc4_irq_reset()
	 * would have started it.
	 */
	vc4_queue_hangcheck(dev);
}

static void
vc4_reset_work(struct work_struct *work)
{
	struct vc4_dev *vc4 =
		container_of(work, struct vc4_dev, hangcheck.reset_work);

	vc4_save_hang_state(vc4->dev);

	vc4_reset(vc4->dev);
}

static void
vc4_hangcheck_elapsed(unsigned long data)
{
	struct drm_device *dev = (struct drm_device *)data;
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	uint32_t ct0ca, ct1ca;
	unsigned long irqflags;
	struct vc4_exec_info *bin_exec, *render_exec;

	spin_lock_irqsave(&vc4->job_lock, irqflags);

	bin_exec = vc4_first_bin_job(vc4);
	render_exec = vc4_first_render_job(vc4);

	/* If idle, we can stop watching for hangs. */
	if (!bin_exec && !render_exec) {
		spin_unlock_irqrestore(&vc4->job_lock, irqflags);
		return;
	}

	ct0ca = V3D_READ(V3D_CTNCA(0));
	ct1ca = V3D_READ(V3D_CTNCA(1));

	/* If we've made any progress in execution, rearm the timer
	 * and wait.
	 */
	if ((bin_exec && ct0ca != bin_exec->last_ct0ca) ||
	    (render_exec && ct1ca != render_exec->last_ct1ca)) {
		if (bin_exec)
			bin_exec->last_ct0ca = ct0ca;
		if (render_exec)
			render_exec->last_ct1ca = ct1ca;
		spin_unlock_irqrestore(&vc4->job_lock, irqflags);
		vc4_queue_hangcheck(dev);
		return;
	}

	spin_unlock_irqrestore(&vc4->job_lock, irqflags);

	/* We've gone too long with no progress, reset.  This has to
	 * be done from a work struct, since resetting can sleep and
	 * this timer hook isn't allowed to.
	 */
	schedule_work(&vc4->hangcheck.reset_work);
}

static void
submit_cl(struct drm_device *dev, uint32_t thread, uint32_t start, uint32_t end)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	/* Set the current and end address of the control list.
	 * Writing the end register is what starts the job.
	 */
	V3D_WRITE(V3D_CTNCA(thread), start);
	V3D_WRITE(V3D_CTNEA(thread), end);
}

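/* Blocks until vc4->finished_seqno has reached the requested seqno, the
 * timeout expires, or (for interruptible waits) a signal is pending.  A
 * timeout_ns of ~0ull means wait indefinitely.  Returns 0 on success,
 * -ETIME on timeout, or -ERESTARTSYS when interrupted.
 */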
int
vc4_wait_for_seqno(struct drm_device *dev, uint64_t seqno, uint64_t timeout_ns,
		   bool interruptible)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	int ret = 0;
	unsigned long timeout_expire;
	DEFINE_WAIT(wait);

	if (vc4->finished_seqno >= seqno)
		return 0;

	if (timeout_ns == 0)
		return -ETIME;

	timeout_expire = jiffies + nsecs_to_jiffies(timeout_ns);

	trace_vc4_wait_for_seqno_begin(dev, seqno, timeout_ns);
	for (;;) {
		prepare_to_wait(&vc4->job_wait_queue, &wait,
				interruptible ? TASK_INTERRUPTIBLE :
				TASK_UNINTERRUPTIBLE);

		if (interruptible && signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}

		if (vc4->finished_seqno >= seqno)
			break;

		if (timeout_ns != ~0ull) {
			if (time_after_eq(jiffies, timeout_expire)) {
				ret = -ETIME;
				break;
			}
			schedule_timeout(timeout_expire - jiffies);
		} else {
			schedule();
		}
	}

	finish_wait(&vc4->job_wait_queue, &wait);
	trace_vc4_wait_for_seqno_end(dev, seqno);

	return ret;
}

static void
vc4_flush_caches(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	/* Flush the GPU L2 caches.  These caches sit on top of system
	 * L3 (the 128kb or so shared with the CPU), and are
	 * non-allocating in the L3.
	 */
	V3D_WRITE(V3D_L2CACTL,
		  V3D_L2CACTL_L2CCLR);

	V3D_WRITE(V3D_SLCACTL,
		  VC4_SET_FIELD(0xf, V3D_SLCACTL_T1CC) |
		  VC4_SET_FIELD(0xf, V3D_SLCACTL_T0CC) |
		  VC4_SET_FIELD(0xf, V3D_SLCACTL_UCC) |
		  VC4_SET_FIELD(0xf, V3D_SLCACTL_ICC));
}

/* Sets the registers for the next job to actually be executed in
 * the hardware.
 *
 * The job_lock should be held during this.
 */
void
vc4_submit_next_bin_job(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_exec_info *exec;

again:
	exec = vc4_first_bin_job(vc4);
	if (!exec)
		return;

	vc4_flush_caches(dev);

	/* Either put the job in the binner if it uses the binner, or
	 * immediately move it to the to-be-rendered queue.
	 */
	if (exec->ct0ca != exec->ct0ea) {
		submit_cl(dev, 0, exec->ct0ca, exec->ct0ea);
	} else {
		vc4_move_job_to_render(dev, exec);
		goto again;
	}
}

void
vc4_submit_next_render_job(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_exec_info *exec = vc4_first_render_job(vc4);

	if (!exec)
		return;

	submit_cl(dev, 1, exec->ct1ca, exec->ct1ea);
}

void
vc4_move_job_to_render(struct drm_device *dev, struct vc4_exec_info *exec)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	bool was_empty = list_empty(&vc4->render_job_list);

	list_move_tail(&exec->head, &vc4->render_job_list);
	if (was_empty)
		vc4_submit_next_render_job(dev);
}

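/* Stamps each BO referenced by the job with the job's seqno and attaches
 * the job's fence to the BOs' reservation objects: a shared fence for
 * BOs that are only read, an exclusive fence for the BOs the render job
 * writes.
 */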
static void
vc4_update_bo_seqnos(struct vc4_exec_info *exec, uint64_t seqno)
{
	struct vc4_bo *bo;
	unsigned i;

	for (i = 0; i < exec->bo_count; i++) {
		bo = to_vc4_bo(&exec->bo[i]->base);
		bo->seqno = seqno;

		reservation_object_add_shared_fence(bo->resv, exec->fence);
	}

	list_for_each_entry(bo, &exec->unref_list, unref_head) {
		bo->seqno = seqno;
	}

	for (i = 0; i < exec->rcl_write_bo_count; i++) {
		bo = to_vc4_bo(&exec->rcl_write_bo[i]->base);
		bo->write_seqno = seqno;

		reservation_object_add_excl_fence(bo->resv, exec->fence);
	}
}

static void
vc4_unlock_bo_reservations(struct drm_device *dev,
			   struct vc4_exec_info *exec,
			   struct ww_acquire_ctx *acquire_ctx)
{
	int i;

	for (i = 0; i < exec->bo_count; i++) {
		struct vc4_bo *bo = to_vc4_bo(&exec->bo[i]->base);

		ww_mutex_unlock(&bo->resv->lock);
	}

	ww_acquire_fini(acquire_ctx);
}

/* Takes the reservation lock on all the BOs being referenced, so that
 * at queue submit time we can update the reservations.
 *
 * We don't lock the RCL, the tile alloc/state BOs, or overflow memory
 * (all of which are on exec->unref_list).  They're entirely private
 * to vc4, so we don't attach dma-buf fences to them.
 */
static int
vc4_lock_bo_reservations(struct drm_device *dev,
			 struct vc4_exec_info *exec,
			 struct ww_acquire_ctx *acquire_ctx)
{
	int contended_lock = -1;
	int i, ret;
	struct vc4_bo *bo;

	ww_acquire_init(acquire_ctx, &reservation_ww_class);

retry:
	if (contended_lock != -1) {
		bo = to_vc4_bo(&exec->bo[contended_lock]->base);
		ret = ww_mutex_lock_slow_interruptible(&bo->resv->lock,
						       acquire_ctx);
		if (ret) {
			ww_acquire_done(acquire_ctx);
			return ret;
		}
	}

	for (i = 0; i < exec->bo_count; i++) {
		if (i == contended_lock)
			continue;

		bo = to_vc4_bo(&exec->bo[i]->base);

		ret = ww_mutex_lock_interruptible(&bo->resv->lock, acquire_ctx);
		if (ret) {
			int j;

			for (j = 0; j < i; j++) {
				bo = to_vc4_bo(&exec->bo[j]->base);
				ww_mutex_unlock(&bo->resv->lock);
			}

			if (contended_lock != -1 && contended_lock >= i) {
				bo = to_vc4_bo(&exec->bo[contended_lock]->base);

				ww_mutex_unlock(&bo->resv->lock);
			}

			if (ret == -EDEADLK) {
				contended_lock = i;
				goto retry;
			}

			ww_acquire_done(acquire_ctx);
			return ret;
		}
	}

	ww_acquire_done(acquire_ctx);

	/* Reserve space for our shared (read-only) fence references,
	 * before we commit the CL to the hardware.
	 */
	for (i = 0; i < exec->bo_count; i++) {
		bo = to_vc4_bo(&exec->bo[i]->base);

		ret = reservation_object_reserve_shared(bo->resv);
		if (ret) {
			vc4_unlock_bo_reservations(dev, exec, acquire_ctx);
			return ret;
		}
	}

	return 0;
}

/* Queues a struct vc4_exec_info for execution.  If no job is
 * currently executing, then submits it.
 *
 * Unlike most GPUs, our hardware only handles one command list at a
 * time.  To queue multiple jobs at once, we'd need to edit the
 * previous command list to have a jump to the new one at the end, and
 * then bump the end address.  That's a change for a later date,
 * though.
 */
static int
vc4_queue_submit(struct drm_device *dev, struct vc4_exec_info *exec,
		 struct ww_acquire_ctx *acquire_ctx)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	uint64_t seqno;
	unsigned long irqflags;
	struct vc4_fence *fence;

	fence = kzalloc(sizeof(*fence), GFP_KERNEL);
	if (!fence)
		return -ENOMEM;
	fence->dev = dev;

	spin_lock_irqsave(&vc4->job_lock, irqflags);

	seqno = ++vc4->emit_seqno;
	exec->seqno = seqno;

	dma_fence_init(&fence->base, &vc4_fence_ops, &vc4->job_lock,
		       vc4->dma_fence_context, exec->seqno);
	fence->seqno = exec->seqno;
	exec->fence = &fence->base;

	vc4_update_bo_seqnos(exec, seqno);

	vc4_unlock_bo_reservations(dev, exec, acquire_ctx);

	list_add_tail(&exec->head, &vc4->bin_job_list);

	/* If no job was executing, kick ours off.  Otherwise, it'll
	 * get started when the previous job's flush done interrupt
	 * occurs.
	 */
	if (vc4_first_bin_job(vc4) == exec) {
		vc4_submit_next_bin_job(dev);
		vc4_queue_hangcheck(dev);
	}

	spin_unlock_irqrestore(&vc4->job_lock, irqflags);

	return 0;
}

/**
 * vc4_cl_lookup_bos() - Sets up exec->bo[] with the GEM objects
 * referenced by the job.
 * @dev: DRM device
 * @file_priv: DRM file for this fd
 * @exec: V3D job being set up
 *
 * The command validator needs to reference BOs by their index within
 * the submitted job's BO list.  This does the validation of the job's
 * BO list and reference counting for the lifetime of the job.
 *
 * Note that this function doesn't need to unreference the BOs on
 * failure, because that will happen at vc4_complete_exec() time.
 */
static int
vc4_cl_lookup_bos(struct drm_device *dev,
		  struct drm_file *file_priv,
		  struct vc4_exec_info *exec)
{
	struct drm_vc4_submit_cl *args = exec->args;
	uint32_t *handles;
	int ret = 0;
	int i;

	exec->bo_count = args->bo_handle_count;

	if (!exec->bo_count) {
		/* See comment on bo_index for why we have to check
		 * this.
		 */
		DRM_ERROR("Rendering requires BOs to validate\n");
		return -EINVAL;
	}

	exec->bo = kvmalloc_array(exec->bo_count,
				  sizeof(struct drm_gem_cma_object *),
				  GFP_KERNEL | __GFP_ZERO);
	if (!exec->bo) {
		DRM_ERROR("Failed to allocate validated BO pointers\n");
		return -ENOMEM;
	}

	handles = kvmalloc_array(exec->bo_count, sizeof(uint32_t), GFP_KERNEL);
	if (!handles) {
		ret = -ENOMEM;
		DRM_ERROR("Failed to allocate incoming GEM handles\n");
		goto fail;
	}

	if (copy_from_user(handles,
			   (void __user *)(uintptr_t)args->bo_handles,
			   exec->bo_count * sizeof(uint32_t))) {
		ret = -EFAULT;
		DRM_ERROR("Failed to copy in GEM handles\n");
		goto fail;
	}

	spin_lock(&file_priv->table_lock);
	for (i = 0; i < exec->bo_count; i++) {
		struct drm_gem_object *bo = idr_find(&file_priv->object_idr,
						     handles[i]);
		if (!bo) {
			DRM_ERROR("Failed to look up GEM BO %d: %d\n",
				  i, handles[i]);
			ret = -EINVAL;
			spin_unlock(&file_priv->table_lock);
			goto fail;
		}
		drm_gem_object_reference(bo);
		exec->bo[i] = (struct drm_gem_cma_object *)bo;
	}
	spin_unlock(&file_priv->table_lock);

fail:
	kvfree(handles);
	return ret;
}

static int
vc4_get_bcl(struct drm_device *dev, struct vc4_exec_info *exec)
{
	struct drm_vc4_submit_cl *args = exec->args;
	void *temp = NULL;
	void *bin;
	int ret = 0;
	uint32_t bin_offset = 0;
	uint32_t shader_rec_offset = roundup(bin_offset + args->bin_cl_size,
					     16);
	uint32_t uniforms_offset = shader_rec_offset + args->shader_rec_size;
	uint32_t exec_size = uniforms_offset + args->uniforms_size;
	uint32_t temp_size = exec_size + (sizeof(struct vc4_shader_state) *
					  args->shader_rec_count);
	struct vc4_bo *bo;

	if (shader_rec_offset < args->bin_cl_size ||
	    uniforms_offset < shader_rec_offset ||
	    exec_size < uniforms_offset ||
	    args->shader_rec_count >= (UINT_MAX /
					  sizeof(struct vc4_shader_state)) ||
	    temp_size < exec_size) {
		DRM_ERROR("overflow in exec arguments\n");
		ret = -EINVAL;
		goto fail;
	}

	/* Allocate space where we'll store the copied in user command lists
	 * and shader records.
	 *
	 * We don't just copy directly into the BOs because we need to
	 * read the contents back for validation, and I think the
	 * bo->vaddr is uncached access.
	 */
	temp = kvmalloc_array(temp_size, 1, GFP_KERNEL);
	if (!temp) {
		DRM_ERROR("Failed to allocate storage for copying "
			  "in bin/render CLs.\n");
		ret = -ENOMEM;
		goto fail;
	}
	bin = temp + bin_offset;
	exec->shader_rec_u = temp + shader_rec_offset;
	exec->uniforms_u = temp + uniforms_offset;
	exec->shader_state = temp + exec_size;
	exec->shader_state_size = args->shader_rec_count;

	if (copy_from_user(bin,
			   (void __user *)(uintptr_t)args->bin_cl,
			   args->bin_cl_size)) {
		ret = -EFAULT;
		goto fail;
	}

	if (copy_from_user(exec->shader_rec_u,
			   (void __user *)(uintptr_t)args->shader_rec,
			   args->shader_rec_size)) {
		ret = -EFAULT;
		goto fail;
	}

	if (copy_from_user(exec->uniforms_u,
			   (void __user *)(uintptr_t)args->uniforms,
			   args->uniforms_size)) {
		ret = -EFAULT;
		goto fail;
	}

	bo = vc4_bo_create(dev, exec_size, true);
	if (IS_ERR(bo)) {
		DRM_ERROR("Couldn't allocate BO for binning\n");
		ret = PTR_ERR(bo);
		goto fail;
	}
	exec->exec_bo = &bo->base;

	list_add_tail(&to_vc4_bo(&exec->exec_bo->base)->unref_head,
		      &exec->unref_list);

	exec->ct0ca = exec->exec_bo->paddr + bin_offset;

	exec->bin_u = bin;

	exec->shader_rec_v = exec->exec_bo->vaddr + shader_rec_offset;
	exec->shader_rec_p = exec->exec_bo->paddr + shader_rec_offset;
	exec->shader_rec_size = args->shader_rec_size;

	exec->uniforms_v = exec->exec_bo->vaddr + uniforms_offset;
	exec->uniforms_p = exec->exec_bo->paddr + uniforms_offset;
	exec->uniforms_size = args->uniforms_size;

	ret = vc4_validate_bin_cl(dev,
				  exec->exec_bo->vaddr + bin_offset,
				  bin,
				  exec);
	if (ret)
		goto fail;

	ret = vc4_validate_shader_recs(dev, exec);
	if (ret)
		goto fail;

	/* Block waiting on any previous rendering into the CS's VBO,
	 * IB, or textures, so that pixels are actually written by the
	 * time we try to read them.
	 */
	ret = vc4_wait_for_seqno(dev, exec->bin_dep_seqno, ~0ull, true);

fail:
	kvfree(temp);
	return ret;
}

static void
vc4_complete_exec(struct drm_device *dev, struct vc4_exec_info *exec)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	unsigned long irqflags;
	unsigned i;

	/* If we got force-completed because of GPU reset rather than
	 * through our IRQ handler, signal the fence now.
	 */
	if (exec->fence)
		dma_fence_signal(exec->fence);

	if (exec->bo) {
		for (i = 0; i < exec->bo_count; i++)
			drm_gem_object_unreference_unlocked(&exec->bo[i]->base);
		kvfree(exec->bo);
	}

	while (!list_empty(&exec->unref_list)) {
		struct vc4_bo *bo = list_first_entry(&exec->unref_list,
						     struct vc4_bo, unref_head);
		list_del(&bo->unref_head);
		drm_gem_object_unreference_unlocked(&bo->base.base);
	}

	/* Free up the allocation of any bin slots we used. */
	spin_lock_irqsave(&vc4->job_lock, irqflags);
	vc4->bin_alloc_used &= ~exec->bin_slots;
	spin_unlock_irqrestore(&vc4->job_lock, irqflags);

	mutex_lock(&vc4->power_lock);
	if (--vc4->power_refcount == 0) {
		pm_runtime_mark_last_busy(&vc4->v3d->pdev->dev);
		pm_runtime_put_autosuspend(&vc4->v3d->pdev->dev);
	}
	mutex_unlock(&vc4->power_lock);

	kfree(exec);
}

void
vc4_job_handle_completed(struct vc4_dev *vc4)
{
	unsigned long irqflags;
	struct vc4_seqno_cb *cb, *cb_temp;

	spin_lock_irqsave(&vc4->job_lock, irqflags);
	while (!list_empty(&vc4->job_done_list)) {
		struct vc4_exec_info *exec =
			list_first_entry(&vc4->job_done_list,
					 struct vc4_exec_info, head);
		list_del(&exec->head);

		spin_unlock_irqrestore(&vc4->job_lock, irqflags);
		vc4_complete_exec(vc4->dev, exec);
		spin_lock_irqsave(&vc4->job_lock, irqflags);
	}

	list_for_each_entry_safe(cb, cb_temp, &vc4->seqno_cb_list, work.entry) {
		if (cb->seqno <= vc4->finished_seqno) {
			list_del_init(&cb->work.entry);
			schedule_work(&cb->work);
		}
	}

	spin_unlock_irqrestore(&vc4->job_lock, irqflags);
}

static void vc4_seqno_cb_work(struct work_struct *work)
{
	struct vc4_seqno_cb *cb = container_of(work, struct vc4_seqno_cb, work);

	cb->func(cb);
}

int vc4_queue_seqno_cb(struct drm_device *dev,
		       struct vc4_seqno_cb *cb, uint64_t seqno,
		       void (*func)(struct vc4_seqno_cb *cb))
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	int ret = 0;
	unsigned long irqflags;

	cb->func = func;
	INIT_WORK(&cb->work, vc4_seqno_cb_work);

	spin_lock_irqsave(&vc4->job_lock, irqflags);
	if (seqno > vc4->finished_seqno) {
		cb->seqno = seqno;
		list_add_tail(&cb->work.entry, &vc4->seqno_cb_list);
	} else {
		schedule_work(&cb->work);
	}
	spin_unlock_irqrestore(&vc4->job_lock, irqflags);

	return ret;
}

/* Scheduled when any job has been completed, this walks the list of
 * jobs that have completed, unrefs their BOs, and frees their exec
 * structs.
 */
static void
vc4_job_done_work(struct work_struct *work)
{
	struct vc4_dev *vc4 =
		container_of(work, struct vc4_dev, job_done_work);

	vc4_job_handle_completed(vc4);
}

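/* Helper shared by the wait ioctls: performs an interruptible wait and,
 * if the wait was interrupted, subtracts the time already spent from the
 * remaining timeout so that a restarted syscall doesn't wait longer than
 * the caller asked for.
 */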
static int
vc4_wait_for_seqno_ioctl_helper(struct drm_device *dev,
				uint64_t seqno,
				uint64_t *timeout_ns)
{
	unsigned long start = jiffies;
	int ret = vc4_wait_for_seqno(dev, seqno, *timeout_ns, true);

	if ((ret == -EINTR || ret == -ERESTARTSYS) && *timeout_ns != ~0ull) {
		uint64_t delta = jiffies_to_nsecs(jiffies - start);

		if (*timeout_ns >= delta)
			*timeout_ns -= delta;
	}

	return ret;
}

int
vc4_wait_seqno_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file_priv)
{
	struct drm_vc4_wait_seqno *args = data;

	return vc4_wait_for_seqno_ioctl_helper(dev, args->seqno,
					       &args->timeout_ns);
}

int
vc4_wait_bo_ioctl(struct drm_device *dev, void *data,
		  struct drm_file *file_priv)
{
	int ret;
	struct drm_vc4_wait_bo *args = data;
	struct drm_gem_object *gem_obj;
	struct vc4_bo *bo;

	if (args->pad != 0)
		return -EINVAL;

	gem_obj = drm_gem_object_lookup(file_priv, args->handle);
	if (!gem_obj) {
		DRM_ERROR("Failed to look up GEM BO %d\n", args->handle);
		return -EINVAL;
	}
	bo = to_vc4_bo(gem_obj);

	ret = vc4_wait_for_seqno_ioctl_helper(dev, bo->seqno,
					      &args->timeout_ns);

	drm_gem_object_unreference_unlocked(gem_obj);
	return ret;
}

/**
 * vc4_submit_cl_ioctl() - Submits a job (frame) to the VC4.
 * @dev: DRM device
 * @data: ioctl argument
 * @file_priv: DRM file for this fd
 *
 * This is the main entrypoint for userspace to submit a 3D frame to
 * the GPU.  Userspace provides the binner command list (if
 * applicable), and the kernel sets up the render command list to draw
 * to the framebuffer described in the ioctl, using the command lists
 * that the 3D engine's binner will produce.
 */
int
vc4_submit_cl_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct drm_vc4_submit_cl *args = data;
	struct vc4_exec_info *exec;
	struct ww_acquire_ctx acquire_ctx;
	int ret = 0;

	if ((args->flags & ~VC4_SUBMIT_CL_USE_CLEAR_COLOR) != 0) {
		DRM_ERROR("Unknown flags: 0x%02x\n", args->flags);
		return -EINVAL;
	}

	exec = kcalloc(1, sizeof(*exec), GFP_KERNEL);
	if (!exec) {
		DRM_ERROR("malloc failure on exec struct\n");
		return -ENOMEM;
	}

	mutex_lock(&vc4->power_lock);
	if (vc4->power_refcount++ == 0) {
		ret = pm_runtime_get_sync(&vc4->v3d->pdev->dev);
		if (ret < 0) {
			mutex_unlock(&vc4->power_lock);
			vc4->power_refcount--;
			kfree(exec);
			return ret;
		}
	}
	mutex_unlock(&vc4->power_lock);

	exec->args = args;
	INIT_LIST_HEAD(&exec->unref_list);

	ret = vc4_cl_lookup_bos(dev, file_priv, exec);
	if (ret)
		goto fail;

	if (exec->args->bin_cl_size != 0) {
		ret = vc4_get_bcl(dev, exec);
		if (ret)
			goto fail;
	} else {
		exec->ct0ca = 0;
		exec->ct0ea = 0;
	}

	ret = vc4_get_rcl(dev, exec);
	if (ret)
		goto fail;

	ret = vc4_lock_bo_reservations(dev, exec, &acquire_ctx);
	if (ret)
		goto fail;

	/* Clear this out of the struct we'll be putting in the queue,
	 * since it's part of our stack.
	 */
	exec->args = NULL;

	ret = vc4_queue_submit(dev, exec, &acquire_ctx);
	if (ret)
		goto fail;

	/* Return the seqno for our job. */
	args->seqno = vc4->emit_seqno;

	return 0;

fail:
	vc4_complete_exec(vc4->dev, exec);

	return ret;
}

void
vc4_gem_init(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	vc4->dma_fence_context = dma_fence_context_alloc(1);

	INIT_LIST_HEAD(&vc4->bin_job_list);
	INIT_LIST_HEAD(&vc4->render_job_list);
	INIT_LIST_HEAD(&vc4->job_done_list);
	INIT_LIST_HEAD(&vc4->seqno_cb_list);
	spin_lock_init(&vc4->job_lock);

	INIT_WORK(&vc4->hangcheck.reset_work, vc4_reset_work);
	setup_timer(&vc4->hangcheck.timer,
		    vc4_hangcheck_elapsed,
		    (unsigned long)dev);

	INIT_WORK(&vc4->job_done_work, vc4_job_done_work);

	mutex_init(&vc4->power_lock);
}

void
vc4_gem_destroy(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	/* Waiting for exec to finish would need to be done before
	 * unregistering V3D.
	 */
	WARN_ON(vc4->emit_seqno != vc4->finished_seqno);

	/* V3D should already have disabled its interrupt and cleared
	 * the overflow allocation registers.  Now free the object.
	 */
	if (vc4->bin_bo) {
		drm_gem_object_put_unlocked(&vc4->bin_bo->base.base);
		vc4->bin_bo = NULL;
	}

	if (vc4->hang_state)
		vc4_free_hang_state(dev, vc4->hang_state);

	vc4_bo_cache_destroy(dev);
}