/*
 * Copyright © 2014 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/device.h>
#include <linux/io.h>

#include "uapi/drm/vc4_drm.h"
#include "vc4_drv.h"
#include "vc4_regs.h"
#include "vc4_trace.h"
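
/* Arms the hangcheck timer to fire about 100ms from now.  While jobs
 * remain on the queues, the timer handler keeps re-arming it for as
 * long as the GPU is making progress.
 */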
static void
vc4_queue_hangcheck(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	mod_timer(&vc4->hangcheck.timer,
		  round_jiffies_up(jiffies + msecs_to_jiffies(100)));
}
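
/* A snapshot of GPU and job state captured at hang time, kept around
 * until userspace collects it with the GET_HANG_STATE ioctl.
 */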
struct vc4_hang_state {
	struct drm_vc4_get_hang_state user_state;

	u32 bo_count;
	struct drm_gem_object **bo;
};

static void
vc4_free_hang_state(struct drm_device *dev, struct vc4_hang_state *state)
{
	unsigned int i;

	for (i = 0; i < state->user_state.bo_count; i++)
		drm_gem_object_unreference_unlocked(state->bo[i]);

	kfree(state);
}
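
/* Hands the most recent hang state to userspace, creating a GEM
 * handle for each BO involved so the caller can map and inspect the
 * command lists that hung.
 */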
int
vc4_get_hang_state_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	struct drm_vc4_get_hang_state *get_state = data;
	struct drm_vc4_get_hang_state_bo *bo_state;
	struct vc4_hang_state *kernel_state;
	struct drm_vc4_get_hang_state *state;
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	unsigned long irqflags;
	u32 i;
	int ret = 0;

	spin_lock_irqsave(&vc4->job_lock, irqflags);
	kernel_state = vc4->hang_state;
	if (!kernel_state) {
		spin_unlock_irqrestore(&vc4->job_lock, irqflags);
		return -ENOENT;
	}

	state = &kernel_state->user_state;

	/* If the user's array isn't big enough, just return the
	 * required array size.
	 */
	if (get_state->bo_count < state->bo_count) {
		get_state->bo_count = state->bo_count;
		spin_unlock_irqrestore(&vc4->job_lock, irqflags);
		return 0;
	}

	vc4->hang_state = NULL;
	spin_unlock_irqrestore(&vc4->job_lock, irqflags);

	/* Save the user's BO pointer, so we don't stomp it with the memcpy. */
	state->bo = get_state->bo;
	memcpy(get_state, state, sizeof(*state));

	bo_state = kcalloc(state->bo_count, sizeof(*bo_state), GFP_KERNEL);
	if (!bo_state) {
		ret = -ENOMEM;
		goto err_free;
	}

	for (i = 0; i < state->bo_count; i++) {
		struct vc4_bo *vc4_bo = to_vc4_bo(kernel_state->bo[i]);
		u32 handle;

		ret = drm_gem_handle_create(file_priv, kernel_state->bo[i],
					    &handle);

		if (ret) {
			state->bo_count = i;
			goto err_delete_handle;
		}
		bo_state[i].handle = handle;
		bo_state[i].paddr = vc4_bo->base.paddr;
		bo_state[i].size = vc4_bo->base.base.size;
	}

	if (copy_to_user((void __user *)(uintptr_t)get_state->bo,
			 bo_state,
			 state->bo_count * sizeof(*bo_state)))
		ret = -EFAULT;

err_delete_handle:
	if (ret) {
		for (i = 0; i < state->bo_count; i++)
			drm_gem_handle_delete(file_priv, bo_state[i].handle);
	}

err_free:
	vc4_free_hang_state(dev, kernel_state);
	kfree(bo_state);

	return ret;
}
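
/* Called from the reset work before the GPU is reset, to snapshot the
 * jobs on the queues and the V3D registers so that userspace can
 * fetch them later with the GET_HANG_STATE ioctl.
 */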
static void
vc4_save_hang_state(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct drm_vc4_get_hang_state *state;
	struct vc4_hang_state *kernel_state;
	struct vc4_exec_info *exec[2];
	struct vc4_bo *bo;
	unsigned long irqflags;
	unsigned int i, j, unref_list_count, prev_idx;

	kernel_state = kcalloc(1, sizeof(*kernel_state), GFP_KERNEL);
	if (!kernel_state)
		return;

	state = &kernel_state->user_state;

	spin_lock_irqsave(&vc4->job_lock, irqflags);
	exec[0] = vc4_first_bin_job(vc4);
	exec[1] = vc4_first_render_job(vc4);
	if (!exec[0] && !exec[1]) {
		spin_unlock_irqrestore(&vc4->job_lock, irqflags);
		kfree(kernel_state);
		return;
	}

	/* Get the bos from both binner and renderer into hang state. */
	state->bo_count = 0;
	for (i = 0; i < 2; i++) {
		if (!exec[i])
			continue;

		unref_list_count = 0;
		list_for_each_entry(bo, &exec[i]->unref_list, unref_head)
			unref_list_count++;
		state->bo_count += exec[i]->bo_count + unref_list_count;
	}

	kernel_state->bo = kcalloc(state->bo_count,
				   sizeof(*kernel_state->bo), GFP_ATOMIC);

	if (!kernel_state->bo) {
		spin_unlock_irqrestore(&vc4->job_lock, irqflags);
		kfree(kernel_state);
		return;
	}

	prev_idx = 0;
	for (i = 0; i < 2; i++) {
		if (!exec[i])
			continue;

		for (j = 0; j < exec[i]->bo_count; j++) {
			drm_gem_object_reference(&exec[i]->bo[j]->base);
			kernel_state->bo[j + prev_idx] = &exec[i]->bo[j]->base;
		}

		list_for_each_entry(bo, &exec[i]->unref_list, unref_head) {
			drm_gem_object_reference(&bo->base.base);
			kernel_state->bo[j + prev_idx] = &bo->base.base;
			j++;
		}
		prev_idx += j;
	}

	if (exec[0])
		state->start_bin = exec[0]->ct0ca;
	if (exec[1])
		state->start_render = exec[1]->ct1ca;

	spin_unlock_irqrestore(&vc4->job_lock, irqflags);

	state->ct0ca = V3D_READ(V3D_CTNCA(0));
	state->ct0ea = V3D_READ(V3D_CTNEA(0));

	state->ct1ca = V3D_READ(V3D_CTNCA(1));
	state->ct1ea = V3D_READ(V3D_CTNEA(1));

	state->ct0cs = V3D_READ(V3D_CTNCS(0));
	state->ct1cs = V3D_READ(V3D_CTNCS(1));

	state->ct0ra0 = V3D_READ(V3D_CT00RA0);
	state->ct1ra0 = V3D_READ(V3D_CT01RA0);

	state->bpca = V3D_READ(V3D_BPCA);
	state->bpcs = V3D_READ(V3D_BPCS);
	state->bpoa = V3D_READ(V3D_BPOA);
	state->bpos = V3D_READ(V3D_BPOS);

	state->vpmbase = V3D_READ(V3D_VPMBASE);

	state->dbge = V3D_READ(V3D_DBGE);
	state->fdbgo = V3D_READ(V3D_FDBGO);
	state->fdbgb = V3D_READ(V3D_FDBGB);
	state->fdbgr = V3D_READ(V3D_FDBGR);
	state->fdbgs = V3D_READ(V3D_FDBGS);
	state->errstat = V3D_READ(V3D_ERRSTAT);

	spin_lock_irqsave(&vc4->job_lock, irqflags);
	if (vc4->hang_state) {
		spin_unlock_irqrestore(&vc4->job_lock, irqflags);
		vc4_free_hang_state(dev, kernel_state);
	} else {
		vc4->hang_state = kernel_state;
		spin_unlock_irqrestore(&vc4->job_lock, irqflags);
	}
}
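
/* Resets the GPU by power cycling V3D through runtime PM, then lets
 * the IRQ code restore its state and re-arms the hangcheck timer.
 */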
static void
vc4_reset(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	DRM_INFO("Resetting GPU.\n");

	mutex_lock(&vc4->power_lock);
	if (vc4->power_refcount) {
		/* Power the device off and back on by dropping the
		 * reference on runtime PM.
		 */
		pm_runtime_put_sync_suspend(&vc4->v3d->pdev->dev);
		pm_runtime_get_sync(&vc4->v3d->pdev->dev);
	}
	mutex_unlock(&vc4->power_lock);

	vc4_irq_reset(dev);

	/* Rearm the hangcheck -- another job might have been waiting
	 * for our hung one to get kicked off, and vc4_irq_reset()
	 * would have started it.
	 */
	vc4_queue_hangcheck(dev);
}

static void
vc4_reset_work(struct work_struct *work)
{
	struct vc4_dev *vc4 =
		container_of(work, struct vc4_dev, hangcheck.reset_work);

	vc4_save_hang_state(vc4->dev);

	vc4_reset(vc4->dev);
}
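
/* Hangcheck timer handler.  Runs in timer (softirq) context, so it
 * may only inspect state and schedule the reset work, never sleep.
 */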
static void
vc4_hangcheck_elapsed(unsigned long data)
{
	struct drm_device *dev = (struct drm_device *)data;
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	uint32_t ct0ca, ct1ca;
	unsigned long irqflags;
	struct vc4_exec_info *bin_exec, *render_exec;

	spin_lock_irqsave(&vc4->job_lock, irqflags);

	bin_exec = vc4_first_bin_job(vc4);
	render_exec = vc4_first_render_job(vc4);

	/* If idle, we can stop watching for hangs. */
	if (!bin_exec && !render_exec) {
		spin_unlock_irqrestore(&vc4->job_lock, irqflags);
		return;
	}

	ct0ca = V3D_READ(V3D_CTNCA(0));
	ct1ca = V3D_READ(V3D_CTNCA(1));

	/* If we've made any progress in execution, rearm the timer
	 * and wait.
	 */
	if ((bin_exec && ct0ca != bin_exec->last_ct0ca) ||
	    (render_exec && ct1ca != render_exec->last_ct1ca)) {
		if (bin_exec)
			bin_exec->last_ct0ca = ct0ca;
		if (render_exec)
			render_exec->last_ct1ca = ct1ca;
		spin_unlock_irqrestore(&vc4->job_lock, irqflags);
		vc4_queue_hangcheck(dev);
		return;
	}

	spin_unlock_irqrestore(&vc4->job_lock, irqflags);

	/* We've gone too long with no progress, reset.  This has to
	 * be done from a work struct, since resetting can sleep and
	 * this timer hook isn't allowed to.
	 */
	schedule_work(&vc4->hangcheck.reset_work);
}

static void
submit_cl(struct drm_device *dev, uint32_t thread, uint32_t start, uint32_t end)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	/* Set the current and end address of the control list.
	 * Writing the end register is what starts the job.
	 */
	V3D_WRITE(V3D_CTNCA(thread), start);
	V3D_WRITE(V3D_CTNEA(thread), end);
}
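
/* Blocks until the GPU has signalled completion of @seqno, the
 * timeout expires, or (if @interruptible) a signal arrives.  A
 * timeout_ns of ~0ull means wait forever.
 */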
int
vc4_wait_for_seqno(struct drm_device *dev, uint64_t seqno, uint64_t timeout_ns,
		   bool interruptible)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	int ret = 0;
	unsigned long timeout_expire;
	DEFINE_WAIT(wait);

	if (vc4->finished_seqno >= seqno)
		return 0;

	if (timeout_ns == 0)
		return -ETIME;

	timeout_expire = jiffies + nsecs_to_jiffies(timeout_ns);

	trace_vc4_wait_for_seqno_begin(dev, seqno, timeout_ns);
	for (;;) {
		prepare_to_wait(&vc4->job_wait_queue, &wait,
				interruptible ? TASK_INTERRUPTIBLE :
				TASK_UNINTERRUPTIBLE);

		if (interruptible && signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}

		if (vc4->finished_seqno >= seqno)
			break;

		if (timeout_ns != ~0ull) {
			if (time_after_eq(jiffies, timeout_expire)) {
				ret = -ETIME;
				break;
			}
			schedule_timeout(timeout_expire - jiffies);
		} else {
			schedule();
		}
	}

	finish_wait(&vc4->job_wait_queue, &wait);
	trace_vc4_wait_for_seqno_end(dev, seqno);

	return ret;
}

static void
vc4_flush_caches(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	/* Flush the GPU L2 caches.  These caches sit on top of system
	 * L3 (the 128kb or so shared with the CPU), and are
	 * non-allocating in the L3.
	 */
	V3D_WRITE(V3D_L2CACTL,
		  V3D_L2CACTL_L2CCLR);

	V3D_WRITE(V3D_SLCACTL,
		  VC4_SET_FIELD(0xf, V3D_SLCACTL_T1CC) |
		  VC4_SET_FIELD(0xf, V3D_SLCACTL_T0CC) |
		  VC4_SET_FIELD(0xf, V3D_SLCACTL_UCC) |
		  VC4_SET_FIELD(0xf, V3D_SLCACTL_ICC));
}

/* Sets the registers for the next job to actually be executed in
 * the hardware.
 *
 * The job_lock should be held during this.
 */
void
vc4_submit_next_bin_job(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_exec_info *exec;

again:
	exec = vc4_first_bin_job(vc4);
	if (!exec)
		return;

	vc4_flush_caches(dev);

	/* Either put the job in the binner if it uses the binner, or
	 * immediately move it to the to-be-rendered queue.
	 */
	if (exec->ct0ca != exec->ct0ea) {
		submit_cl(dev, 0, exec->ct0ca, exec->ct0ea);
	} else {
		vc4_move_job_to_render(dev, exec);
		goto again;
	}
}

void
vc4_submit_next_render_job(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_exec_info *exec = vc4_first_render_job(vc4);

	if (!exec)
		return;

	submit_cl(dev, 1, exec->ct1ca, exec->ct1ea);
}

void
vc4_move_job_to_render(struct drm_device *dev, struct vc4_exec_info *exec)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	bool was_empty = list_empty(&vc4->render_job_list);

	list_move_tail(&exec->head, &vc4->render_job_list);
	if (was_empty)
		vc4_submit_next_render_job(dev);
}
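
/* Stamps every BO referenced by the job with the job's seqno, so that
 * waits on those BOs cover this job's rendering.  BOs written by the
 * RCL additionally get their write_seqno bumped for read-after-write
 * dependency tracking.
 */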
static void
vc4_update_bo_seqnos(struct vc4_exec_info *exec, uint64_t seqno)
{
	struct vc4_bo *bo;
	unsigned i;

	for (i = 0; i < exec->bo_count; i++) {
		bo = to_vc4_bo(&exec->bo[i]->base);
		bo->seqno = seqno;
	}

	list_for_each_entry(bo, &exec->unref_list, unref_head) {
		bo->seqno = seqno;
	}

	for (i = 0; i < exec->rcl_write_bo_count; i++) {
		bo = to_vc4_bo(&exec->rcl_write_bo[i]->base);
		bo->write_seqno = seqno;
	}
}

/* Queues a struct vc4_exec_info for execution.  If no job is
 * currently executing, then submits it.
 *
 * Unlike most GPUs, our hardware only handles one command list at a
 * time.  To queue multiple jobs at once, we'd need to edit the
 * previous command list to have a jump to the new one at the end, and
 * then bump the end address.  That's a change for a later date,
 * though.
 */
static void
vc4_queue_submit(struct drm_device *dev, struct vc4_exec_info *exec)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	uint64_t seqno;
	unsigned long irqflags;

	spin_lock_irqsave(&vc4->job_lock, irqflags);

	seqno = ++vc4->emit_seqno;
	exec->seqno = seqno;
	vc4_update_bo_seqnos(exec, seqno);

	list_add_tail(&exec->head, &vc4->bin_job_list);

	/* If no job was executing, kick ours off.  Otherwise, it'll
	 * get started when the previous job's flush done interrupt
	 * occurs.
	 */
	if (vc4_first_bin_job(vc4) == exec) {
		vc4_submit_next_bin_job(dev);
		vc4_queue_hangcheck(dev);
	}

	spin_unlock_irqrestore(&vc4->job_lock, irqflags);
}

/**
 * Looks up a bunch of GEM handles for BOs and stores the array for
 * use in the command validator that actually writes relocated
 * addresses pointing to them.
 */
static int
vc4_cl_lookup_bos(struct drm_device *dev,
		  struct drm_file *file_priv,
		  struct vc4_exec_info *exec)
{
	struct drm_vc4_submit_cl *args = exec->args;
	uint32_t *handles;
	int ret = 0;
	int i;

	exec->bo_count = args->bo_handle_count;

	if (!exec->bo_count) {
		/* See comment on bo_index for why we have to check
		 * this.
		 */
		DRM_ERROR("Rendering requires BOs to validate\n");
		return -EINVAL;
	}

	exec->bo = drm_calloc_large(exec->bo_count,
				    sizeof(struct drm_gem_cma_object *));
	if (!exec->bo) {
		DRM_ERROR("Failed to allocate validated BO pointers\n");
		return -ENOMEM;
	}

	handles = drm_malloc_ab(exec->bo_count, sizeof(uint32_t));
	if (!handles) {
		ret = -ENOMEM;
		DRM_ERROR("Failed to allocate incoming GEM handles\n");
		goto fail;
	}

	if (copy_from_user(handles,
			   (void __user *)(uintptr_t)args->bo_handles,
			   exec->bo_count * sizeof(uint32_t))) {
		ret = -EFAULT;
		DRM_ERROR("Failed to copy in GEM handles\n");
		goto fail;
	}

	spin_lock(&file_priv->table_lock);
	for (i = 0; i < exec->bo_count; i++) {
		struct drm_gem_object *bo = idr_find(&file_priv->object_idr,
						     handles[i]);
		if (!bo) {
			DRM_ERROR("Failed to look up GEM BO %d: %d\n",
				  i, handles[i]);
			ret = -EINVAL;
			spin_unlock(&file_priv->table_lock);
			goto fail;
		}
		drm_gem_object_reference(bo);
		exec->bo[i] = (struct drm_gem_cma_object *)bo;
	}
	spin_unlock(&file_priv->table_lock);

fail:
	drm_free_large(handles);
	return ret;
}

static int
vc4_get_bcl(struct drm_device *dev, struct vc4_exec_info *exec)
{
	struct drm_vc4_submit_cl *args = exec->args;
	void *temp = NULL;
	void *bin;
	int ret = 0;
	uint32_t bin_offset = 0;
	uint32_t shader_rec_offset = roundup(bin_offset + args->bin_cl_size,
					     16);
	uint32_t uniforms_offset = shader_rec_offset + args->shader_rec_size;
	uint32_t exec_size = uniforms_offset + args->uniforms_size;
	uint32_t temp_size = exec_size + (sizeof(struct vc4_shader_state) *
					  args->shader_rec_count);
	struct vc4_bo *bo;

	if (shader_rec_offset < args->bin_cl_size ||
	    uniforms_offset < shader_rec_offset ||
	    exec_size < uniforms_offset ||
	    args->shader_rec_count >= (UINT_MAX /
				       sizeof(struct vc4_shader_state)) ||
	    temp_size < exec_size) {
		DRM_ERROR("overflow in exec arguments\n");
		ret = -EINVAL;
		goto fail;
	}

	/* Allocate space where we'll store the copied in user command lists
	 * and shader records.
	 *
	 * We don't just copy directly into the BOs because we need to
	 * read the contents back for validation, and I think the
	 * bo->vaddr is uncached access.
	 */
	temp = drm_malloc_ab(temp_size, 1);
	if (!temp) {
		DRM_ERROR("Failed to allocate storage for copying "
			  "in bin/render CLs.\n");
		ret = -ENOMEM;
		goto fail;
	}
	bin = temp + bin_offset;
	exec->shader_rec_u = temp + shader_rec_offset;
	exec->uniforms_u = temp + uniforms_offset;
	exec->shader_state = temp + exec_size;
	exec->shader_state_size = args->shader_rec_count;

	if (copy_from_user(bin,
			   (void __user *)(uintptr_t)args->bin_cl,
			   args->bin_cl_size)) {
		ret = -EFAULT;
		goto fail;
	}

	if (copy_from_user(exec->shader_rec_u,
			   (void __user *)(uintptr_t)args->shader_rec,
			   args->shader_rec_size)) {
		ret = -EFAULT;
		goto fail;
	}

	if (copy_from_user(exec->uniforms_u,
			   (void __user *)(uintptr_t)args->uniforms,
			   args->uniforms_size)) {
		ret = -EFAULT;
		goto fail;
	}

	bo = vc4_bo_create(dev, exec_size, true);
	if (IS_ERR(bo)) {
		DRM_ERROR("Couldn't allocate BO for binning\n");
		ret = PTR_ERR(bo);
		goto fail;
	}
	exec->exec_bo = &bo->base;

	list_add_tail(&to_vc4_bo(&exec->exec_bo->base)->unref_head,
		      &exec->unref_list);

	exec->ct0ca = exec->exec_bo->paddr + bin_offset;

	exec->bin_u = bin;

	exec->shader_rec_v = exec->exec_bo->vaddr + shader_rec_offset;
	exec->shader_rec_p = exec->exec_bo->paddr + shader_rec_offset;
	exec->shader_rec_size = args->shader_rec_size;

	exec->uniforms_v = exec->exec_bo->vaddr + uniforms_offset;
	exec->uniforms_p = exec->exec_bo->paddr + uniforms_offset;
	exec->uniforms_size = args->uniforms_size;

	ret = vc4_validate_bin_cl(dev,
				  exec->exec_bo->vaddr + bin_offset,
				  bin,
				  exec);
	if (ret)
		goto fail;

	ret = vc4_validate_shader_recs(dev, exec);
	if (ret)
		goto fail;

	/* Block waiting on any previous rendering into the CS's VBO,
	 * IB, or textures, so that pixels are actually written by the
	 * time we try to read them.
	 */
	ret = vc4_wait_for_seqno(dev, exec->bin_dep_seqno, ~0ull, true);

fail:
	drm_free_large(temp);
	return ret;
}
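
/* Releases the job's references on its BOs, frees the BOs on the
 * unref list, drops the runtime PM reference taken at submit time,
 * and frees the exec struct itself.
 */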
static void
vc4_complete_exec(struct drm_device *dev, struct vc4_exec_info *exec)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	unsigned i;

	if (exec->bo) {
		for (i = 0; i < exec->bo_count; i++)
			drm_gem_object_unreference_unlocked(&exec->bo[i]->base);
		drm_free_large(exec->bo);
	}

	while (!list_empty(&exec->unref_list)) {
		struct vc4_bo *bo = list_first_entry(&exec->unref_list,
						     struct vc4_bo, unref_head);
		list_del(&bo->unref_head);
		drm_gem_object_unreference_unlocked(&bo->base.base);
	}

	mutex_lock(&vc4->power_lock);
	if (--vc4->power_refcount == 0) {
		pm_runtime_mark_last_busy(&vc4->v3d->pdev->dev);
		pm_runtime_put_autosuspend(&vc4->v3d->pdev->dev);
	}
	mutex_unlock(&vc4->power_lock);

	kfree(exec);
}
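
/* Reaps finished jobs off the job_done list, freeing their resources,
 * and kicks any seqno callbacks whose seqno has now been reached.
 */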
void
vc4_job_handle_completed(struct vc4_dev *vc4)
{
	unsigned long irqflags;
	struct vc4_seqno_cb *cb, *cb_temp;

	spin_lock_irqsave(&vc4->job_lock, irqflags);
	while (!list_empty(&vc4->job_done_list)) {
		struct vc4_exec_info *exec =
			list_first_entry(&vc4->job_done_list,
					 struct vc4_exec_info, head);
		list_del(&exec->head);

		spin_unlock_irqrestore(&vc4->job_lock, irqflags);
		vc4_complete_exec(vc4->dev, exec);
		spin_lock_irqsave(&vc4->job_lock, irqflags);
	}

	list_for_each_entry_safe(cb, cb_temp, &vc4->seqno_cb_list, work.entry) {
		if (cb->seqno <= vc4->finished_seqno) {
			list_del_init(&cb->work.entry);
			schedule_work(&cb->work);
		}
	}

	spin_unlock_irqrestore(&vc4->job_lock, irqflags);
}

static void vc4_seqno_cb_work(struct work_struct *work)
{
	struct vc4_seqno_cb *cb = container_of(work, struct vc4_seqno_cb, work);

	cb->func(cb);
}
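
/* Registers @func to be called once @seqno has been reached.  If the
 * seqno has already passed, the callback is scheduled immediately.
 */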
int vc4_queue_seqno_cb(struct drm_device *dev,
		       struct vc4_seqno_cb *cb, uint64_t seqno,
		       void (*func)(struct vc4_seqno_cb *cb))
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	int ret = 0;
	unsigned long irqflags;

	cb->func = func;
	INIT_WORK(&cb->work, vc4_seqno_cb_work);

	spin_lock_irqsave(&vc4->job_lock, irqflags);
	if (seqno > vc4->finished_seqno) {
		cb->seqno = seqno;
		list_add_tail(&cb->work.entry, &vc4->seqno_cb_list);
	} else {
		schedule_work(&cb->work);
	}
	spin_unlock_irqrestore(&vc4->job_lock, irqflags);

	return ret;
}

/* Scheduled when any job has been completed, this walks the list of
 * jobs that had completed and unrefs their BOs and frees their exec
 * structs.
 */
static void
vc4_job_done_work(struct work_struct *work)
{
	struct vc4_dev *vc4 =
		container_of(work, struct vc4_dev, job_done_work);

	vc4_job_handle_completed(vc4);
}

static int
vc4_wait_for_seqno_ioctl_helper(struct drm_device *dev,
				uint64_t seqno,
				uint64_t *timeout_ns)
{
	unsigned long start = jiffies;
	int ret = vc4_wait_for_seqno(dev, seqno, *timeout_ns, true);

	if ((ret == -EINTR || ret == -ERESTARTSYS) && *timeout_ns != ~0ull) {
		uint64_t delta = jiffies_to_nsecs(jiffies - start);

		if (*timeout_ns >= delta)
			*timeout_ns -= delta;
	}

	return ret;
}
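
/* The WAIT_SEQNO and WAIT_BO ioctls both reduce to a seqno wait: the
 * former on a seqno the caller already holds, the latter on the seqno
 * last stamped into the BO at submit time.
 */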
int
vc4_wait_seqno_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file_priv)
{
	struct drm_vc4_wait_seqno *args = data;

	return vc4_wait_for_seqno_ioctl_helper(dev, args->seqno,
					       &args->timeout_ns);
}

int
vc4_wait_bo_ioctl(struct drm_device *dev, void *data,
		  struct drm_file *file_priv)
{
	int ret;
	struct drm_vc4_wait_bo *args = data;
	struct drm_gem_object *gem_obj;
	struct vc4_bo *bo;

	if (args->pad != 0)
		return -EINVAL;

	gem_obj = drm_gem_object_lookup(file_priv, args->handle);
	if (!gem_obj) {
		DRM_ERROR("Failed to look up GEM BO %d\n", args->handle);
		return -EINVAL;
	}
	bo = to_vc4_bo(gem_obj);

	ret = vc4_wait_for_seqno_ioctl_helper(dev, bo->seqno,
					      &args->timeout_ns);

	drm_gem_object_unreference_unlocked(gem_obj);
	return ret;
}

/**
 * Submits a command list to the VC4.
 *
 * This is what is called batchbuffer emitting on other hardware.
 */
int
vc4_submit_cl_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct drm_vc4_submit_cl *args = data;
	struct vc4_exec_info *exec;
	int ret = 0;

	if ((args->flags & ~VC4_SUBMIT_CL_USE_CLEAR_COLOR) != 0) {
		DRM_ERROR("Unknown flags: 0x%02x\n", args->flags);
		return -EINVAL;
	}

	exec = kcalloc(1, sizeof(*exec), GFP_KERNEL);
	if (!exec) {
		DRM_ERROR("malloc failure on exec struct\n");
		return -ENOMEM;
	}

	mutex_lock(&vc4->power_lock);
	if (vc4->power_refcount++ == 0)
		ret = pm_runtime_get_sync(&vc4->v3d->pdev->dev);
	mutex_unlock(&vc4->power_lock);
	if (ret < 0) {
		kfree(exec);
		return ret;
	}

	exec->args = args;
	INIT_LIST_HEAD(&exec->unref_list);

	ret = vc4_cl_lookup_bos(dev, file_priv, exec);
	if (ret)
		goto fail;

	if (exec->args->bin_cl_size != 0) {
		ret = vc4_get_bcl(dev, exec);
		if (ret)
			goto fail;
	} else {
		exec->ct0ca = 0;
		exec->ct0ea = 0;
	}

	ret = vc4_get_rcl(dev, exec);
	if (ret)
		goto fail;

	/* Clear this out of the struct we'll be putting in the queue,
	 * since it's part of our stack.
	 */
	exec->args = NULL;

	vc4_queue_submit(dev, exec);

	/* Return the seqno for our job. */
	args->seqno = vc4->emit_seqno;

	return 0;

fail:
	vc4_complete_exec(vc4->dev, exec);

	return ret;
}

void
vc4_gem_init(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	INIT_LIST_HEAD(&vc4->bin_job_list);
	INIT_LIST_HEAD(&vc4->render_job_list);
	INIT_LIST_HEAD(&vc4->job_done_list);
	INIT_LIST_HEAD(&vc4->seqno_cb_list);
	spin_lock_init(&vc4->job_lock);

	INIT_WORK(&vc4->hangcheck.reset_work, vc4_reset_work);
	setup_timer(&vc4->hangcheck.timer,
		    vc4_hangcheck_elapsed,
		    (unsigned long)dev);

	INIT_WORK(&vc4->job_done_work, vc4_job_done_work);

	mutex_init(&vc4->power_lock);
}

void
vc4_gem_destroy(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	/* Waiting for exec to finish would need to be done before
	 * unregistering V3D.
	 */
	WARN_ON(vc4->emit_seqno != vc4->finished_seqno);

	/* V3D should already have disabled its interrupt and cleared
	 * the overflow allocation registers.  Now free the object.
	 */
	if (vc4->overflow_mem) {
		drm_gem_object_unreference_unlocked(&vc4->overflow_mem->base.base);
		vc4->overflow_mem = NULL;
	}

	if (vc4->hang_state)
		vc4_free_hang_state(dev, vc4->hang_state);

	vc4_bo_cache_destroy(dev);
}